hadoop git commit: HDDS-310. VolumeSet shutdown hook fails on datanode restart. Contributed by Bharat Viswanadham.

2018-08-01 Thread nanda
Repository: hadoop
Updated Branches:
  refs/heads/trunk 735b49255 -> 41da2050b


HDDS-310. VolumeSet shutdown hook fails on datanode restart. Contributed by 
Bharat Viswanadham.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/41da2050
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/41da2050
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/41da2050

Branch: refs/heads/trunk
Commit: 41da2050bdec14709a86fa8a5cf7da82415fd989
Parents: 735b492
Author: Nanda kumar 
Authored: Thu Aug 2 11:35:22 2018 +0530
Committer: Nanda kumar 
Committed: Thu Aug 2 11:35:22 2018 +0530

--
 .../ozone/container/common/volume/VolumeSet.java | 15 +--
 1 file changed, 13 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/41da2050/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeSet.java
--
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeSet.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeSet.java
index 4a1487b..06f48fc 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeSet.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeSet.java
@@ -167,7 +167,7 @@ public class VolumeSet {
 
 // Ensure volume threads are stopped and scm df is saved during shutdown.
 shutdownHook = () -> {
-  shutdown();
+  saveVolumeSetUsed();
 };
 ShutdownHookManager.get().addShutdownHook(shutdownHook,
 SHUTDOWN_HOOK_PRIORITY);
@@ -303,7 +303,11 @@ public class VolumeSet {
 return choosingPolicy.chooseVolume(getVolumesList(), containerSize);
   }
 
-  public void shutdown() {
+  /**
+   * This method, call shutdown on each volume to shutdown volume usage
+   * thread and write scmUsed on each volume.
+   */
+  private void saveVolumeSetUsed() {
 for (HddsVolume hddsVolume : volumeMap.values()) {
   try {
 hddsVolume.shutdown();
@@ -312,7 +316,14 @@ public class VolumeSet {
 ex);
   }
 }
+  }
 
+  /**
+   * Shutdown's the volumeset, if saveVolumeSetUsed is false, call's
+   * {@link VolumeSet#saveVolumeSetUsed}.
+   */
+  public void shutdown() {
+saveVolumeSetUsed();
 if (shutdownHook != null) {
   ShutdownHookManager.get().removeShutdownHook(shutdownHook);
 }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: YARN-8593. Add RM web service endpoint to get user information. Contributed by Akhil PB.

2018-08-01 Thread sunilg
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 680dac26d -> 1f77b20f0


YARN-8593. Add RM web service endpoint to get user information. Contributed by 
Akhil PB.

(cherry picked from commit 735b4925569541fb8e65dc0c668ccc2aa2ffb30b)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1f77b20f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1f77b20f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1f77b20f

Branch: refs/heads/branch-3.1
Commit: 1f77b20f08e018ebb00056a442929e37854da395
Parents: 680dac2
Author: Sunil G 
Authored: Thu Aug 2 08:34:09 2018 +0530
Committer: Sunil G 
Committed: Thu Aug 2 08:35:54 2018 +0530

--
 .../server/resourcemanager/ResourceManager.java |  4 ++
 .../resourcemanager/webapp/RMWSConsts.java  |  3 +
 .../webapp/RMWebServiceProtocol.java| 10 +++
 .../resourcemanager/webapp/RMWebServices.java   | 12 
 .../webapp/dao/ClusterUserInfo.java | 64 
 .../webapp/TestRMWebServices.java   | 21 +++
 .../webapp/DefaultRequestInterceptorREST.java   |  8 +++
 .../webapp/FederationInterceptorREST.java   |  6 ++
 .../server/router/webapp/RouterWebServices.java | 12 
 .../webapp/MockRESTRequestInterceptor.java  |  6 ++
 .../PassThroughRESTRequestInterceptor.java  |  6 ++
 11 files changed, 152 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1f77b20f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
index f14d440..bb85b67 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
@@ -222,6 +222,10 @@ public class ResourceManager extends CompositeService 
implements Recoverable {
 return clusterTimeStamp;
   }
 
+  public String getRMLoginUser() {
+return rmLoginUGI.getShortUserName();
+  }
+
   @VisibleForTesting
   protected static void setClusterTimeStamp(long timestamp) {
 clusterTimeStamp = timestamp;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1f77b20f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWSConsts.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWSConsts.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWSConsts.java
index 29ae81b..8d9ccf7 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWSConsts.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWSConsts.java
@@ -36,6 +36,9 @@ public final class RMWSConsts {
   /** Path for {@code RMWebServiceProtocol#getClusterInfo}. */
   public static final String INFO = "/info";
 
+  /** Path for {@code RMWebServiceProtocol#getClusterUserInfo}. */
+  public static final String CLUSTER_USER_INFO = "/userinfo";
+
   /** Path for {@code RMWebServiceProtocol#getClusterMetricsInfo}. */
   public static final String METRICS = "/metrics";
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1f77b20f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServiceProtocol.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServiceProtocol.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/jav

hadoop git commit: YARN-8593. Add RM web service endpoint to get user information. Contributed by Akhil PB.

2018-08-01 Thread sunilg
Repository: hadoop
Updated Branches:
  refs/heads/trunk 23f394240 -> 735b49255


YARN-8593. Add RM web service endpoint to get user information. Contributed by 
Akhil PB.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/735b4925
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/735b4925
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/735b4925

Branch: refs/heads/trunk
Commit: 735b4925569541fb8e65dc0c668ccc2aa2ffb30b
Parents: 23f3942
Author: Sunil G 
Authored: Thu Aug 2 08:34:09 2018 +0530
Committer: Sunil G 
Committed: Thu Aug 2 08:34:09 2018 +0530

--
 .../server/resourcemanager/ResourceManager.java |  4 ++
 .../resourcemanager/webapp/RMWSConsts.java  |  3 +
 .../webapp/RMWebServiceProtocol.java| 10 +++
 .../resourcemanager/webapp/RMWebServices.java   | 12 
 .../webapp/dao/ClusterUserInfo.java | 64 
 .../webapp/TestRMWebServices.java   | 21 +++
 .../webapp/DefaultRequestInterceptorREST.java   |  8 +++
 .../webapp/FederationInterceptorREST.java   |  6 ++
 .../server/router/webapp/RouterWebServices.java | 12 
 .../webapp/MockRESTRequestInterceptor.java  |  6 ++
 .../PassThroughRESTRequestInterceptor.java  |  6 ++
 11 files changed, 152 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/735b4925/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
index f14d440..bb85b67 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
@@ -222,6 +222,10 @@ public class ResourceManager extends CompositeService 
implements Recoverable {
 return clusterTimeStamp;
   }
 
+  public String getRMLoginUser() {
+return rmLoginUGI.getShortUserName();
+  }
+
   @VisibleForTesting
   protected static void setClusterTimeStamp(long timestamp) {
 clusterTimeStamp = timestamp;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/735b4925/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWSConsts.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWSConsts.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWSConsts.java
index 9822878..a3fd2a9 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWSConsts.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWSConsts.java
@@ -36,6 +36,9 @@ public final class RMWSConsts {
   /** Path for {@code RMWebServiceProtocol#getClusterInfo}. */
   public static final String INFO = "/info";
 
+  /** Path for {@code RMWebServiceProtocol#getClusterUserInfo}. */
+  public static final String CLUSTER_USER_INFO = "/userinfo";
+
   /** Path for {@code RMWebServiceProtocol#getClusterMetricsInfo}. */
   public static final String METRICS = "/metrics";
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/735b4925/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServiceProtocol.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServiceProtocol.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServiceProtocol.java

hadoop git commit: HADOOP-15607. AliyunOSS: fix duplicated partNumber issue in AliyunOSSBlockOutputStream. Contributed by Jinhu Wu.

2018-08-01 Thread sammichen
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.9 d9b9c9125 -> cde4d8697


HADOOP-15607. AliyunOSS: fix duplicated partNumber issue in 
AliyunOSSBlockOutputStream. Contributed by Jinhu Wu.

(cherry picked from commit 418e957c64cc31f13ea07c1b9d47208dcb4b4101)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cde4d869
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cde4d869
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cde4d869

Branch: refs/heads/branch-2.9
Commit: cde4d8697833abbfe51761bb995d0592a0b3dfa2
Parents: d9b9c91
Author: Sammi Chen 
Authored: Thu Aug 2 10:13:22 2018 +0800
Committer: Sammi Chen 
Committed: Thu Aug 2 10:27:38 2018 +0800

--
 .../aliyun/oss/AliyunOSSBlockOutputStream.java  | 59 
 .../fs/aliyun/oss/AliyunOSSFileSystemStore.java |  2 +
 .../oss/TestAliyunOSSBlockOutputStream.java | 12 +++-
 3 files changed, 49 insertions(+), 24 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cde4d869/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSBlockOutputStream.java
--
diff --git 
a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSBlockOutputStream.java
 
b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSBlockOutputStream.java
index 2d9a13b..42cb0b1 100644
--- 
a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSBlockOutputStream.java
+++ 
b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSBlockOutputStream.java
@@ -33,7 +33,9 @@ import java.io.FileOutputStream;
 import java.io.IOException;
 import java.io.OutputStream;
 import java.util.ArrayList;
+import java.util.HashMap;
 import java.util.List;
+import java.util.Map;
 import java.util.concurrent.Callable;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.ExecutorService;
@@ -51,7 +53,7 @@ public class AliyunOSSBlockOutputStream extends OutputStream {
   private boolean closed;
   private String key;
   private File blockFile;
-  private List blockFiles = new ArrayList<>();
+  private Map blockFiles = new HashMap<>();
   private long blockSize;
   private int blockId = 0;
   private long blockWritten = 0L;
@@ -95,8 +97,9 @@ public class AliyunOSSBlockOutputStream extends OutputStream {
 
 blockStream.flush();
 blockStream.close();
-if (!blockFiles.contains(blockFile)) {
-  blockFiles.add(blockFile);
+if (!blockFiles.values().contains(blockFile)) {
+  blockId++;
+  blockFiles.put(blockId, blockFile);
 }
 
 try {
@@ -110,7 +113,7 @@ public class AliyunOSSBlockOutputStream extends 
OutputStream {
 @Override
 public PartETag call() throws Exception {
   PartETag partETag = store.uploadPart(blockFile, key, 
uploadId,
-  blockId + 1);
+  blockId);
   return partETag;
 }
   });
@@ -124,11 +127,7 @@ public class AliyunOSSBlockOutputStream extends 
OutputStream {
 store.completeMultipartUpload(key, uploadId, partETags);
   }
 } finally {
-  for (File tFile: blockFiles) {
-if (tFile.exists() && !tFile.delete()) {
-  LOG.warn("Failed to delete temporary file {}", tFile);
-}
-  }
+  removePartFiles();
   closed = true;
 }
   }
@@ -145,41 +144,55 @@ public class AliyunOSSBlockOutputStream extends 
OutputStream {
 if (closed) {
   throw new IOException("Stream closed.");
 }
-try {
-  blockStream.write(b, off, len);
-  blockWritten += len;
-  if (blockWritten >= blockSize) {
-uploadCurrentPart();
-blockWritten = 0L;
+blockStream.write(b, off, len);
+blockWritten += len;
+if (blockWritten >= blockSize) {
+  uploadCurrentPart();
+  blockWritten = 0L;
+}
+  }
+
+  private void removePartFiles() throws IOException {
+for (ListenableFuture partETagFuture : partETagsFutures) {
+  if (!partETagFuture.isDone()) {
+continue;
   }
-} finally {
-  for (File tFile: blockFiles) {
-if (tFile.exists() && !tFile.delete()) {
-  LOG.warn("Failed to delete temporary file {}", tFile);
+
+  try {
+File blockFile = blockFiles.get(partETagFuture.get().getPartNumber());
+if (blockFile != null && blockFile.exists() && !blockFile.delete()) {
+  LOG.warn("Failed to delete temporary file {}", blockFile);
 }
+  } catch (InterruptedException | ExecutionException e) {
+throw new IOException(e);
   }
 }
   }
 
   private void uploadCurrentPart() throws IOException {
-

hadoop git commit: HADOOP-15607. AliyunOSS: fix duplicated partNumber issue in AliyunOSSBlockOutputStream. Contributed by Jinhu Wu.

2018-08-01 Thread sammichen
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 21e416ad2 -> 418e957c6


HADOOP-15607. AliyunOSS: fix duplicated partNumber issue in 
AliyunOSSBlockOutputStream. Contributed by Jinhu Wu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/418e957c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/418e957c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/418e957c

Branch: refs/heads/branch-2
Commit: 418e957c64cc31f13ea07c1b9d47208dcb4b4101
Parents: 21e416a
Author: Sammi Chen 
Authored: Thu Aug 2 10:13:22 2018 +0800
Committer: Sammi Chen 
Committed: Thu Aug 2 10:14:54 2018 +0800

--
 .../aliyun/oss/AliyunOSSBlockOutputStream.java  | 59 
 .../fs/aliyun/oss/AliyunOSSFileSystemStore.java |  2 +
 .../oss/TestAliyunOSSBlockOutputStream.java | 12 +++-
 3 files changed, 49 insertions(+), 24 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/418e957c/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSBlockOutputStream.java
--
diff --git 
a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSBlockOutputStream.java
 
b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSBlockOutputStream.java
index 2d9a13b..42cb0b1 100644
--- 
a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSBlockOutputStream.java
+++ 
b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSBlockOutputStream.java
@@ -33,7 +33,9 @@ import java.io.FileOutputStream;
 import java.io.IOException;
 import java.io.OutputStream;
 import java.util.ArrayList;
+import java.util.HashMap;
 import java.util.List;
+import java.util.Map;
 import java.util.concurrent.Callable;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.ExecutorService;
@@ -51,7 +53,7 @@ public class AliyunOSSBlockOutputStream extends OutputStream {
   private boolean closed;
   private String key;
   private File blockFile;
-  private List blockFiles = new ArrayList<>();
+  private Map blockFiles = new HashMap<>();
   private long blockSize;
   private int blockId = 0;
   private long blockWritten = 0L;
@@ -95,8 +97,9 @@ public class AliyunOSSBlockOutputStream extends OutputStream {
 
 blockStream.flush();
 blockStream.close();
-if (!blockFiles.contains(blockFile)) {
-  blockFiles.add(blockFile);
+if (!blockFiles.values().contains(blockFile)) {
+  blockId++;
+  blockFiles.put(blockId, blockFile);
 }
 
 try {
@@ -110,7 +113,7 @@ public class AliyunOSSBlockOutputStream extends 
OutputStream {
 @Override
 public PartETag call() throws Exception {
   PartETag partETag = store.uploadPart(blockFile, key, 
uploadId,
-  blockId + 1);
+  blockId);
   return partETag;
 }
   });
@@ -124,11 +127,7 @@ public class AliyunOSSBlockOutputStream extends 
OutputStream {
 store.completeMultipartUpload(key, uploadId, partETags);
   }
 } finally {
-  for (File tFile: blockFiles) {
-if (tFile.exists() && !tFile.delete()) {
-  LOG.warn("Failed to delete temporary file {}", tFile);
-}
-  }
+  removePartFiles();
   closed = true;
 }
   }
@@ -145,41 +144,55 @@ public class AliyunOSSBlockOutputStream extends 
OutputStream {
 if (closed) {
   throw new IOException("Stream closed.");
 }
-try {
-  blockStream.write(b, off, len);
-  blockWritten += len;
-  if (blockWritten >= blockSize) {
-uploadCurrentPart();
-blockWritten = 0L;
+blockStream.write(b, off, len);
+blockWritten += len;
+if (blockWritten >= blockSize) {
+  uploadCurrentPart();
+  blockWritten = 0L;
+}
+  }
+
+  private void removePartFiles() throws IOException {
+for (ListenableFuture partETagFuture : partETagsFutures) {
+  if (!partETagFuture.isDone()) {
+continue;
   }
-} finally {
-  for (File tFile: blockFiles) {
-if (tFile.exists() && !tFile.delete()) {
-  LOG.warn("Failed to delete temporary file {}", tFile);
+
+  try {
+File blockFile = blockFiles.get(partETagFuture.get().getPartNumber());
+if (blockFile != null && blockFile.exists() && !blockFile.delete()) {
+  LOG.warn("Failed to delete temporary file {}", blockFile);
 }
+  } catch (InterruptedException | ExecutionException e) {
+throw new IOException(e);
   }
 }
   }
 
   private void uploadCurrentPart() throws IOException {
-blockFiles.add(blockFile);
 blockStream.flush();
 blockStream.

hadoop git commit: YARN-8610. Fixed initiate upgrade error message. Contributed by Chandni Singh

2018-08-01 Thread eyang
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 c8fbe7921 -> 680dac26d


YARN-8610.  Fixed initiate upgrade error message.
Contributed by Chandni Singh

(cherry picked from commit 23f394240e1568a38025e63e9dc0842e8c5235f7)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/680dac26
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/680dac26
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/680dac26

Branch: refs/heads/branch-3.1
Commit: 680dac26d80ad863ebd6445a180d82a6876a0dfc
Parents: c8fbe79
Author: Eric Yang 
Authored: Wed Aug 1 20:41:43 2018 -0400
Committer: Eric Yang 
Committed: Wed Aug 1 20:43:22 2018 -0400

--
 .../java/org/apache/hadoop/yarn/service/client/ServiceClient.java  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/680dac26/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java
index b5773a6..2d1f1b2 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java
@@ -253,7 +253,7 @@ public class ServiceClient extends AppAdminClient 
implements SliderExitCodes,
 if (!liveService.getState().equals(ServiceState.STABLE)) {
   String message = service.getName() + " is at " +
   liveService.getState()
-  + " state, upgrade can not be invoked when service is STABLE.";
+  + " state and upgrade can only be initiated when service is STABLE.";
   LOG.error(message);
   throw new YarnException(message);
 }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: YARN-8610. Fixed initiate upgrade error message. Contributed by Chandni Singh

2018-08-01 Thread eyang
Repository: hadoop
Updated Branches:
  refs/heads/trunk f2e29acbf -> 23f394240


YARN-8610.  Fixed initiate upgrade error message.
Contributed by Chandni Singh


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/23f39424
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/23f39424
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/23f39424

Branch: refs/heads/trunk
Commit: 23f394240e1568a38025e63e9dc0842e8c5235f7
Parents: f2e29ac
Author: Eric Yang 
Authored: Wed Aug 1 20:41:43 2018 -0400
Committer: Eric Yang 
Committed: Wed Aug 1 20:41:43 2018 -0400

--
 .../java/org/apache/hadoop/yarn/service/client/ServiceClient.java  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/23f39424/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java
index 4b67998..5668d9f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java
@@ -257,7 +257,7 @@ public class ServiceClient extends AppAdminClient 
implements SliderExitCodes,
 if (!liveService.getState().equals(ServiceState.STABLE)) {
   String message = service.getName() + " is at " +
   liveService.getState()
-  + " state, upgrade can not be invoked when service is STABLE.";
+  + " state and upgrade can only be initiated when service is STABLE.";
   LOG.error(message);
   throw new YarnException(message);
 }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: Revert "Fix potential FSImage corruption. Contributed by Ekanth Sethuramalingam & Arpit Agarwal."

2018-08-01 Thread arp
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1.1 273aa4958 -> 2b9a8c1d3


Revert "Fix potential FSImage corruption. Contributed by Ekanth Sethuramalingam 
& Arpit Agarwal."

This reverts commit 53c7d82d539f1a4afcb37ebeaaa0a1a7c25fe942.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2b9a8c1d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2b9a8c1d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2b9a8c1d

Branch: refs/heads/branch-3.1.1
Commit: 2b9a8c1d3a2caf1e733d57f346af3ff0d5ba529c
Parents: 273aa49
Author: Arpit Agarwal 
Authored: Wed Aug 1 15:51:07 2018 -0700
Committer: Arpit Agarwal 
Committed: Wed Aug 1 15:51:07 2018 -0700

--
 .../server/namenode/AclEntryStatusFormat.java   |  6 +-
 .../namenode/INodeWithAdditionalFields.java |  4 +-
 .../hdfs/server/namenode/XAttrFormat.java   | 67 +++-
 3 files changed, 28 insertions(+), 49 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b9a8c1d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclEntryStatusFormat.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclEntryStatusFormat.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclEntryStatusFormat.java
index 2c5b23b..82aa214 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclEntryStatusFormat.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclEntryStatusFormat.java
@@ -38,8 +38,7 @@ import com.google.common.collect.ImmutableList;
  * [1:3) -- the type of the entry (AclEntryType) 
  * [3:6) -- the permission of the entry (FsAction) 
  * [6:7) -- A flag to indicate whether Named entry or not 
- * [7:8) -- Reserved 
- * [8:32) -- the name of the entry, which is an ID that points to a 
+ * [7:32) -- the name of the entry, which is an ID that points to a 
  * string in the StringTableSection. 
  */
 public enum AclEntryStatusFormat {
@@ -48,8 +47,7 @@ public enum AclEntryStatusFormat {
   TYPE(SCOPE.BITS, 2),
   PERMISSION(TYPE.BITS, 3),
   NAMED_ENTRY_CHECK(PERMISSION.BITS, 1),
-  RESERVED(NAMED_ENTRY_CHECK.BITS, 1),
-  NAME(RESERVED.BITS, 24);
+  NAME(NAMED_ENTRY_CHECK.BITS, 25);
 
   private final LongBitFormat BITS;
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b9a8c1d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeWithAdditionalFields.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeWithAdditionalFields.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeWithAdditionalFields.java
index 84d99e4..9adcc3e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeWithAdditionalFields.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeWithAdditionalFields.java
@@ -35,8 +35,8 @@ public abstract class INodeWithAdditionalFields extends INode
 implements LinkedElement {
   enum PermissionStatusFormat {
 MODE(null, 16),
-GROUP(MODE.BITS, 24),
-USER(GROUP.BITS, 24);
+GROUP(MODE.BITS, 25),
+USER(GROUP.BITS, 23);
 
 final LongBitFormat BITS;
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b9a8c1d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrFormat.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrFormat.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrFormat.java
index f9f06db..7e704d0 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrFormat.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrFormat.java
@@ -27,56 +27,25 @@ import org.apache.hadoop.hdfs.XAttrHelper;
 
 import com.google.common.base.Preconditions;
 import com.google.common.primitives.Ints;
-import org.apache.hadoop.hdfs.util.LongBitFormat;
 
 /**
  * Class to pack XAttrs into byte[].
  * For each XAttr:
  *   The first 4 bytes represents XAttr namespace and name
  * [0:3)  - XAttr namespace
- * [3:8) - Reserved
- * [8:32) - The name of the entry, which is an ID that points to a
+ * [3:32) - The name of the entry, which is an ID that points to a
  *  string in map
  *  

[2/3] hadoop git commit: HADOOP-15476. fix logging for split-dns multihome . Contributed by Ajay Kumar.

2018-08-01 Thread arp
HADOOP-15476. fix logging for split-dns multihome . Contributed by Ajay Kumar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c8fbe792
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c8fbe792
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c8fbe792

Branch: refs/heads/branch-3.1
Commit: c8fbe7921b58737286e968346ec75b2ee179f93d
Parents: 62cc373
Author: Arpit Agarwal 
Authored: Wed Aug 1 12:32:01 2018 -0700
Committer: Arpit Agarwal 
Committed: Wed Aug 1 12:32:13 2018 -0700

--
 .../src/main/java/org/apache/hadoop/ipc/Client.java| 6 ++
 1 file changed, 2 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c8fbe792/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
index 163e80d..e147048 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
@@ -677,7 +677,8 @@ public class Client implements AutoCloseable {
 this.socket.setReuseAddress(true);
 localAddr = NetUtils.bindToLocalAddress(localAddr,
 bindToWildCardAddress);
-LOG.debug("Binding {} to {}", principal, localAddr);
+LOG.debug("Binding {} to {}", principal,
+(bindToWildCardAddress) ? "0.0.0.0" : localAddr);
 this.socket.bind(new InetSocketAddress(localAddr, 0));
   }
 }
@@ -1281,9 +1282,6 @@ public class Client implements AutoCloseable {
 this.bindToWildCardAddress = conf
 .getBoolean(CommonConfigurationKeys.IPC_CLIENT_BIND_WILDCARD_ADDR_KEY,
 CommonConfigurationKeys.IPC_CLIENT_BIND_WILDCARD_ADDR_DEFAULT);
-LOG.debug("{} set to true. Will bind client sockets to wildcard "
-+ "address.",
-CommonConfigurationKeys.IPC_CLIENT_BIND_WILDCARD_ADDR_KEY);
 
 this.clientId = ClientId.getClientId();
 this.sendParamsExecutor = clientExcecutorFactory.refAndGetInstance();


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[3/3] hadoop git commit: HADOOP-15476. fix logging for split-dns multihome . Contributed by Ajay Kumar.

2018-08-01 Thread arp
HADOOP-15476. fix logging for split-dns multihome . Contributed by Ajay Kumar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/273aa495
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/273aa495
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/273aa495

Branch: refs/heads/branch-3.1.1
Commit: 273aa4958dd1d6ca5266994a57126095a6914f61
Parents: 551c425
Author: Arpit Agarwal 
Authored: Wed Aug 1 12:32:01 2018 -0700
Committer: Arpit Agarwal 
Committed: Wed Aug 1 12:33:27 2018 -0700

--
 .../src/main/java/org/apache/hadoop/ipc/Client.java| 6 ++
 1 file changed, 2 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/273aa495/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
index 163e80d..e147048 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
@@ -677,7 +677,8 @@ public class Client implements AutoCloseable {
 this.socket.setReuseAddress(true);
 localAddr = NetUtils.bindToLocalAddress(localAddr,
 bindToWildCardAddress);
-LOG.debug("Binding {} to {}", principal, localAddr);
+LOG.debug("Binding {} to {}", principal,
+(bindToWildCardAddress) ? "0.0.0.0" : localAddr);
 this.socket.bind(new InetSocketAddress(localAddr, 0));
   }
 }
@@ -1281,9 +1282,6 @@ public class Client implements AutoCloseable {
 this.bindToWildCardAddress = conf
 .getBoolean(CommonConfigurationKeys.IPC_CLIENT_BIND_WILDCARD_ADDR_KEY,
 CommonConfigurationKeys.IPC_CLIENT_BIND_WILDCARD_ADDR_DEFAULT);
-LOG.debug("{} set to true. Will bind client sockets to wildcard "
-+ "address.",
-CommonConfigurationKeys.IPC_CLIENT_BIND_WILDCARD_ADDR_KEY);
 
 this.clientId = ClientId.getClientId();
 this.sendParamsExecutor = clientExcecutorFactory.refAndGetInstance();


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[1/3] hadoop git commit: HADOOP-15476. fix logging for split-dns multihome . Contributed by Ajay Kumar.

2018-08-01 Thread arp
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 62cc373dc -> c8fbe7921
  refs/heads/branch-3.1.1 551c4250b -> 273aa4958
  refs/heads/trunk 603a57476 -> f2e29acbf


HADOOP-15476. fix logging for split-dns multihome . Contributed by Ajay Kumar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f2e29acb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f2e29acb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f2e29acb

Branch: refs/heads/trunk
Commit: f2e29acbfa0b7e1fcecbdcf3e791c96114b456a5
Parents: 603a574
Author: Arpit Agarwal 
Authored: Wed Aug 1 12:32:01 2018 -0700
Committer: Arpit Agarwal 
Committed: Wed Aug 1 12:32:01 2018 -0700

--
 .../src/main/java/org/apache/hadoop/ipc/Client.java| 6 ++
 1 file changed, 2 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f2e29acb/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
index 163e80d..e147048 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
@@ -677,7 +677,8 @@ public class Client implements AutoCloseable {
 this.socket.setReuseAddress(true);
 localAddr = NetUtils.bindToLocalAddress(localAddr,
 bindToWildCardAddress);
-LOG.debug("Binding {} to {}", principal, localAddr);
+LOG.debug("Binding {} to {}", principal,
+(bindToWildCardAddress) ? "0.0.0.0" : localAddr);
 this.socket.bind(new InetSocketAddress(localAddr, 0));
   }
 }
@@ -1281,9 +1282,6 @@ public class Client implements AutoCloseable {
 this.bindToWildCardAddress = conf
 .getBoolean(CommonConfigurationKeys.IPC_CLIENT_BIND_WILDCARD_ADDR_KEY,
 CommonConfigurationKeys.IPC_CLIENT_BIND_WILDCARD_ADDR_DEFAULT);
-LOG.debug("{} set to true. Will bind client sockets to wildcard "
-+ "address.",
-CommonConfigurationKeys.IPC_CLIENT_BIND_WILDCARD_ADDR_KEY);
 
 this.clientId = ClientId.getClientId();
 this.sendParamsExecutor = clientExcecutorFactory.refAndGetInstance();


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDFS-13421. [PROVIDED Phase 2] Implement DNA_BACKUP command in Datanode. Contributed by Ewan Higgs.

2018-08-01 Thread virajith
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-12090 d52a2afbf -> bcd1aaab3


HDFS-13421. [PROVIDED Phase 2] Implement DNA_BACKUP command in Datanode. 
Contributed by Ewan Higgs.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bcd1aaab
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bcd1aaab
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bcd1aaab

Branch: refs/heads/HDFS-12090
Commit: bcd1aaab3d84deee9ba020a143a1e3dcf95cfce4
Parents: d52a2af
Author: Virajith Jalaparti 
Authored: Wed Aug 1 12:13:31 2018 -0700
Committer: Virajith Jalaparti 
Committed: Wed Aug 1 12:13:31 2018 -0700

--
 .../apache/hadoop/hdfs/BlockInputStream.java|  52 
 .../hdfs/server/datanode/BPOfferService.java|   6 +
 .../hadoop/hdfs/server/datanode/DataNode.java   |  20 ++-
 .../SyncServiceSatisfierDatanodeWorker.java |  97 +++
 .../SyncTaskExecutionFeedbackCollector.java |  54 
 .../executor/BlockSyncOperationExecutor.java| 122 +++
 .../executor/BlockSyncReaderFactory.java|  92 ++
 .../executor/BlockSyncTaskRunner.java   |  69 +++
 .../hadoop/fs/TestHDFSMultipartUploader.java|   3 +-
 .../hadoop/hdfs/TestBlockInputStream.java   |  84 +
 .../TestBlockSyncOperationExecutor.java |  94 ++
 11 files changed, 690 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bcd1aaab/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/BlockInputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/BlockInputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/BlockInputStream.java
new file mode 100644
index 000..152f83e
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/BlockInputStream.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import java.io.IOException;
+import java.io.InputStream;
+
+/**
+ * Facade around BlockReader that indeed implements the InputStream interface.
+ */
+public class BlockInputStream extends InputStream {
+  private final BlockReader blockReader;
+
+  public BlockInputStream(BlockReader blockReader) {
+this.blockReader = blockReader;
+  }
+
+  @Override
+  public int read() throws IOException {
+byte[] b = new byte[1];
+int c = blockReader.read(b, 0, b.length);
+if (c > 0) {
+  return b[0];
+} else {
+  return -1;
+}
+  }
+
+  @Override
+  public int read(byte b[], int off, int len) throws IOException {
+return blockReader.read(b, off, len);
+  }
+
+  @Override
+  public long skip(long n) throws IOException {
+return blockReader.skip(n);
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bcd1aaab/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
index a25f6a9..b8eef5e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
@@ -795,6 +795,12 @@ class BPOfferService {
   ((BlockECReconstructionCommand) cmd).getECTasks();
   dn.getErasureCodingWorker().processErasureCodingTasks(ecTasks);
   break;
+case DatanodeProtocol.DNA_BACKUP:
+  LOG.info("DatanodeCommand action: DNA_BACKUP");
+  Collection backupTasks =
+  ((SyncCommand) cmd).getSyncTasks();
+  dn.getSyncServiceSatisfierData

hadoop git commit: YARN-8600. RegistryDNS hang when remote lookup does not reply. Contributed by Eric Yang

2018-08-01 Thread skumpf
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 2a94823f3 -> 62cc373dc


YARN-8600. RegistryDNS hang when remote lookup does not reply. Contributed by 
Eric Yang

(cherry picked from commit 603a57476ce0bf9514f0432a235f29432ca4c323)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/62cc373d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/62cc373d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/62cc373d

Branch: refs/heads/branch-3.1
Commit: 62cc373dc53d79461de5540822d61686dcaf8c6c
Parents: 2a94823
Author: Shane Kumpf 
Authored: Wed Aug 1 12:22:01 2018 -0600
Committer: Shane Kumpf 
Committed: Wed Aug 1 12:31:28 2018 -0600

--
 .../hadoop/registry/server/dns/LookupTask.java  | 39 
 .../hadoop/registry/server/dns/RegistryDNS.java | 21 ---
 .../registry/server/dns/TestRegistryDNS.java|  8 
 3 files changed, 63 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/62cc373d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/dns/LookupTask.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/dns/LookupTask.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/dns/LookupTask.java
new file mode 100644
index 000..c2fc4a9
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/dns/LookupTask.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.registry.server.dns;
+
+import java.util.concurrent.Callable;
+
+import org.xbill.DNS.Lookup;
+import org.xbill.DNS.Name;
+import org.xbill.DNS.Record;
+
+public class LookupTask implements Callable<Record[]> {
+
+  private Name name;
+  private int type;
+
+  public LookupTask(Name name, int type) {
+this.name = name;
+this.type = type;
+  }
+
+  @Override
+  public Record[] call() throws Exception {
+return new Lookup(name, type).run();
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/62cc373d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/dns/RegistryDNS.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/dns/RegistryDNS.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/dns/RegistryDNS.java
index 0022843..52e49a3 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/dns/RegistryDNS.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/dns/RegistryDNS.java
@@ -99,9 +99,13 @@ import java.util.Properties;
 import java.util.concurrent.Callable;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.ExecutionException;
 import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
 import java.util.concurrent.ThreadFactory;
 import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.locks.Lock;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
@@ -941,7 +945,7 @@ public class RegistryDNS extends AbstractService implements 
DNSOperations,
* @param portlocal port.
* @throws IOException if the UDP processing fails.
*/
-  private void serveNIOUDP(DatagramChannel channel,
+  private synchronized void serveNIOUDP(DatagramChannel channel,
   InetAddress addr, int port) throws Exception {
 SocketAddress remoteAddress = null;
 try {
@@ -11

hadoop git commit: YARN-8600. RegistryDNS hang when remote lookup does not reply. Contributed by Eric Yang

2018-08-01 Thread skumpf
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1.1 a2a1d329a -> 551c4250b


YARN-8600. RegistryDNS hang when remote lookup does not reply. Contributed by 
Eric Yang

(cherry picked from commit 603a57476ce0bf9514f0432a235f29432ca4c323)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/551c4250
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/551c4250
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/551c4250

Branch: refs/heads/branch-3.1.1
Commit: 551c4250bd333c177ea9ef48398928ccaf1c7f05
Parents: a2a1d32
Author: Shane Kumpf 
Authored: Wed Aug 1 12:22:01 2018 -0600
Committer: Shane Kumpf 
Committed: Wed Aug 1 12:25:30 2018 -0600

--
 .../hadoop/registry/server/dns/LookupTask.java  | 39 
 .../hadoop/registry/server/dns/RegistryDNS.java | 21 ---
 .../registry/server/dns/TestRegistryDNS.java|  8 
 3 files changed, 63 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/551c4250/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/dns/LookupTask.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/dns/LookupTask.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/dns/LookupTask.java
new file mode 100644
index 000..c2fc4a9
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/dns/LookupTask.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.registry.server.dns;
+
+import java.util.concurrent.Callable;
+
+import org.xbill.DNS.Lookup;
+import org.xbill.DNS.Name;
+import org.xbill.DNS.Record;
+
+public class LookupTask implements Callable<Record[]> {
+
+  private Name name;
+  private int type;
+
+  public LookupTask(Name name, int type) {
+this.name = name;
+this.type = type;
+  }
+
+  @Override
+  public Record[] call() throws Exception {
+return new Lookup(name, type).run();
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/551c4250/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/dns/RegistryDNS.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/dns/RegistryDNS.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/dns/RegistryDNS.java
index 0022843..52e49a3 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/dns/RegistryDNS.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/dns/RegistryDNS.java
@@ -99,9 +99,13 @@ import java.util.Properties;
 import java.util.concurrent.Callable;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.ExecutionException;
 import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
 import java.util.concurrent.ThreadFactory;
 import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.locks.Lock;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
@@ -941,7 +945,7 @@ public class RegistryDNS extends AbstractService implements 
DNSOperations,
* @param portlocal port.
* @throws IOException if the UDP processing fails.
*/
-  private void serveNIOUDP(DatagramChannel channel,
+  private synchronized void serveNIOUDP(DatagramChannel channel,
   InetAddress addr, int port) throws Exception {
 SocketAddress remoteAddress = null;
 try {
@@

hadoop git commit: YARN-8600. RegistryDNS hang when remote lookup does not reply. Contributed by Eric Yang

2018-08-01 Thread skumpf
Repository: hadoop
Updated Branches:
  refs/heads/trunk 67c65da26 -> 603a57476


YARN-8600. RegistryDNS hang when remote lookup does not reply. Contributed by 
Eric Yang


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/603a5747
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/603a5747
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/603a5747

Branch: refs/heads/trunk
Commit: 603a57476ce0bf9514f0432a235f29432ca4c323
Parents: 67c65da
Author: Shane Kumpf 
Authored: Wed Aug 1 12:22:01 2018 -0600
Committer: Shane Kumpf 
Committed: Wed Aug 1 12:22:01 2018 -0600

--
 .../hadoop/registry/server/dns/LookupTask.java  | 39 
 .../hadoop/registry/server/dns/RegistryDNS.java | 21 ---
 .../registry/server/dns/TestRegistryDNS.java|  8 
 3 files changed, 63 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/603a5747/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/dns/LookupTask.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/dns/LookupTask.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/dns/LookupTask.java
new file mode 100644
index 000..c2fc4a9
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/dns/LookupTask.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.registry.server.dns;
+
+import java.util.concurrent.Callable;
+
+import org.xbill.DNS.Lookup;
+import org.xbill.DNS.Name;
+import org.xbill.DNS.Record;
+
+public class LookupTask implements Callable<Record[]> {
+
+  private Name name;
+  private int type;
+
+  public LookupTask(Name name, int type) {
+this.name = name;
+this.type = type;
+  }
+
+  @Override
+  public Record[] call() throws Exception {
+return new Lookup(name, type).run();
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/603a5747/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/dns/RegistryDNS.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/dns/RegistryDNS.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/dns/RegistryDNS.java
index 0022843..52e49a3 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/dns/RegistryDNS.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/dns/RegistryDNS.java
@@ -99,9 +99,13 @@ import java.util.Properties;
 import java.util.concurrent.Callable;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.ExecutionException;
 import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
 import java.util.concurrent.ThreadFactory;
 import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.locks.Lock;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
@@ -941,7 +945,7 @@ public class RegistryDNS extends AbstractService implements 
DNSOperations,
* @param portlocal port.
* @throws IOException if the UDP processing fails.
*/
-  private void serveNIOUDP(DatagramChannel channel,
+  private synchronized void serveNIOUDP(DatagramChannel channel,
   InetAddress addr, int port) throws Exception {
 SocketAddress remoteAddress = null;
 try {
@@ -1177,13 +1181,20 @@ public class RegistryDNS extends AbstractService 
implements D

[2/2] hadoop git commit: Setting version to 3.1.1

2018-08-01 Thread wangda
Setting version to 3.1.1


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a2a1d329
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a2a1d329
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a2a1d329

Branch: refs/heads/branch-3.1.1
Commit: a2a1d329a99d6bbcc9631a72848dc75cd372650d
Parents: e4f530a
Author: Wangda Tan 
Authored: Wed Aug 1 11:03:57 2018 -0700
Committer: Wangda Tan 
Committed: Wed Aug 1 11:03:57 2018 -0700

--
 hadoop-assemblies/pom.xml| 4 ++--
 hadoop-build-tools/pom.xml   | 2 +-
 hadoop-client-modules/hadoop-client-api/pom.xml  | 4 ++--
 hadoop-client-modules/hadoop-client-check-invariants/pom.xml | 4 ++--
 .../hadoop-client-check-test-invariants/pom.xml  | 4 ++--
 hadoop-client-modules/hadoop-client-integration-tests/pom.xml| 4 ++--
 hadoop-client-modules/hadoop-client-minicluster/pom.xml  | 4 ++--
 hadoop-client-modules/hadoop-client-runtime/pom.xml  | 4 ++--
 hadoop-client-modules/hadoop-client/pom.xml  | 4 ++--
 hadoop-client-modules/pom.xml| 2 +-
 hadoop-cloud-storage-project/hadoop-cloud-storage/pom.xml| 4 ++--
 hadoop-cloud-storage-project/pom.xml | 4 ++--
 hadoop-common-project/hadoop-annotations/pom.xml | 4 ++--
 hadoop-common-project/hadoop-auth-examples/pom.xml   | 4 ++--
 hadoop-common-project/hadoop-auth/pom.xml| 4 ++--
 hadoop-common-project/hadoop-common/pom.xml  | 4 ++--
 hadoop-common-project/hadoop-kms/pom.xml | 4 ++--
 hadoop-common-project/hadoop-minikdc/pom.xml | 4 ++--
 hadoop-common-project/hadoop-nfs/pom.xml | 4 ++--
 hadoop-common-project/pom.xml| 4 ++--
 hadoop-dist/pom.xml  | 4 ++--
 hadoop-hdfs-project/hadoop-hdfs-client/pom.xml   | 4 ++--
 hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml   | 4 ++--
 hadoop-hdfs-project/hadoop-hdfs-native-client/pom.xml| 4 ++--
 hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml  | 4 ++--
 hadoop-hdfs-project/hadoop-hdfs-rbf/pom.xml  | 4 ++--
 hadoop-hdfs-project/hadoop-hdfs/pom.xml  | 4 ++--
 hadoop-hdfs-project/pom.xml  | 4 ++--
 .../hadoop-mapreduce-client/hadoop-mapreduce-client-app/pom.xml  | 4 ++--
 .../hadoop-mapreduce-client-common/pom.xml   | 4 ++--
 .../hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml | 4 ++--
 .../hadoop-mapreduce-client-hs-plugins/pom.xml   | 4 ++--
 .../hadoop-mapreduce-client/hadoop-mapreduce-client-hs/pom.xml   | 4 ++--
 .../hadoop-mapreduce-client-jobclient/pom.xml| 4 ++--
 .../hadoop-mapreduce-client-nativetask/pom.xml   | 4 ++--
 .../hadoop-mapreduce-client-shuffle/pom.xml  | 4 ++--
 .../hadoop-mapreduce-client-uploader/pom.xml | 4 ++--
 hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml | 4 ++--
 hadoop-mapreduce-project/hadoop-mapreduce-examples/pom.xml   | 4 ++--
 hadoop-mapreduce-project/pom.xml | 4 ++--
 hadoop-maven-plugins/pom.xml | 2 +-
 hadoop-minicluster/pom.xml   | 4 ++--
 hadoop-project-dist/pom.xml  | 4 ++--
 hadoop-project/pom.xml   | 4 ++--
 hadoop-tools/hadoop-aliyun/pom.xml   | 2 +-
 hadoop-tools/hadoop-archive-logs/pom.xml | 4 ++--
 hadoop-tools/hadoop-archives/pom.xml | 4 ++--
 hadoop-tools/hadoop-aws/pom.xml  | 4 ++--
 hadoop-tools/hadoop-azure-datalake/pom.xml   | 2 +-
 hadoop-tools/hadoop-azure/pom.xml| 2 +-
 hadoop-tools/hadoop-datajoin/pom.xml | 4 ++--
 hadoop-tools/hadoop-distcp/pom.xml   | 4 ++--
 hadoop-tools/hadoop-extras/pom.xml   | 4 ++--
 hadoop-tools/hadoop-fs2img/pom.xml   | 4 ++--
 hadoop-tools/hadoop-gridmix/pom.xml  | 4 ++--
 hadoop-tools/hadoop-kafka/pom.xml| 4 ++--
 hadoop-tools/hadoop-openstack/pom.xml| 4 ++--
 hadoop-tools/hadoop-pipes/pom.xml| 4 ++--
 hadoop-tools/hadoop-resourceestimator/pom.xml| 2 +-
 had

[1/2] hadoop git commit: Setting version to 3.1.1

2018-08-01 Thread wangda
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1.1 e4f530a9a -> a2a1d329a


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a2a1d329/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/hadoop-yarn-server-timelineservice-hbase-server-2/pom.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/hadoop-yarn-server-timelineservice-hbase-server-2/pom.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/hadoop-yarn-server-timelineservice-hbase-server-2/pom.xml
index 21610e8..707deba 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/hadoop-yarn-server-timelineservice-hbase-server-2/pom.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/hadoop-yarn-server-timelineservice-hbase-server-2/pom.xml
@@ -22,13 +22,13 @@
   
 hadoop-yarn-server-timelineservice-hbase-server
 org.apache.hadoop
-3.1.1-SNAPSHOT
+3.1.1
   
   4.0.0
 
   hadoop-yarn-server-timelineservice-hbase-server-2
   Apache Hadoop YARN TimelineService HBase Server 2.0
-  3.1.1-SNAPSHOT
+  3.1.1
 
   
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a2a1d329/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/pom.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/pom.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/pom.xml
index 9d045ca..60a3e07 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/pom.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/pom.xml
@@ -22,12 +22,12 @@
   
 hadoop-yarn-server-timelineservice-hbase
 org.apache.hadoop
-3.1.1-SNAPSHOT
+3.1.1
   
   4.0.0
 
   hadoop-yarn-server-timelineservice-hbase-server
-  3.1.1-SNAPSHOT
+  3.1.1
   Apache Hadoop YARN TimelineService HBase Servers
   pom
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a2a1d329/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/pom.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/pom.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/pom.xml
index 24a3f34..c571ce8 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/pom.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/pom.xml
@@ -22,12 +22,12 @@
   
 hadoop-yarn-server
 org.apache.hadoop
-3.1.1-SNAPSHOT
+3.1.1
   
 
   4.0.0
   hadoop-yarn-server-timelineservice-hbase
-  3.1.1-SNAPSHOT
+  3.1.1
   Apache Hadoop YARN TimelineService HBase Backend
   pom
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a2a1d329/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/pom.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/pom.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/pom.xml
index fe0c786..21b9370 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/pom.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/pom.xml
@@ -22,11 +22,11 @@
   
 hadoop-yarn-server
 org.apache.hadoop
-3.1.1-SNAPSHOT
+3.1.1
   
   4.0.0
   hadoop-yarn-server-timelineservice
-  3.1.1-SNAPSHOT
+  3.1.1
   Apache Hadoop YARN Timeline Service
 
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a2a1d329/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/pom.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/pom.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-se

[06/50] hadoop git commit: HDDS-201. Add name for LeaseManager. Contributed by Sandeep Nemuri.

2018-08-01 Thread xkrogen
HDDS-201. Add name for LeaseManager. Contributed by Sandeep Nemuri.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a1922959
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a1922959
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a1922959

Branch: refs/heads/HDFS-12943
Commit: a19229594e48fad9f50dbdb1f0b2fcbf7443ce66
Parents: 9089790
Author: Nanda kumar 
Authored: Thu Jul 26 19:00:23 2018 +0530
Committer: Nanda kumar 
Committed: Thu Jul 26 19:00:23 2018 +0530

--
 .../apache/hadoop/ozone/lease/LeaseManager.java | 14 -
 .../hadoop/ozone/lease/TestLeaseManager.java| 21 ++--
 .../hdds/server/events/TestEventWatcher.java|  2 +-
 .../hdds/scm/container/ContainerMapping.java|  4 ++--
 .../hdds/scm/pipelines/PipelineSelector.java|  4 ++--
 .../scm/server/StorageContainerManager.java |  3 ++-
 .../replication/TestReplicationManager.java |  4 ++--
 7 files changed, 28 insertions(+), 24 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a1922959/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseManager.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseManager.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseManager.java
index b8390dd..756a41a 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseManager.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseManager.java
@@ -42,6 +42,7 @@ public class LeaseManager {
   private static final Logger LOG =
   LoggerFactory.getLogger(LeaseManager.class);
 
+  private final String name;
   private final long defaultTimeout;
   private Map> activeLeases;
   private LeaseMonitor leaseMonitor;
@@ -51,10 +52,13 @@ public class LeaseManager {
   /**
* Creates an instance of lease manager.
*
+   * @param name
+   *Name for the LeaseManager instance.
* @param defaultTimeout
*Default timeout in milliseconds to be used for lease creation.
*/
-  public LeaseManager(long defaultTimeout) {
+  public LeaseManager(String name, long defaultTimeout) {
+this.name = name;
 this.defaultTimeout = defaultTimeout;
   }
 
@@ -62,11 +66,11 @@ public class LeaseManager {
* Starts the lease manager service.
*/
   public void start() {
-LOG.debug("Starting LeaseManager service");
+LOG.debug("Starting {} LeaseManager service", name);
 activeLeases = new ConcurrentHashMap<>();
 leaseMonitor = new LeaseMonitor();
 leaseMonitorThread = new Thread(leaseMonitor);
-leaseMonitorThread.setName("LeaseManager#LeaseMonitor");
+leaseMonitorThread.setName(name + "-LeaseManager#LeaseMonitor");
 leaseMonitorThread.setDaemon(true);
 leaseMonitorThread.setUncaughtExceptionHandler((thread, throwable) -> {
   // Let us just restart this thread after logging an error.
@@ -75,7 +79,7 @@ public class LeaseManager {
   thread.toString(), throwable);
   leaseMonitorThread.start();
 });
-LOG.debug("Starting LeaseManager#LeaseMonitor Thread");
+LOG.debug("Starting {}-LeaseManager#LeaseMonitor Thread", name);
 leaseMonitorThread.start();
 isRunning = true;
   }
@@ -203,7 +207,7 @@ public class LeaseManager {
 @Override
 public void run() {
   while(monitor) {
-LOG.debug("LeaseMonitor: checking for lease expiry");
+LOG.debug("{}-LeaseMonitor: checking for lease expiry", name);
 long sleepTime = Long.MAX_VALUE;
 
 for (T resource : activeLeases.keySet()) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a1922959/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/lease/TestLeaseManager.java
--
diff --git 
a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/lease/TestLeaseManager.java
 
b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/lease/TestLeaseManager.java
index 517c1a7..bdc70fc 100644
--- 
a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/lease/TestLeaseManager.java
+++ 
b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/lease/TestLeaseManager.java
@@ -67,7 +67,7 @@ public class TestLeaseManager {
   public void testLeaseAcquireAndRelease() throws LeaseException {
 //It is assumed that the test case execution won't take more than 5 
seconds,
 //if it takes more time increase the defaultTimeout value of LeaseManager.
-LeaseManager manager = new LeaseManager<>(5000);
+LeaseManager manager = new LeaseManager<>("Test", 5000);
 manager.start();
 DummyResource resourceOne = new DummyResource("one");
 DummyResource resourceTwo =

[42/50] hadoop git commit: HDFS-13322 fuse dfs - uid persists when switching between ticket caches. Contributed by Istvan Fajth.

2018-08-01 Thread xkrogen
HDFS-13322 fuse dfs - uid persists when switching between ticket caches.  
Contributed by Istvan Fajth.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/40f9b0c5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/40f9b0c5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/40f9b0c5

Branch: refs/heads/HDFS-12943
Commit: 40f9b0c5c13f40921b6976589543a04efa489f93
Parents: c835fc0
Author: Aaron Fabbri 
Authored: Tue Jul 31 15:21:38 2018 -0700
Committer: Aaron Fabbri 
Committed: Tue Jul 31 18:44:49 2018 -0700

--
 .../src/main/native/fuse-dfs/fuse_connect.c| 17 +
 1 file changed, 13 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/40f9b0c5/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/fuse_connect.c
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/fuse_connect.c
 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/fuse_connect.c
index 6ee4ad5..f08917a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/fuse_connect.c
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/fuse_connect.c
@@ -192,7 +192,7 @@ int fuseConnectInit(const char *nnUri, int port)
 }
 
 /**
- * Compare two libhdfs connections by username
+ * Compare two libhdfs connections by username and Kerberos ticket cache path
  *
  * @param aThe first libhdfs connection
  * @param bThe second libhdfs connection
@@ -201,22 +201,26 @@ int fuseConnectInit(const char *nnUri, int port)
  */
 static int hdfsConnCompare(const struct hdfsConn *a, const struct hdfsConn *b)
 {
-  return strcmp(a->usrname, b->usrname);
+  int rc = strcmp(a->usrname, b->usrname);
+  if (rc) return rc;
+  return gHdfsAuthConf == AUTH_CONF_KERBEROS && strcmp(a->kpath, b->kpath);
 }
 
 /**
  * Find a libhdfs connection by username
  *
  * @param usrname The username to look up
+ * @param kpath   The Kerberos ticket cache file path
  *
  * @returnThe connection, or NULL if none could be found
  */
-static struct hdfsConn* hdfsConnFind(const char *usrname)
+static struct hdfsConn* hdfsConnFind(const char *usrname, const char *kpath)
 {
   struct hdfsConn exemplar;
 
   memset(&exemplar, 0, sizeof(exemplar));
   exemplar.usrname = (char*)usrname;
+  exemplar.kpath = (char*)kpath;
   return RB_FIND(hdfsConnTree, &gConnTree, &exemplar);
 }
 
@@ -542,8 +546,13 @@ static int fuseConnect(const char *usrname, struct 
fuse_context *ctx,
   int ret;
   struct hdfsConn* conn;
 
+  char kpath[PATH_MAX] = { 0 };
+  if (gHdfsAuthConf == AUTH_CONF_KERBEROS) {
+findKerbTicketCachePath(ctx, kpath, sizeof(kpath));
+  }
+
   pthread_mutex_lock(&gConnMutex);
-  conn = hdfsConnFind(usrname);
+  conn = hdfsConnFind(usrname, kpath);
   if (!conn) {
 ret = fuseNewConnect(usrname, ctx, &conn);
 if (ret) {


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[03/50] hadoop git commit: HDFS-11060. make DEFAULT_MAX_CORRUPT_FILEBLOCKS_RETURNED configurable. Contributed by Lantao Jin.

2018-08-01 Thread xkrogen
HDFS-11060. make DEFAULT_MAX_CORRUPT_FILEBLOCKS_RETURNED configurable. 
Contributed by Lantao Jin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e95c5e9f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e95c5e9f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e95c5e9f

Branch: refs/heads/HDFS-12943
Commit: e95c5e9f62452ee848875ec2f8642eab4992cd23
Parents: 9485c9a
Author: Wei-Chiu Chuang 
Authored: Wed Jul 25 11:04:18 2018 -0700
Committer: Wei-Chiu Chuang 
Committed: Wed Jul 25 11:04:18 2018 -0700

--
 .../src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java | 3 +++
 .../apache/hadoop/hdfs/server/namenode/FSNamesystem.java| 8 ++--
 .../hadoop-hdfs/src/main/resources/hdfs-default.xml | 9 +
 .../hdfs/server/namenode/TestListCorruptFileBlocks.java | 2 +-
 4 files changed, 19 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e95c5e9f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index cc902b0..5a1266c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -241,6 +241,9 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final int DFS_NAMENODE_MAINTENANCE_REPLICATION_MIN_DEFAULT
   = 1;
 
+  public static final String  
DFS_NAMENODE_MAX_CORRUPT_FILE_BLOCKS_RETURNED_KEY = 
"dfs.namenode.max-corrupt-file-blocks-returned";
+  public static final int 
DFS_NAMENODE_MAX_CORRUPT_FILE_BLOCKS_RETURNED_DEFAULT = 100;
+
   public static final String  DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY =
   
HdfsClientConfigKeys.DeprecatedKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY;
   public static final int DFS_NAMENODE_REPLICATION_MAX_STREAMS_DEFAULT = 2;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e95c5e9f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 66bc567..8c95f7d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -425,7 +425,7 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
   public static final Log auditLog = LogFactory.getLog(
   FSNamesystem.class.getName() + ".audit");
 
-  static final int DEFAULT_MAX_CORRUPT_FILEBLOCKS_RETURNED = 100;
+  private final int maxCorruptFileBlocksReturn;
   static int BLOCK_DELETION_INCREMENT = 1000;
   private final boolean isPermissionEnabled;
   private final UserGroupInformation fsOwner;
@@ -831,6 +831,10 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
   DFSConfigKeys.DFS_NAMENODE_FILE_CLOSE_NUM_COMMITTED_ALLOWED_KEY,
   DFSConfigKeys.DFS_NAMENODE_FILE_CLOSE_NUM_COMMITTED_ALLOWED_DEFAULT);
 
+  this.maxCorruptFileBlocksReturn = conf.getInt(
+  DFSConfigKeys.DFS_NAMENODE_MAX_CORRUPT_FILE_BLOCKS_RETURNED_KEY,
+  DFSConfigKeys.DFS_NAMENODE_MAX_CORRUPT_FILE_BLOCKS_RETURNED_DEFAULT);
+
   this.dtpReplaceDatanodeOnFailure = ReplaceDatanodeOnFailure.get(conf);
   
   this.standbyShouldCheckpoint = conf.getBoolean(
@@ -5508,7 +5512,7 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
   if (src.startsWith(path)){
 corruptFiles.add(new CorruptFileBlockInfo(src, blk));
 count++;
-if (count >= DEFAULT_MAX_CORRUPT_FILEBLOCKS_RETURNED)
+if (count >= maxCorruptFileBlocksReturn)
   break;
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e95c5e9f/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index 384cedf..a10be27 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdf

[46/50] hadoop git commit: YARN-8606. Opportunistic scheduling does not work post RM failover. Contributed by Bibin A Chundatt.

2018-08-01 Thread xkrogen
YARN-8606. Opportunistic scheduling does not work post RM failover. Contributed 
by Bibin A Chundatt.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a48a0cc7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a48a0cc7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a48a0cc7

Branch: refs/heads/HDFS-12943
Commit: a48a0cc7fd8e7ac1c07b260e6078077824f27c35
Parents: 5cc8e99
Author: Sunil G 
Authored: Wed Aug 1 12:17:18 2018 +0530
Committer: Sunil G 
Committed: Wed Aug 1 12:17:18 2018 +0530

--
 ...pportunisticContainerAllocatorAMService.java |  4 +-
 .../server/resourcemanager/ResourceManager.java | 37 ++--
 .../yarn/server/resourcemanager/TestRMHA.java   | 44 
 3 files changed, 72 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a48a0cc7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/OpportunisticContainerAllocatorAMService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/OpportunisticContainerAllocatorAMService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/OpportunisticContainerAllocatorAMService.java
index 9b13627..15c2a89 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/OpportunisticContainerAllocatorAMService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/OpportunisticContainerAllocatorAMService.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.yarn.server.resourcemanager;
 
+import com.google.common.annotations.VisibleForTesting;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -417,7 +418,8 @@ public class OpportunisticContainerAllocatorAMService
 return nodeMonitor.getThresholdCalculator();
   }
 
-  private synchronized List getLeastLoadedNodes() {
+  @VisibleForTesting
+  synchronized List getLeastLoadedNodes() {
 long currTime = System.currentTimeMillis();
 if ((currTime - lastCacheUpdateTime > cacheRefreshInterval)
 || (cachedNodes == null)) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a48a0cc7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
index 0b7e87c..f14d440 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
@@ -757,9 +757,11 @@ public class ResourceManager extends CompositeService 
implements Recoverable {
   }
 
   masterService = createApplicationMasterService();
+  createAndRegisterOpportunisticDispatcher(masterService);
   addService(masterService) ;
   rmContext.setApplicationMasterService(masterService);
 
+
   applicationACLsManager = new ApplicationACLsManager(conf);
 
   queueACLsManager = createQueueACLsManager(scheduler, conf);
@@ -807,6 +809,23 @@ public class ResourceManager extends CompositeService 
implements Recoverable {
   super.serviceInit(conf);
 }
 
+private void createAndRegisterOpportunisticDispatcher(
+ApplicationMasterService service) {
+  if (!isOpportunisticSchedulingEnabled(conf)) {
+return;
+  }
+  EventDispatcher oppContainerAllocEventDispatcher = new EventDispatcher(
+  (OpportunisticContainerAllocatorAMService) service,
+  OpportunisticContainerAllocatorAMService.class.getName());
+  // Add an event dispatcher for the
+  // OpportunisticContainerAllocatorAMService to handle node
+  // additions, updates and removals. Since th

[34/50] hadoop git commit: YARN-7974. Allow updating application tracking url after registration. Contributed by Jonathan Hung

2018-08-01 Thread xkrogen
YARN-7974. Allow updating application tracking url after registration. 
Contributed by Jonathan Hung


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3e06a5dc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3e06a5dc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3e06a5dc

Branch: refs/heads/HDFS-12943
Commit: 3e06a5dcea8224ba71aec284df23b47d536bb06d
Parents: ee53602
Author: Jonathan Hung 
Authored: Mon Jul 30 17:41:01 2018 -0700
Committer: Jonathan Hung 
Committed: Mon Jul 30 17:44:18 2018 -0700

--
 .../api/protocolrecords/AllocateRequest.java| 47 +++-
 .../src/main/proto/yarn_service_protos.proto|  1 +
 .../hadoop/yarn/client/api/AMRMClient.java  | 11 +++
 .../yarn/client/api/async/AMRMClientAsync.java  | 11 +++
 .../api/async/impl/AMRMClientAsyncImpl.java |  5 ++
 .../yarn/client/api/impl/AMRMClientImpl.java| 11 +++
 .../yarn/client/api/impl/TestAMRMClient.java| 77 
 .../impl/pb/AllocateRequestPBImpl.java  | 27 ++-
 .../resourcemanager/DefaultAMSProcessor.java|  2 +-
 .../rmapp/attempt/RMAppAttemptImpl.java | 20 +
 .../event/RMAppAttemptStatusupdateEvent.java| 11 +++
 .../TestApplicationMasterService.java   | 34 +
 .../server/resourcemanager/TestRMRestart.java   | 45 
 13 files changed, 298 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3e06a5dc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/AllocateRequest.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/AllocateRequest.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/AllocateRequest.java
index eee50e3..799088b 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/AllocateRequest.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/AllocateRequest.java
@@ -73,7 +73,21 @@ public abstract class AllocateRequest {
 .releaseList(containersToBeReleased)
 .resourceBlacklistRequest(resourceBlacklistRequest).build();
   }
-  
+
+  @Public
+  @Unstable
+  public static AllocateRequest newInstance(int responseID, float appProgress,
+  List resourceAsk,
+  List containersToBeReleased,
+  ResourceBlacklistRequest resourceBlacklistRequest,
+  String trackingUrl) {
+return AllocateRequest.newBuilder().responseId(responseID)
+.progress(appProgress).askList(resourceAsk)
+.releaseList(containersToBeReleased)
+.resourceBlacklistRequest(resourceBlacklistRequest)
+.trackingUrl(trackingUrl).build();
+  }
+
   @Public
   @Unstable
   public static AllocateRequest newInstance(int responseID, float appProgress,
@@ -240,6 +254,22 @@ public abstract class AllocateRequest {
   List schedulingRequests) {
   }
 
+  /**
+   * Get the tracking url update for this heartbeat.
+   * @return tracking url to update this application with
+   */
+  @Public
+  @Unstable
+  public abstract String getTrackingUrl();
+
+  /**
+   * Set the new tracking url for this application.
+   * @param trackingUrl the new tracking url
+   */
+  @Public
+  @Unstable
+  public abstract void setTrackingUrl(String trackingUrl);
+
   @Public
   @Unstable
   public static AllocateRequestBuilder newBuilder() {
@@ -356,6 +386,19 @@ public abstract class AllocateRequest {
 }
 
 /**
+ * Set the trackingUrl of the request.
+ * @see AllocateRequest#setTrackingUrl(String)
+ * @param trackingUrl new tracking url
+ * @return {@link AllocateRequestBuilder}
+ */
+@Public
+@Unstable
+public AllocateRequestBuilder trackingUrl(String trackingUrl) {
+  allocateRequest.setTrackingUrl(trackingUrl);
+  return this;
+}
+
+/**
  * Return generated {@link AllocateRequest} object.
  * @return {@link AllocateRequest}
  */
@@ -365,4 +408,4 @@ public abstract class AllocateRequest {
   return allocateRequest;
 }
   }
-}
\ No newline at end of file
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3e06a5dc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto
index 92a65ad..acd452d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/ha

[26/50] hadoop git commit: HADOOP-15607. AliyunOSS: fix duplicated partNumber issue in AliyunOSSBlockOutputStream. Contributed by Jinhu Wu.

2018-08-01 Thread xkrogen
HADOOP-15607. AliyunOSS: fix duplicated partNumber issue in 
AliyunOSSBlockOutputStream. Contributed by Jinhu Wu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0857f116
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0857f116
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0857f116

Branch: refs/heads/HDFS-12943
Commit: 0857f116b754d83d3c540cd6f989087af24fef27
Parents: 007e6f5
Author: Sammi Chen 
Authored: Mon Jul 30 10:53:44 2018 +0800
Committer: Sammi Chen 
Committed: Mon Jul 30 10:53:44 2018 +0800

--
 .../aliyun/oss/AliyunOSSBlockOutputStream.java  | 59 
 .../fs/aliyun/oss/AliyunOSSFileSystemStore.java |  2 +
 .../oss/TestAliyunOSSBlockOutputStream.java | 12 +++-
 3 files changed, 49 insertions(+), 24 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0857f116/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSBlockOutputStream.java
--
diff --git 
a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSBlockOutputStream.java
 
b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSBlockOutputStream.java
index 12d551b..0a833b2 100644
--- 
a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSBlockOutputStream.java
+++ 
b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSBlockOutputStream.java
@@ -33,7 +33,9 @@ import java.io.FileOutputStream;
 import java.io.IOException;
 import java.io.OutputStream;
 import java.util.ArrayList;
+import java.util.HashMap;
 import java.util.List;
+import java.util.Map;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.ExecutorService;
 
@@ -50,7 +52,7 @@ public class AliyunOSSBlockOutputStream extends OutputStream {
   private boolean closed;
   private String key;
   private File blockFile;
-  private List blockFiles = new ArrayList<>();
+  private Map blockFiles = new HashMap<>();
   private long blockSize;
   private int blockId = 0;
   private long blockWritten = 0L;
@@ -94,8 +96,9 @@ public class AliyunOSSBlockOutputStream extends OutputStream {
 
 blockStream.flush();
 blockStream.close();
-if (!blockFiles.contains(blockFile)) {
-  blockFiles.add(blockFile);
+if (!blockFiles.values().contains(blockFile)) {
+  blockId++;
+  blockFiles.put(blockId, blockFile);
 }
 
 try {
@@ -107,7 +110,7 @@ public class AliyunOSSBlockOutputStream extends 
OutputStream {
   ListenableFuture partETagFuture =
   executorService.submit(() -> {
 PartETag partETag = store.uploadPart(blockFile, key, uploadId,
-blockId + 1);
+blockId);
 return partETag;
   });
   partETagsFutures.add(partETagFuture);
@@ -120,11 +123,7 @@ public class AliyunOSSBlockOutputStream extends 
OutputStream {
 store.completeMultipartUpload(key, uploadId, partETags);
   }
 } finally {
-  for (File tFile: blockFiles) {
-if (tFile.exists() && !tFile.delete()) {
-  LOG.warn("Failed to delete temporary file {}", tFile);
-}
-  }
+  removePartFiles();
   closed = true;
 }
   }
@@ -141,38 +140,52 @@ public class AliyunOSSBlockOutputStream extends 
OutputStream {
 if (closed) {
   throw new IOException("Stream closed.");
 }
-try {
-  blockStream.write(b, off, len);
-  blockWritten += len;
-  if (blockWritten >= blockSize) {
-uploadCurrentPart();
-blockWritten = 0L;
+blockStream.write(b, off, len);
+blockWritten += len;
+if (blockWritten >= blockSize) {
+  uploadCurrentPart();
+  blockWritten = 0L;
+}
+  }
+
+  private void removePartFiles() throws IOException {
+for (ListenableFuture partETagFuture : partETagsFutures) {
+  if (!partETagFuture.isDone()) {
+continue;
   }
-} finally {
-  for (File tFile: blockFiles) {
-if (tFile.exists() && !tFile.delete()) {
-  LOG.warn("Failed to delete temporary file {}", tFile);
+
+  try {
+File blockFile = blockFiles.get(partETagFuture.get().getPartNumber());
+if (blockFile != null && blockFile.exists() && !blockFile.delete()) {
+  LOG.warn("Failed to delete temporary file {}", blockFile);
 }
+  } catch (InterruptedException | ExecutionException e) {
+throw new IOException(e);
   }
 }
   }
 
   private void uploadCurrentPart() throws IOException {
-blockFiles.add(blockFile);
 blockStream.flush();
 blockStream.close();
 if (blockId == 0) {
   uploadId = store.getUploadId(key);
 }
+
+b

[40/50] hadoop git commit: YARN-8579. Recover NMToken of previous attempted component data. Contributed by Gour Saha

2018-08-01 Thread xkrogen
YARN-8579.  Recover NMToken of previous attempted component data.
Contributed by Gour Saha


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c7ebcd76
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c7ebcd76
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c7ebcd76

Branch: refs/heads/HDFS-12943
Commit: c7ebcd76bf3dd14127336951f2be3de772e7826a
Parents: 4b540bb
Author: Eric Yang 
Authored: Tue Jul 31 18:01:02 2018 -0400
Committer: Eric Yang 
Committed: Tue Jul 31 18:01:02 2018 -0400

--
 .../hadoop/yarn/service/ServiceScheduler.java |  1 +
 .../scheduler/SchedulerApplicationAttempt.java|  3 ++-
 .../scheduler/fair/FairScheduler.java |  8 ++--
 .../applicationsmanager/TestAMRestart.java| 18 ++
 4 files changed, 23 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c7ebcd76/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceScheduler.java
index cfaf356..0801ad0 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceScheduler.java
@@ -649,6 +649,7 @@ public class ServiceScheduler extends CompositeService {
 @Override
 public void onContainersReceivedFromPreviousAttempts(
 List containers) {
+  LOG.info("Containers recovered after AM registered: {}", containers);
   if (containers == null || containers.isEmpty()) {
 return;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c7ebcd76/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
index dd6d38f..f9df2b8 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
@@ -785,6 +785,7 @@ public class SchedulerApplicationAttempt implements 
SchedulableEntity {
   List returnContainerList = new ArrayList<>
   (recoveredPreviousAttemptContainers);
   recoveredPreviousAttemptContainers.clear();
+  updateNMTokens(returnContainerList);
   return returnContainerList;
 } finally {
   writeLock.unlock();
@@ -1466,4 +1467,4 @@ public class SchedulerApplicationAttempt implements 
SchedulableEntity {
   public Map getApplicationSchedulingEnvs() {
 return this.applicationSchedulingEnvs;
   }
-}
\ No newline at end of file
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c7ebcd76/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
index 20d1afe..037cebf 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/ha

[14/50] hadoop git commit: HDDS-270. Move generic container util functions to ContianerUtils. Contributed by Hanisha Koneru.

2018-08-01 Thread xkrogen
HDDS-270. Move generic container util functions to ContianerUtils.
Contributed by Hanisha Koneru.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3cc7ce81
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3cc7ce81
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3cc7ce81

Branch: refs/heads/HDFS-12943
Commit: 3cc7ce816e4ffb7287aa05cc5e00b2e058b4a2a4
Parents: 64e739e
Author: Anu Engineer 
Authored: Fri Jul 27 07:12:21 2018 -0700
Committer: Anu Engineer 
Committed: Fri Jul 27 07:12:21 2018 -0700

--
 .../common/helpers/ContainerUtils.java  | 34 
 .../container/common/interfaces/Container.java  |  6 
 .../container/keyvalue/KeyValueContainer.java   | 22 +
 .../helpers/KeyValueContainerLocationUtil.java  | 17 --
 .../container/ozoneimpl/ContainerReader.java| 27 ++--
 .../keyvalue/TestKeyValueContainer.java | 24 --
 .../common/impl/TestContainerPersistence.java   | 13 +++-
 7 files changed, 62 insertions(+), 81 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3cc7ce81/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java
--
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java
index 77a891a..469c969 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java
@@ -199,20 +199,6 @@ public final class ContainerUtils {
   }
 
   /**
-   * Returns container file location.
-   *
-   * @param containerData - Data
-   * @param location - Root path
-   * @return Path
-   */
-  public static File getContainerFile(ContainerData containerData,
-  Path location) {
-return location.resolve(Long.toString(containerData
-.getContainerID()).concat(CONTAINER_EXTENSION))
-.toFile();
-  }
-
-  /**
* Persistent a {@link DatanodeDetails} to a local file.
*
* @throws IOException when read/write error occurs
@@ -300,4 +286,24 @@ public final class ContainerUtils {
 }
   }
 
+  /**
+   * Get the .container file from the containerBaseDir
+   * @param containerBaseDir container base directory. The name of this
+   * directory is same as the containerID
+   * @return the .container file
+   */
+  public static File getContainerFile(File containerBaseDir) {
+// Container file layout is
+// .../<containerID>/metadata/<containerID>.container
+String containerFilePath = OzoneConsts.CONTAINER_META_PATH + File.separator
++ getContainerID(containerBaseDir) + OzoneConsts.CONTAINER_EXTENSION;
+return new File(containerBaseDir, containerFilePath);
+  }
+
+  /**
+   * ContainerID can be decoded from the container base directory name
+   */
+  public static long getContainerID(File containerBaseDir) {
+return Long.parseLong(containerBaseDir.getName());
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3cc7ce81/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Container.java
--
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Container.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Container.java
index fe35e1d..fc91920 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Container.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Container.java
@@ -28,6 +28,7 @@ import org.apache.hadoop.hdfs.util.RwLock;
 import org.apache.hadoop.ozone.container.common.impl.ContainerData;
 import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
 
+import java.io.File;
 import java.util.Map;
 
 
@@ -92,6 +93,11 @@ public interface Container extends RwLock {
   ContainerProtos.ContainerType getContainerType();
 
   /**
+   * Returns containerFile.
+   */
+  File getContainerFile();
+
+  /**
* updates the DeleteTransactionId.
* @param deleteTransactionId
*/

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3cc7ce81/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
--
diff --git 

[45/50] hadoop git commit: YARN-8522. Application fails with InvalidResourceRequestException. (Zian Chen via wangda)

2018-08-01 Thread xkrogen
YARN-8522. Application fails with InvalidResourceRequestException. (Zian Chen 
via wangda)

Change-Id: I34dd7fa49bd4d10580c4a78051033b1068d28f1e


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5cc8e991
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5cc8e991
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5cc8e991

Branch: refs/heads/HDFS-12943
Commit: 5cc8e99147276a059979813f7fd323dd7d77b248
Parents: f4db753
Author: Wangda Tan 
Authored: Tue Jul 31 17:48:44 2018 -0700
Committer: Wangda Tan 
Committed: Tue Jul 31 22:34:53 2018 -0700

--
 .../pb/ApplicationSubmissionContextPBImpl.java  | 87 +++-
 1 file changed, 46 insertions(+), 41 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5cc8e991/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationSubmissionContextPBImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationSubmissionContextPBImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationSubmissionContextPBImpl.java
index 0c91e18..b30224e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationSubmissionContextPBImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationSubmissionContextPBImpl.java
@@ -84,7 +84,7 @@ extends ApplicationSubmissionContext {
 viaProto = true;
   }
   
-  public ApplicationSubmissionContextProto getProto() {
+  public synchronized ApplicationSubmissionContextProto getProto() {
   mergeLocalToProto();
 proto = viaProto ? proto : builder.build();
 viaProto = true;
@@ -164,7 +164,7 @@ extends ApplicationSubmissionContext {
   }
 
   @Override
-  public Priority getPriority() {
+  public synchronized Priority getPriority() {
 ApplicationSubmissionContextProtoOrBuilder p = viaProto ? proto : builder;
 if (this.priority != null) {
   return this.priority;
@@ -177,7 +177,7 @@ extends ApplicationSubmissionContext {
   }
   
   @Override
-  public void setPriority(Priority priority) {
+  public synchronized void setPriority(Priority priority) {
 maybeInitBuilder();
 if (priority == null)
   builder.clearPriority();
@@ -185,7 +185,7 @@ extends ApplicationSubmissionContext {
   }
   
   @Override
-  public ApplicationId getApplicationId() {
+  public synchronized ApplicationId getApplicationId() {
 ApplicationSubmissionContextProtoOrBuilder p = viaProto ? proto : builder;
 if (this.applicationId != null) {
   return applicationId;
@@ -198,7 +198,7 @@ extends ApplicationSubmissionContext {
   }
 
   @Override
-  public void setApplicationId(ApplicationId applicationId) {
+  public synchronized void setApplicationId(ApplicationId applicationId) {
 maybeInitBuilder();
 if (applicationId == null)
   builder.clearApplicationId();
@@ -206,7 +206,7 @@ extends ApplicationSubmissionContext {
   }
   
   @Override
-  public String getApplicationName() {
+  public synchronized String getApplicationName() {
 ApplicationSubmissionContextProtoOrBuilder p = viaProto ? proto : builder;
 if (!p.hasApplicationName()) {
   return null;
@@ -215,7 +215,7 @@ extends ApplicationSubmissionContext {
   }
 
   @Override
-  public void setApplicationName(String applicationName) {
+  public synchronized void setApplicationName(String applicationName) {
 maybeInitBuilder();
 if (applicationName == null) {
   builder.clearApplicationName();
@@ -225,7 +225,7 @@ extends ApplicationSubmissionContext {
   }
 
   @Override
-  public String getQueue() {
+  public synchronized String getQueue() {
 ApplicationSubmissionContextProtoOrBuilder p = viaProto ? proto : builder;
 if (!p.hasQueue()) {
   return null;
@@ -234,7 +234,7 @@ extends ApplicationSubmissionContext {
   }
 
   @Override
-  public String getApplicationType() {
+  public synchronized String getApplicationType() {
 ApplicationSubmissionContextProtoOrBuilder p = viaProto ? proto : builder;
 if (!p.hasApplicationType()) {
   return null;
@@ -252,13 +252,13 @@ extends ApplicationSubmissionContext {
   }
 
   @Override
-  public Set<String> getApplicationTags() {
+  public synchronized Set<String> getApplicationTags() {
 initApplicationTags();
 return this.applicationTags;
   }
 
   @Override
-  public void setQueue(String queue) {
+  public synchronized void setQueue(String queue) {
 maybeInitBuilder();
 if (queue == null) {
   builder.clearQueu

[43/50] hadoop git commit: YARN-8397. Potential thread leak in ActivitiesManager. Contributed by Rohith Sharma K S.

2018-08-01 Thread xkrogen
YARN-8397. Potential thread leak in ActivitiesManager. Contributed by Rohith 
Sharma K S.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6310c0d1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6310c0d1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6310c0d1

Branch: refs/heads/HDFS-12943
Commit: 6310c0d17d6422a595f856a55b4f1fb82be43739
Parents: 40f9b0c
Author: Sunil G 
Authored: Wed Aug 1 08:33:01 2018 +0530
Committer: Sunil G 
Committed: Wed Aug 1 08:33:30 2018 +0530

--
 .../scheduler/activities/ActivitiesManager.java | 20 +++-
 .../scheduler/capacity/CapacityScheduler.java   |  1 +
 2 files changed, 16 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6310c0d1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/activities/ActivitiesManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/activities/ActivitiesManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/activities/ActivitiesManager.java
index af73ae3..8498c40 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/activities/ActivitiesManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/activities/ActivitiesManager.java
@@ -57,6 +57,7 @@ public class ActivitiesManager extends AbstractService {
   private Thread cleanUpThread;
   private int timeThreshold = 600 * 1000;
   private final RMContext rmContext;
+  private volatile boolean stopped;
 
   public ActivitiesManager(RMContext rmContext) {
 super(ActivitiesManager.class.getName());
@@ -113,7 +114,7 @@ public class ActivitiesManager extends AbstractService {
 cleanUpThread = new Thread(new Runnable() {
   @Override
   public void run() {
-while (true) {
+while (!stopped && !Thread.currentThread().isInterrupted()) {
Iterator<Map.Entry<NodeId, List<NodeAllocation>>> ite =
   completedNodeAllocations.entrySet().iterator();
   while (ite.hasNext()) {
@@ -140,20 +141,29 @@ public class ActivitiesManager extends AbstractService {
 
   try {
 Thread.sleep(5000);
-  } catch (Exception e) {
-// ignore
+  } catch (InterruptedException e) {
+LOG.info(getName() + " thread interrupted");
+break;
   }
 }
   }
 });
-
+cleanUpThread.setName("ActivitiesManager thread.");
 cleanUpThread.start();
 super.serviceStart();
   }
 
   @Override
   protected void serviceStop() throws Exception {
-cleanUpThread.interrupt();
+stopped = true;
+if (cleanUpThread != null) {
+  cleanUpThread.interrupt();
+  try {
+cleanUpThread.join();
+  } catch (InterruptedException ie) {
+LOG.warn("Interrupted Exception while stopping", ie);
+  }
+}
 super.serviceStop();
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6310c0d1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
index 37f56de..0b7fe92 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
@@ -439,6 +439,7 @@ public class CapacityScheduler extends
   public void serviceStop() throws Exception {
 try {
   writeLock.lock();
+  this.activitiesManager.stop();
   if (scheduleAsynchronously && asyncSchedulerThreads != null) 

[05/50] hadoop git commit: HADOOP-15611. Log more details for FairCallQueue. Contributed by Ryan Wu.

2018-08-01 Thread xkrogen
HADOOP-15611. Log more details for FairCallQueue. Contributed by Ryan Wu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9089790c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9089790c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9089790c

Branch: refs/heads/HDFS-12943
Commit: 9089790cabb4771198be0fe64c1317a3ff1c80f1
Parents: f93ecf5
Author: Yiqun Lin 
Authored: Thu Jul 26 18:08:28 2018 +0800
Committer: Yiqun Lin 
Committed: Thu Jul 26 18:08:28 2018 +0800

--
 .../main/java/org/apache/hadoop/ipc/DecayRpcScheduler.java   | 8 
 .../org/apache/hadoop/ipc/WeightedRoundRobinMultiplexer.java | 3 +++
 2 files changed, 11 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9089790c/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/DecayRpcScheduler.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/DecayRpcScheduler.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/DecayRpcScheduler.java
index f12ecb6..8bb0ce4 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/DecayRpcScheduler.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/DecayRpcScheduler.java
@@ -391,6 +391,7 @@ public class DecayRpcScheduler implements RpcScheduler,
* counts current.
*/
   private void decayCurrentCounts() {
+LOG.debug("Start to decay current counts.");
 try {
   long totalDecayedCount = 0;
   long totalRawCount = 0;
@@ -410,7 +411,12 @@ public class DecayRpcScheduler implements RpcScheduler,
 totalDecayedCount += nextValue;
 decayedCount.set(nextValue);
 
+LOG.debug("Decaying counts for the user: {}, " +
+"its decayedCount: {}, rawCount: {}", entry.getKey(),
+nextValue, rawCount.get());
 if (nextValue == 0) {
+  LOG.debug("The decayed count for the user {} is zero " +
+  "and being cleaned.", entry.getKey());
   // We will clean up unused keys here. An interesting optimization
   // might be to have an upper bound on keyspace in callCounts and only
   // clean once we pass it.
@@ -422,6 +428,8 @@ public class DecayRpcScheduler implements RpcScheduler,
   totalDecayedCallCount.set(totalDecayedCount);
   totalRawCallCount.set(totalRawCount);
 
+  LOG.debug("After decaying the stored counts, totalDecayedCount: {}, " +
+  "totalRawCallCount: {}.", totalDecayedCount, totalRawCount);
   // Now refresh the cache of scheduling decisions
   recomputeScheduleCache();
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9089790c/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/WeightedRoundRobinMultiplexer.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/WeightedRoundRobinMultiplexer.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/WeightedRoundRobinMultiplexer.java
index d308725..096cc1a 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/WeightedRoundRobinMultiplexer.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/WeightedRoundRobinMultiplexer.java
@@ -109,6 +109,9 @@ public class WeightedRoundRobinMultiplexer implements 
RpcMultiplexer {
 // Finally, reset requestsLeft. This will enable moveToNextQueue to be
 // called again, for the new currentQueueIndex
 this.requestsLeft.set(this.queueWeights[nextIdx]);
+LOG.debug("Moving to next queue from queue index {} to index {}, " +
+"number of requests left for current queue: {}.",
+thisIdx, nextIdx, requestsLeft);
   }
 
   /**


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[31/50] hadoop git commit: HADOOP-15637. LocalFs#listLocatedStatus does not filter out hidden .crc files. Contributed by Erik Krogen.

2018-08-01 Thread xkrogen
HADOOP-15637. LocalFs#listLocatedStatus does not filter out hidden .crc files. 
Contributed by Erik Krogen.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e8f952ef
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e8f952ef
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e8f952ef

Branch: refs/heads/HDFS-12943
Commit: e8f952ef06ae05d2b504300d6f19beb8a052b6f1
Parents: 3517a47
Author: Chen Liang 
Authored: Mon Jul 30 10:25:07 2018 -0700
Committer: Chen Liang 
Committed: Mon Jul 30 10:25:07 2018 -0700

--
 .../java/org/apache/hadoop/fs/ChecksumFs.java   | 37 +++
 .../fs/FileContextMainOperationsBaseTest.java   | 38 
 2 files changed, 75 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e8f952ef/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFs.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFs.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFs.java
index 75622ad..c56f6e0 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFs.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFs.java
@@ -27,10 +27,12 @@ import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.EnumSet;
 
+import java.util.NoSuchElementException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.Options.ChecksumOpt;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.Progressable;
 import org.slf4j.Logger;
@@ -527,4 +529,39 @@ public abstract class ChecksumFs extends FilterFs {
 }
 return results.toArray(new FileStatus[results.size()]);
   }
+
+  @Override
+  public RemoteIterator<LocatedFileStatus> listLocatedStatus(final Path f)
+  throws AccessControlException, FileNotFoundException,
+ UnresolvedLinkException, IOException {
+final RemoteIterator<LocatedFileStatus> iter =
+getMyFs().listLocatedStatus(f);
+return new RemoteIterator<LocatedFileStatus>() {
+
+  private LocatedFileStatus next = null;
+
+  @Override
+  public boolean hasNext() throws IOException {
+while (next == null && iter.hasNext()) {
+  LocatedFileStatus unfilteredNext = iter.next();
+  if (!isChecksumFile(unfilteredNext.getPath())) {
+next = unfilteredNext;
+  }
+}
+return next != null;
+  }
+
+  @Override
+  public LocatedFileStatus next() throws IOException {
+if (!hasNext()) {
+  throw new NoSuchElementException();
+}
+LocatedFileStatus tmp = next;
+next = null;
+return tmp;
+  }
+
+};
+  }
+
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e8f952ef/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextMainOperationsBaseTest.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextMainOperationsBaseTest.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextMainOperationsBaseTest.java
index 62ecd9f..c07a6ff 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextMainOperationsBaseTest.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextMainOperationsBaseTest.java
@@ -369,6 +369,44 @@ public abstract class FileContextMainOperationsBaseTest  {
 pathsIterator = fc.listStatus(getTestRootPath(fc, "test/hadoop/a"));
 Assert.assertFalse(pathsIterator.hasNext());
   }
+
+  @Test
+  public void testListFiles() throws Exception {
+Path[] testDirs = {
+getTestRootPath(fc, "test/dir1"),
+getTestRootPath(fc, "test/dir1/dir1"),
+getTestRootPath(fc, "test/dir2")
+};
+Path[] testFiles = {
+new Path(testDirs[0], "file1"),
+new Path(testDirs[0], "file2"),
+new Path(testDirs[1], "file2"),
+new Path(testDirs[2], "file1")
+};
+
+for (Path path : testDirs) {
+  fc.mkdir(path, FsPermission.getDefault(), true);
+}
+for (Path p : testFiles) {
+  FSDataOutputStream out = fc.create(p).build();
+  out.writeByte(0);
+  out.close();
+}
+
+RemoteIterator<LocatedFileStatus> filesIterator =
+fc.util().listFiles(getTestRootPath(fc, "test"), true);
+LocatedFileStatus[] fileStats =
+new LocatedFileStatus[testFiles.len

[19/50] hadoop git commit: YARN-8596. Allow SQLFederationStateStore to submit the same app in the same subcluster. Contributed by Giovanni Matteo Fumarola.

2018-08-01 Thread xkrogen
YARN-8596. Allow SQLFederationStateStore to submit the same app in the same 
subcluster. Contributed by Giovanni Matteo Fumarola.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/79091cf7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/79091cf7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/79091cf7

Branch: refs/heads/HDFS-12943
Commit: 79091cf76f6e966f64ac1d65e43e95782695e678
Parents: 2cccf40
Author: Inigo Goiri 
Authored: Fri Jul 27 15:23:57 2018 -0700
Committer: Inigo Goiri 
Committed: Fri Jul 27 15:23:57 2018 -0700

--
 .../store/impl/SQLFederationStateStore.java  | 14 +++---
 .../store/impl/FederationStateStoreBaseTest.java | 19 +++
 2 files changed, 26 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/79091cf7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/SQLFederationStateStore.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/SQLFederationStateStore.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/SQLFederationStateStore.java
index e62dcaf..273118a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/SQLFederationStateStore.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/SQLFederationStateStore.java
@@ -564,13 +564,13 @@ public class SQLFederationStateStore implements 
FederationStateStore {
 // Check the ROWCOUNT value, if it is equal to 0 it means the call
 // did not add a new application into FederationStateStore
 if (cstmt.getInt(4) == 0) {
-  String errMsg = "The application " + appId
-  + " was not insert into the StateStore";
-  FederationStateStoreUtils.logAndThrowStoreException(LOG, errMsg);
-}
-// Check the ROWCOUNT value, if it is different from 1 it means the 
call
-// had a wrong behavior. Maybe the database is not set correctly.
-if (cstmt.getInt(4) != 1) {
+  LOG.info(
+  "The application {} was not inserted in the StateStore because 
it"
+  + " was already present in SubCluster {}",
+  appId, subClusterHome);
+} else if (cstmt.getInt(4) != 1) {
+  // Check the ROWCOUNT value, if it is different from 1 it means the
+  // call had a wrong behavior. Maybe the database is not set 
correctly.
   String errMsg = "Wrong behavior during the insertion of SubCluster "
   + subClusterId;
   FederationStateStoreUtils.logAndThrowStoreException(LOG, errMsg);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/79091cf7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/store/impl/FederationStateStoreBaseTest.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/store/impl/FederationStateStoreBaseTest.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/store/impl/FederationStateStoreBaseTest.java
index 15cc0f0..b17f870 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/store/impl/FederationStateStoreBaseTest.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/store/impl/FederationStateStoreBaseTest.java
@@ -282,6 +282,25 @@ public abstract class FederationStateStoreBaseTest {
   }
 
   @Test
+  public void testAddApplicationHomeSubClusterAppAlreadyExistsInTheSameSC()
+  throws Exception {
+ApplicationId appId = ApplicationId.newInstance(1, 1);
+SubClusterId subClusterId1 = SubClusterId.newInstance("SC1");
+addApplicationHomeSC(appId, subClusterId1);
+
+ApplicationHomeSubCluster ahsc2 =
+ApplicationHomeSubCluster.newInstance(appId, subClusterId1);
+
+AddApplicationHomeSubClusterResponse response =
+stateStore.addApplicationHomeSubCluster(
+AddApplicationHomeSubClusterRequest.newIns

[16/50] hadoop git commit: YARN-8571. Validate service principal format prior to launching yarn service. Contributed by Eric Yang

2018-08-01 Thread xkrogen
YARN-8571. Validate service principal format prior to launching yarn service. 
Contributed by Eric Yang


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b429f19d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b429f19d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b429f19d

Branch: refs/heads/HDFS-12943
Commit: b429f19d32d8f60a3535e047ef10cfb3edeb54c8
Parents: 1c40bc2
Author: Billie Rinaldi 
Authored: Fri Jul 27 11:30:19 2018 -0700
Committer: Billie Rinaldi 
Committed: Fri Jul 27 11:30:19 2018 -0700

--
 .../exceptions/RestApiErrorMessages.java|  4 
 .../yarn/service/utils/ServiceApiUtil.java  | 10 
 .../hadoop/yarn/service/TestServiceApiUtil.java | 25 
 3 files changed, 39 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b429f19d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/RestApiErrorMessages.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/RestApiErrorMessages.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/RestApiErrorMessages.java
index f10d884..8f831ee 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/RestApiErrorMessages.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/RestApiErrorMessages.java
@@ -125,4 +125,8 @@ public interface RestApiErrorMessages {
 
   String ERROR_COMP_DOES_NOT_NEED_UPGRADE = "The component (%s) does not need" 
+
   " an upgrade.";
+  String ERROR_KERBEROS_PRINCIPAL_NAME_FORMAT = "Kerberos principal (%s) does 
" +
+  " not contain a hostname.";
+  String ERROR_KERBEROS_PRINCIPAL_MISSING = "Kerberos principal or keytab is" +
+  " missing.";
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b429f19d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceApiUtil.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceApiUtil.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceApiUtil.java
index bebf52c..9219569 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceApiUtil.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceApiUtil.java
@@ -243,6 +243,16 @@ public class ServiceApiUtil {
 
   public static void validateKerberosPrincipal(
   KerberosPrincipal kerberosPrincipal) throws IOException {
+try {
+  if (!kerberosPrincipal.getPrincipalName().contains("/")) {
+throw new IllegalArgumentException(String.format(
+RestApiErrorMessages.ERROR_KERBEROS_PRINCIPAL_NAME_FORMAT,
+kerberosPrincipal.getPrincipalName()));
+  }
+} catch (NullPointerException e) {
+  throw new IllegalArgumentException(
+  RestApiErrorMessages.ERROR_KERBEROS_PRINCIPAL_MISSING);
+}
 if (!StringUtils.isEmpty(kerberosPrincipal.getKeytab())) {
   try {
 // validate URI format

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b429f19d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestServiceApiUtil.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestServiceApiUtil.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestServiceApiUtil.java
index 47b2803..c2a80e7 100644
--- 
a/hadoop-yarn-project/hadoop-yarn

[30/50] hadoop git commit: HDDS-287. Add Close ContainerAction to Datanode#StateContext when the container gets full. Contributed by Nanda kumar.

2018-08-01 Thread xkrogen
HDDS-287. Add Close ContainerAction to Datanode#StateContext when the container 
gets full. Contributed by Nanda kumar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3517a478
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3517a478
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3517a478

Branch: refs/heads/HDFS-12943
Commit: 3517a47897457c11096ab57a4cb0b096a838a3ec
Parents: 952dc2f
Author: Nanda kumar 
Authored: Mon Jul 30 21:18:42 2018 +0530
Committer: Nanda kumar 
Committed: Mon Jul 30 21:18:42 2018 +0530

--
 .../container/common/impl/HddsDispatcher.java   |  63 +++-
 .../statemachine/DatanodeStateMachine.java  |   2 +-
 .../common/statemachine/StateContext.java   |  14 +-
 .../container/ozoneimpl/OzoneContainer.java |   6 +-
 .../common/impl/TestHddsDispatcher.java | 152 +++
 .../container/common/impl/package-info.java |  22 +++
 .../common/interfaces/TestHandler.java  |   4 +-
 .../container/ozoneimpl/TestOzoneContainer.java |   2 +-
 .../ozone/container/common/TestEndPoint.java|  12 +-
 .../common/impl/TestCloseContainerHandler.java  |   2 +-
 .../container/metrics/TestContainerMetrics.java |   2 +-
 .../container/ozoneimpl/TestOzoneContainer.java |   2 +-
 .../container/server/TestContainerServer.java   |   2 +-
 .../genesis/BenchMarkDatanodeDispatcher.java|   6 +-
 14 files changed, 270 insertions(+), 21 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3517a478/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
--
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
index 3d418e5..ee232db 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
@@ -21,12 +21,21 @@ package org.apache.hadoop.ozone.container.common.impl;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Maps;
+import org.apache.hadoop.conf.StorageUnit;
+import org.apache.hadoop.hdds.HddsUtils;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.protocol.proto
+.StorageContainerDatanodeProtocolProtos.ContainerInfo;
+import org.apache.hadoop.hdds.protocol.proto
+.StorageContainerDatanodeProtocolProtos.ContainerAction;
+import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import 
org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
 import org.apache.hadoop.ozone.container.common.interfaces.Container;
 import org.apache.hadoop.ozone.container.common.interfaces.Handler;
+import org.apache.hadoop.ozone.container.common.statemachine.StateContext;
 import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
@@ -35,11 +44,14 @@ import 
org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
 .ContainerCommandResponseProto;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
 .ContainerType;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
+.ContainerLifeCycleState;
 import org.apache.hadoop.ozone.container.common.interfaces.ContainerDispatcher;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import java.util.Map;
+import java.util.Optional;
 
 /**
  * Ozone Container dispatcher takes a call from the netty server and routes it
@@ -53,6 +65,8 @@ public class HddsDispatcher implements ContainerDispatcher {
   private final Configuration conf;
   private final ContainerSet containerSet;
   private final VolumeSet volumeSet;
+  private final StateContext context;
+  private final float containerCloseThreshold;
   private String scmID;
   private ContainerMetrics metrics;
 
@@ -61,10 +75,11 @@ public class HddsDispatcher implements ContainerDispatcher {
* XceiverServerHandler.
*/
   public HddsDispatcher(Configuration config, ContainerSet contSet,
-  VolumeSet volumes) {
+  VolumeSet volumes, StateContext context) {
 this.conf = config;
 this.containerSet = contSet;
 

[38/50] hadoop git commit: YARN-8605. TestDominantResourceFairnessPolicy.testModWhileSorting is flaky. (Wilfred Spiegelenburg via Haibo Chen)

2018-08-01 Thread xkrogen
YARN-8605. TestDominantResourceFairnessPolicy.testModWhileSorting is flaky. 
(Wilfred Spiegelenburg via Haibo Chen)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8aa93a57
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8aa93a57
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8aa93a57

Branch: refs/heads/HDFS-12943
Commit: 8aa93a575e896c609b97ddab58853b1eb95f0dee
Parents: 9fea5c9
Author: Haibo Chen 
Authored: Tue Jul 31 11:32:40 2018 -0700
Committer: Haibo Chen 
Committed: Tue Jul 31 11:32:40 2018 -0700

--
 .../TestDominantResourceFairnessPolicy.java | 38 +++-
 1 file changed, 12 insertions(+), 26 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8aa93a57/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/TestDominantResourceFairnessPolicy.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/TestDominantResourceFairnessPolicy.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/TestDominantResourceFairnessPolicy.java
index 55b7163..c963e0d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/TestDominantResourceFairnessPolicy.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/TestDominantResourceFairnessPolicy.java
@@ -24,7 +24,6 @@ import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
 
 import java.util.ArrayList;
-import java.util.Collections;
 import java.util.Comparator;
 import java.util.List;
 import java.util.Map;
@@ -458,33 +457,20 @@ public class TestDominantResourceFairnessPolicy {
 }
 Comparator DRFComparator = createComparator(10, 5);
 
-// To simulate unallocated resource changes
-Thread modThread = modificationThread(schedulableList);
-modThread.start();
+/*
+ * The old sort should fail, but timing it makes testing too flaky.
+ * TimSort which is used does not handle the concurrent modification of
+ * objects it is sorting. This is the test that should fail:
+ *  modThread.start();
+ *  try {
+ *Collections.sort(schedulableList, DRFComparator);
+ *  } catch (IllegalArgumentException iae) {
+ *// failed sort
+ *  }
+ */
 
-// This should fail: make sure that we do test correctly
-// TimSort which is used does not handle the concurrent modification of
-// objects it is sorting.
-try {
-  Collections.sort(schedulableList, DRFComparator);
-  fail("Sorting should have failed and did not");
-} catch (IllegalArgumentException iae) {
-  assertEquals(iae.getMessage(), "Comparison method violates its general 
contract!");
-}
-try {
-  modThread.join();
-} catch (InterruptedException ie) {
-  fail("ModThread join failed: " + ie.getMessage());
-}
-
-// clean up and try again using TreeSet which should work
-schedulableList.clear();
-for (int i=0; i<1; i++) {
-  schedulableList.add(
-  (FakeSchedulable)createSchedulable((i%10)*100, (i%3)*2));
-}
 TreeSet sortedSchedulable = new TreeSet<>(DRFComparator);
-modThread = modificationThread(schedulableList);
+Thread modThread = modificationThread(schedulableList);
 modThread.start();
 sortedSchedulable.addAll(schedulableList);
 try {


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[24/50] hadoop git commit: HDDS-273. DeleteLog entries should be purged only after corresponding DNs commit the transaction. Contributed by Lokesh Jain.

2018-08-01 Thread xkrogen
HDDS-273. DeleteLog entries should be purged only after corresponding DNs 
commit the transaction. Contributed by Lokesh Jain.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/feb795b5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/feb795b5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/feb795b5

Branch: refs/heads/HDFS-12943
Commit: feb795b58d2a3c20bdbddea1638a83f6637d3fc9
Parents: 6b038f8
Author: Mukul Kumar Singh 
Authored: Sun Jul 29 01:02:24 2018 +0530
Committer: Mukul Kumar Singh 
Committed: Sun Jul 29 01:02:24 2018 +0530

--
 .../DeleteBlocksCommandHandler.java |  12 +-
 .../StorageContainerDatanodeProtocol.proto  |   4 +-
 .../hadoop/hdds/scm/block/BlockManagerImpl.java |   2 +-
 .../block/DatanodeDeletedBlockTransactions.java |  47 ++--
 .../hadoop/hdds/scm/block/DeletedBlockLog.java  |  23 +-
 .../hdds/scm/block/DeletedBlockLogImpl.java | 123 ++
 .../scm/server/SCMDatanodeProtocolServer.java   |  19 +-
 .../hdds/scm/block/TestDeletedBlockLog.java | 232 ++-
 8 files changed, 256 insertions(+), 206 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/feb795b5/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java
--
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java
index 9640f93..b0d4cbc 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java
@@ -113,8 +113,8 @@ public class DeleteBlocksCommandHandler implements 
CommandHandler {
 DeleteBlockTransactionResult.Builder txResultBuilder =
 DeleteBlockTransactionResult.newBuilder();
 txResultBuilder.setTxID(entry.getTxID());
+long containerId = entry.getContainerID();
 try {
-  long containerId = entry.getContainerID();
   Container cont = containerSet.getContainer(containerId);
   if (cont == null) {
 throw new StorageContainerException("Unable to find the container "
@@ -126,7 +126,8 @@ public class DeleteBlocksCommandHandler implements 
CommandHandler {
 KeyValueContainerData containerData = (KeyValueContainerData)
 cont.getContainerData();
 deleteKeyValueContainerBlocks(containerData, entry);
-txResultBuilder.setSuccess(true);
+txResultBuilder.setContainerID(containerId)
+.setSuccess(true);
 break;
   default:
 LOG.error(
@@ -136,9 +137,12 @@ public class DeleteBlocksCommandHandler implements 
CommandHandler {
 } catch (IOException e) {
   LOG.warn("Failed to delete blocks for container={}, TXID={}",
   entry.getContainerID(), entry.getTxID(), e);
-  txResultBuilder.setSuccess(false);
+  txResultBuilder.setContainerID(containerId)
+  .setSuccess(false);
 }
-resultBuilder.addResults(txResultBuilder.build());
+resultBuilder.addResults(txResultBuilder.build())
+.setDnId(context.getParent().getDatanodeDetails()
+.getUuid().toString());
   });
   ContainerBlocksDeletionACKProto blockDeletionACK = resultBuilder.build();
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/feb795b5/hadoop-hdds/container-service/src/main/proto/StorageContainerDatanodeProtocol.proto
--
diff --git 
a/hadoop-hdds/container-service/src/main/proto/StorageContainerDatanodeProtocol.proto
 
b/hadoop-hdds/container-service/src/main/proto/StorageContainerDatanodeProtocol.proto
index d89567b..0c52efb 100644
--- 
a/hadoop-hdds/container-service/src/main/proto/StorageContainerDatanodeProtocol.proto
+++ 
b/hadoop-hdds/container-service/src/main/proto/StorageContainerDatanodeProtocol.proto
@@ -229,9 +229,11 @@ message DeletedBlocksTransaction {
 message ContainerBlocksDeletionACKProto {
   message DeleteBlockTransactionResult {
 required int64 txID = 1;
-required bool success = 2;
+required int64 containerID = 2;
+required bool success = 3;
   }
   repeated DeleteBlockTransactionResult results = 1;
+  required string dnId = 2;
 }
 
 // SendACK response retu

[33/50] hadoop git commit: HDDS-293. Reduce memory usage and object creation in KeyData.

2018-08-01 Thread xkrogen
HDDS-293. Reduce memory usage and object creation in KeyData.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ee53602a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ee53602a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ee53602a

Branch: refs/heads/HDFS-12943
Commit: ee53602a8179e76f4102d9062d0bebe8bb09d875
Parents: 2b39ad2
Author: Tsz Wo Nicholas Sze 
Authored: Mon Jul 30 15:00:29 2018 -0700
Committer: Tsz Wo Nicholas Sze 
Committed: Mon Jul 30 15:00:29 2018 -0700

--
 .../ozone/container/common/helpers/KeyData.java |  84 +
 .../common/impl/OpenContainerBlockMap.java  |   2 +-
 .../container/keyvalue/KeyValueHandler.java |   3 -
 .../container/common/helpers/TestKeyData.java   | 119 +++
 4 files changed, 179 insertions(+), 29 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ee53602a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/KeyData.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/KeyData.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/KeyData.java
index 1919ed9..84a6f71 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/KeyData.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/KeyData.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.ozone.container.common.helpers;
 
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdds.client.BlockID;
+import com.google.common.base.Preconditions;
 
 import java.io.IOException;
 import java.util.Collections;
@@ -35,11 +36,17 @@ public class KeyData {
   private final Map metadata;
 
   /**
+   * Represent a list of chunks.
+   * In order to reduce memory usage, chunkList is declared as an {@link 
Object}.
+   * When #elements == 0, chunkList is null.
+   * When #elements == 1, chunkList refers to the only element.
+   * When #elements > 1, chunkList refers to the list.
+   *
* Please note : when we are working with keys, we don't care what they point
* to. So we don't read chunkinfo nor validate them. It is responsibility
* of higher layer like ozone. We just read and write data from network.
*/
-  private List chunks;
+  private Object chunkList;
 
   /**
* total size of the key.
@@ -73,7 +80,7 @@ public class KeyData {
 }
 keyData.setChunks(data.getChunksList());
 if (data.hasSize()) {
-  keyData.setSize(data.getSize());
+  Preconditions.checkArgument(data.getSize() == keyData.getSize());
 }
 return keyData;
   }
@@ -86,13 +93,13 @@ public class KeyData {
 ContainerProtos.KeyData.Builder builder =
 ContainerProtos.KeyData.newBuilder();
 builder.setBlockID(this.blockID.getDatanodeBlockIDProtobuf());
-builder.addAllChunks(this.chunks);
 for (Map.Entry entry : metadata.entrySet()) {
   ContainerProtos.KeyValue.Builder keyValBuilder =
   ContainerProtos.KeyValue.newBuilder();
   builder.addMetadata(keyValBuilder.setKey(entry.getKey())
   .setValue(entry.getValue()).build());
 }
+builder.addAllChunks(getChunks());
 builder.setSize(size);
 return builder.build();
   }
@@ -132,30 +139,65 @@ public class KeyData {
 metadata.remove(key);
   }
 
+  @SuppressWarnings("unchecked")
+  private List castChunkList() {
+return (List)chunkList;
+  }
+
   /**
* Returns chunks list.
*
* @return list of chunkinfo.
*/
   public List getChunks() {
-return chunks;
+return chunkList == null? Collections.emptyList()
+: chunkList instanceof ContainerProtos.ChunkInfo?
+Collections.singletonList((ContainerProtos.ChunkInfo)chunkList)
+: Collections.unmodifiableList(castChunkList());
   }
 
   /**
* Adds chunkInfo to the list
*/
   public void addChunk(ContainerProtos.ChunkInfo chunkInfo) {
-if (chunks == null) {
-  chunks = new ArrayList<>();
+if (chunkList == null) {
+  chunkList = chunkInfo;
+} else {
+  final List list;
+  if (chunkList instanceof ContainerProtos.ChunkInfo) {
+list = new ArrayList<>(2);
+list.add((ContainerProtos.ChunkInfo)chunkList);
+chunkList = list;
+  } else {
+list = castChunkList();
+  }
+  list.add(chunkInfo);
 }
-chunks.add(chunkInfo);
+size += chunkInfo.getLen();
   }
 
   /**
* removes the chunk.
*/
-  public void removeChunk(ContainerProtos.ChunkInfo chunkInfo) {
-chunks.remove(chunkInfo);
+  public boolean removeChunk(ContainerProtos.ChunkInfo chunkInfo) {
+final boolea

[27/50] hadoop git commit: YARN-8591. [ATSv2] NPE while checking for entity acl in non-secure cluster. Contributed by Rohith Sharma K S.

2018-08-01 Thread xkrogen
YARN-8591. [ATSv2] NPE while checking for entity acl in non-secure cluster. 
Contributed by Rohith Sharma K S.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/63e08ec0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/63e08ec0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/63e08ec0

Branch: refs/heads/HDFS-12943
Commit: 63e08ec071852640babea9e39780327a0907712a
Parents: 0857f11
Author: Sunil G 
Authored: Mon Jul 30 14:48:04 2018 +0530
Committer: Sunil G 
Committed: Mon Jul 30 14:48:04 2018 +0530

--
 .../server/timelineservice/reader/TimelineReaderWebServices.java | 3 ++-
 .../reader/TestTimelineReaderWebServicesBasicAcl.java| 4 
 2 files changed, 6 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/63e08ec0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServices.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServices.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServices.java
index 7f96bfb..b10b705 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServices.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServices.java
@@ -3532,7 +3532,8 @@ public class TimelineReaderWebServices {
   static boolean checkAccess(TimelineReaderManager readerManager,
   UserGroupInformation ugi, String entityUser) {
 if (isDisplayEntityPerUserFilterEnabled(readerManager.getConfig())) {
-  if (!validateAuthUserWithEntityUser(readerManager, ugi, entityUser)) {
+  if (ugi != null && !validateAuthUserWithEntityUser(readerManager, ugi,
+  entityUser)) {
 String userName = ugi.getShortUserName();
 String msg = "User " + userName
 + " is not allowed to read TimelineService V2 data.";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/63e08ec0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesBasicAcl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesBasicAcl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesBasicAcl.java
index 4239bf0..6651457 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesBasicAcl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesBasicAcl.java
@@ -88,6 +88,10 @@ public class TestTimelineReaderWebServicesBasicAcl {
 Assert.assertFalse(TimelineReaderWebServices
 .validateAuthUserWithEntityUser(manager, null, user1));
 
+// true because ugi is null
+Assert.assertTrue(
+TimelineReaderWebServices.checkAccess(manager, null, user1));
+
 // incoming ugi is admin asking for entity owner user1
 Assert.assertTrue(
 TimelineReaderWebServices.checkAccess(manager, adminUgi, user1));


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[25/50] hadoop git commit: HDDS-248. Refactor DatanodeContainerProtocol.proto Contributed by Hanisha Koneru.

2018-08-01 Thread xkrogen
HDDS-248. Refactor DatanodeContainerProtocol.proto Contributed by Hanisha 
Koneru.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/007e6f51
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/007e6f51
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/007e6f51

Branch: refs/heads/HDFS-12943
Commit: 007e6f51135adb5864f6bfc258010fd09576387b
Parents: feb795b
Author: Bharat Viswanadham 
Authored: Sat Jul 28 14:50:43 2018 -0700
Committer: Bharat Viswanadham 
Committed: Sat Jul 28 14:57:11 2018 -0700

--
 .../scm/storage/ContainerProtocolCalls.java | 37 +---
 .../main/proto/DatanodeContainerProtocol.proto  | 96 +---
 .../container/common/impl/HddsDispatcher.java   | 51 +--
 .../CloseContainerCommandHandler.java   |  8 +-
 .../server/ratis/ContainerStateMachine.java |  6 +-
 .../keyvalue/KeyValueContainerData.java |  9 --
 .../container/keyvalue/KeyValueHandler.java | 16 +---
 .../container/ozoneimpl/OzoneContainer.java |  2 +-
 .../container/keyvalue/TestKeyValueHandler.java | 12 ++-
 .../scm/cli/container/InfoContainerHandler.java |  1 -
 .../ozone/container/ContainerTestHelper.java| 59 ++--
 .../common/impl/TestCloseContainerHandler.java  | 18 ++--
 .../genesis/BenchMarkDatanodeDispatcher.java| 19 ++--
 13 files changed, 148 insertions(+), 186 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/007e6f51/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java
index 36cdfc9..abad9e3 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java
@@ -29,6 +29,8 @@ import 
org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
 .ContainerCommandResponseProto;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
+.CloseContainerRequestProto;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
 .DatanodeBlockID;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
 .GetKeyRequestProto;
@@ -86,15 +88,18 @@ public final class ContainerProtocolCalls  {
 .newBuilder()
 .setBlockID(datanodeBlockID);
 String id = xceiverClient.getPipeline().getLeader().getUuidString();
+
 ContainerCommandRequestProto request = ContainerCommandRequestProto
 .newBuilder()
 .setCmdType(Type.GetKey)
+.setContainerID(datanodeBlockID.getContainerID())
 .setTraceID(traceID)
 .setDatanodeUuid(id)
 .setGetKey(readKeyRequest)
 .build();
 ContainerCommandResponseProto response = 
xceiverClient.sendCommand(request);
 validateContainerResponse(response);
+
 return response.getGetKey();
   }
 
@@ -118,7 +123,9 @@ public final class ContainerProtocolCalls  {
 String id = xceiverClient.getPipeline().getLeader().getUuidString();
 ContainerCommandRequestProto request =
 ContainerCommandRequestProto.newBuilder()
-.setCmdType(Type.GetCommittedBlockLength).setTraceID(traceID)
+.setCmdType(Type.GetCommittedBlockLength)
+.setContainerID(blockID.getContainerID())
+.setTraceID(traceID)
 .setDatanodeUuid(id)
 .setGetCommittedBlockLength(getBlockLengthRequestBuilder).build();
 ContainerCommandResponseProto response = 
xceiverClient.sendCommand(request);
@@ -143,6 +150,7 @@ public final class ContainerProtocolCalls  {
 ContainerCommandRequestProto request = ContainerCommandRequestProto
 .newBuilder()
 .setCmdType(Type.PutKey)
+.setContainerID(containerKeyData.getBlockID().getContainerID())
 .setTraceID(traceID)
 .setDatanodeUuid(id)
 .setPutKey(createKeyRequest)
@@ -171,6 +179,7 @@ public final class ContainerProtocolCalls  {
 ContainerCommandRequestProto request = ContainerCommandRequestProto
 .newBuilder()
 .setCmdType(Type.ReadChunk)
+.setContainerID(blockID.getContainerID())
 .setTraceID(traceID)
 .setDatanodeUuid(id)
 .setReadChunk(readChunkRequest)
@@ -202,6 +211,7 @@ public final class ContainerProtocolCalls  {
 ContainerCommandRequestProto request = ContainerCommandRequestProto
 .newBuilder()
 .setCmdType(Type.WriteChunk)
+.setCont

[41/50] hadoop git commit: HDDS-271. Create a block iterator to iterate blocks in a container. Contributed by Bharat Viswanadham.

2018-08-01 Thread xkrogen
HDDS-271. Create a block iterator to iterate blocks in a container. Contributed 
by Bharat Viswanadham.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c835fc08
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c835fc08
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c835fc08

Branch: refs/heads/HDFS-12943
Commit: c835fc08adf556d2f848f2f241155cbfe3375695
Parents: c7ebcd7
Author: Bharat Viswanadham 
Authored: Tue Jul 31 16:26:09 2018 -0700
Committer: Bharat Viswanadham 
Committed: Tue Jul 31 16:26:09 2018 -0700

--
 .../apache/hadoop/utils/MetaStoreIterator.java  |   2 +-
 .../common/interfaces/BlockIterator.java|  57 
 .../keyvalue/KeyValueBlockIterator.java | 148 ++
 .../keyvalue/TestKeyValueBlockIterator.java | 275 +++
 4 files changed, 481 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c835fc08/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/MetaStoreIterator.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/MetaStoreIterator.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/MetaStoreIterator.java
index 758d194..52d0a3e 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/MetaStoreIterator.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/MetaStoreIterator.java
@@ -24,7 +24,7 @@ import java.util.Iterator;
  * Iterator for MetaDataStore DB.
  * @param 
  */
-interface MetaStoreIterator extends Iterator {
+public interface MetaStoreIterator extends Iterator {
 
   /**
* seek to first entry.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c835fc08/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/BlockIterator.java
--
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/BlockIterator.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/BlockIterator.java
new file mode 100644
index 000..f6931e3
--- /dev/null
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/BlockIterator.java
@@ -0,0 +1,57 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *  http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.container.common.interfaces;
+
+
+import java.io.IOException;
+import java.util.NoSuchElementException;
+
+/**
+ * Block Iterator for container. Each container type need to implement this
+ * interface.
+ * @param 
+ */
+public interface BlockIterator {
+
+  /**
+   * This checks if iterator has next element. If it has returns true,
+   * otherwise false.
+   * @return boolean
+   */
+  boolean hasNext() throws IOException;
+
+  /**
+   * Seek to first entry.
+   */
+  void seekToFirst();
+
+  /**
+   * Seek to last entry.
+   */
+  void seekToLast();
+
+  /**
+   * Get next block in the container.
+   * @return next block or null if there are no blocks
+   * @throws IOException
+   */
+  T nextBlock() throws IOException, NoSuchElementException;
+
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c835fc08/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueBlockIterator.java
--
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueBlockIterator.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueBlockIterator.java
new file mode 100644
index 000..f800223
--- /dev/null
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueBlockIterator.java
@@ -0,0 +1,148 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license

[23/50] hadoop git commit: HDDS-246. Datanode should throw BlockNotCommittedException for uncommitted blocks to Ozone Client. Contributed by Shashikant Banerjee.

2018-08-01 Thread xkrogen
HDDS-246. Datanode should throw BlockNotCommittedException for uncommitted 
blocks to Ozone Client. Contributed by Shashikant Banerjee.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6b038f82
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6b038f82
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6b038f82

Branch: refs/heads/HDFS-12943
Commit: 6b038f82da8fa8c1c4f1e1bf448eacc6dd523044
Parents: 3d58684
Author: Mukul Kumar Singh 
Authored: Sat Jul 28 22:04:11 2018 +0530
Committer: Mukul Kumar Singh 
Committed: Sat Jul 28 22:04:11 2018 +0530

--
 .../main/proto/DatanodeContainerProtocol.proto  |  1 +
 .../common/impl/OpenContainerBlockMap.java  | 12 ++
 .../container/keyvalue/KeyValueHandler.java | 12 --
 .../ozone/scm/TestCommittedBlockLengthAPI.java  | 45 +++-
 4 files changed, 57 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6b038f82/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
--
diff --git a/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto 
b/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
index a3c4467..6969fa6 100644
--- a/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
+++ b/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
@@ -138,6 +138,7 @@ enum Result {
   CONTAINER_FILES_CREATE_ERROR = 32;
   CONTAINER_CHECKSUM_ERROR = 33;
   UNKNOWN_CONTAINER_TYPE = 34;
+  BLOCK_NOT_COMMITTED = 35;
 }
 
 /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6b038f82/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/OpenContainerBlockMap.java
--
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/OpenContainerBlockMap.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/OpenContainerBlockMap.java
index 6a93c9d..8e2667d 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/OpenContainerBlockMap.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/OpenContainerBlockMap.java
@@ -129,6 +129,18 @@ public class OpenContainerBlockMap {
 -> blocks.removeAndGetSize(blockID.getLocalID()) == 0? null: blocks);
   }
 
+  /**
+   * Returns true if the block exists in the map, false otherwise
+   *
+   * @param blockID
+   * @return True, if it exists, false otherwise
+   */
+  public boolean checkIfBlockExists(BlockID blockID) {
+KeyDataMap keyDataMap = containers.get(blockID.getContainerID());
+return keyDataMap == null ? false :
+keyDataMap.get(blockID.getLocalID()) != null;
+  }
+
   @VisibleForTesting
   KeyDataMap getKeyDataMap(long containerId) {
 return containers.get(containerId);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6b038f82/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
--
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
index b08e128..0b26a14 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
@@ -91,6 +91,8 @@ import static 
org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
 .Result.GET_SMALL_FILE_ERROR;
 import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
 .Result.PUT_SMALL_FILE_ERROR;
+import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
+.Result.BLOCK_NOT_COMMITTED;
 
 import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
 .Stage;
@@ -494,10 +496,14 @@ public class KeyValueHandler extends Handler {
 
 long blockLength;
 try {
-  BlockID blockID = BlockID.getFromProtobuf(
-  request.getGetCommittedBlockLength().getBlockID());
+  BlockID blockID = BlockID
+  .getFromProtobuf(request.getGetCommittedBlockLength().getBlockID());
+  // Check if it really exists in the openContainerBlockMap
+  if (openContainerBlockMap.checkIfBlockExists(blockID)) {
+String msg = "Block " + blockID + " is not committed yet.";
+throw new StorageContainerException(msg, BLOCK_NOT_COMMITTED);
+  }
 

[02/50] hadoop git commit: YARN-4606. CapacityScheduler: applications could get starved because computation of #activeUsers considers pending apps. Contributed by Manikandan R

2018-08-01 Thread xkrogen
YARN-4606. CapacityScheduler: applications could get starved because 
computation of #activeUsers considers pending apps. Contributed by Manikandan R


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9485c9ae
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9485c9ae
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9485c9ae

Branch: refs/heads/HDFS-12943
Commit: 9485c9aee6e9bb935c3e6ae4da81d70b621781de
Parents: 5f0b924
Author: Eric E Payne 
Authored: Wed Jul 25 16:22:04 2018 +
Committer: Eric E Payne 
Committed: Wed Jul 25 16:22:04 2018 +

--
 .../scheduler/capacity/UsersManager.java|  27 +++-
 .../capacity/TestCapacityScheduler.java | 128 +++
 .../capacity/TestContainerAllocation.java   |  43 +++
 3 files changed, 197 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9485c9ae/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/UsersManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/UsersManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/UsersManager.java
index 747a488..83ee6c0 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/UsersManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/UsersManager.java
@@ -85,6 +85,7 @@ public class UsersManager implements AbstractUsersManager {
 
   private final QueueMetrics metrics;
   private AtomicInteger activeUsers = new AtomicInteger(0);
+  private AtomicInteger activeUsersWithOnlyPendingApps = new AtomicInteger(0);
   private Map<String, Set<ApplicationId>> usersApplications =
   new HashMap<String, Set<ApplicationId>>();
 
@@ -671,9 +672,23 @@ public class UsersManager implements AbstractUsersManager {
 // update in local storage
 userLimitPerSchedulingMode.put(schedulingMode, computedUserLimit);
 
+computeNumActiveUsersWithOnlyPendingApps();
+
 return userLimitPerSchedulingMode;
   }
 
+  // This method is called within the lock.
+  private void computeNumActiveUsersWithOnlyPendingApps() {
+int numPendingUsers = 0;
+for (User user : users.values()) {
+  if ((user.getPendingApplications() > 0)
+  && (user.getActiveApplications() <= 0)) {
+numPendingUsers++;
+  }
+}
+activeUsersWithOnlyPendingApps = new AtomicInteger(numPendingUsers);
+  }
+
   private Resource computeUserLimit(String userName, Resource clusterResource,
   String nodePartition, SchedulingMode schedulingMode, boolean activeUser) 
{
 Resource partitionResource = labelManager.getResourceByLabel(nodePartition,
@@ -839,6 +854,11 @@ public class UsersManager implements AbstractUsersManager {
 try {
   this.writeLock.lock();
 
+  User userDesc = getUser(user);
+  if (userDesc != null && userDesc.getActiveApplications() <= 0) {
+return;
+  }
+
   Set<ApplicationId> userApps = usersApplications.get(user);
   if (userApps == null) {
 userApps = new HashSet<ApplicationId>();
@@ -893,7 +913,7 @@ public class UsersManager implements AbstractUsersManager {
 
   @Override
   public int getNumActiveUsers() {
-return activeUsers.get();
+return activeUsers.get() + activeUsersWithOnlyPendingApps.get();
   }
 
   float sumActiveUsersTimesWeights() {
@@ -1090,4 +1110,9 @@ public class UsersManager implements AbstractUsersManager 
{
   this.writeLock.unlock();
 }
   }
+
+  @VisibleForTesting
+  public int getNumActiveUsersWithOnlyPendingApps() {
+return activeUsersWithOnlyPendingApps.get();
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9485c9ae/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/sch

[10/50] hadoop git commit: YARN-8545. Return allocated resource to RM for failed container. Contributed by Chandni Singh

2018-08-01 Thread xkrogen
YARN-8545.  Return allocated resource to RM for failed container.
Contributed by Chandni Singh


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/40fad328
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/40fad328
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/40fad328

Branch: refs/heads/HDFS-12943
Commit: 40fad32824d2f8f960c779d78357e62103453da0
Parents: d70d845
Author: Eric Yang 
Authored: Thu Jul 26 18:22:57 2018 -0400
Committer: Eric Yang 
Committed: Thu Jul 26 18:22:57 2018 -0400

--
 .../hadoop/yarn/service/ServiceScheduler.java   |  3 +-
 .../yarn/service/component/Component.java   | 42 +++-
 .../component/instance/ComponentInstance.java   | 21 +++---
 .../instance/ComponentInstanceEvent.java|  2 +
 .../containerlaunch/ContainerLaunchService.java | 12 --
 .../hadoop/yarn/service/MockServiceAM.java  | 34 +++-
 .../hadoop/yarn/service/TestServiceAM.java  | 35 
 .../yarn/service/component/TestComponent.java   |  3 +-
 .../instance/TestComponentInstance.java | 26 ++--
 9 files changed, 135 insertions(+), 43 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/40fad328/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceScheduler.java
index d3e8e4f..cfaf356 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceScheduler.java
@@ -687,7 +687,8 @@ public class ServiceScheduler extends CompositeService {
 }
 ComponentEvent event =
 new ComponentEvent(instance.getCompName(), CONTAINER_COMPLETED)
-.setStatus(status).setInstance(instance);
+.setStatus(status).setInstance(instance)
+.setContainerId(containerId);
 dispatcher.getEventHandler().handle(event);
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/40fad328/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/Component.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/Component.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/Component.java
index a1ee796..aaa23da 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/Component.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/Component.java
@@ -19,6 +19,7 @@
 package org.apache.hadoop.yarn.service.component;
 
 import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
 import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.ContainerStatus;
 import org.apache.hadoop.yarn.api.records.ExecutionType;
@@ -518,10 +519,10 @@ public class Component implements 
EventHandler {
   private static class ContainerCompletedTransition extends BaseTransition {
 @Override
 public void transition(Component component, ComponentEvent event) {
-
+  Preconditions.checkNotNull(event.getContainerId());
   component.updateMetrics(event.getStatus());
   component.dispatcher.getEventHandler().handle(
-  new ComponentInstanceEvent(event.getStatus().getContainerId(), STOP)
+  new ComponentInstanceEvent(event.getContainerId(), STOP)
   .setStatus(event.getStatus()));
 
   ComponentRestartPolicy restartPolicy =
@@ -784,28 +785,33 @@ public class Component implements 
EventHandler {
   }
 
   private void

[13/50] hadoop git commit: HDFS-13727. Log full stack trace if DiskBalancer exits with an unhandled exception. Contributed by Gabor Bota.

2018-08-01 Thread xkrogen
HDFS-13727. Log full stack trace if DiskBalancer exits with an unhandled 
exception.
Contributed by Gabor Bota.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/64e739e3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/64e739e3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/64e739e3

Branch: refs/heads/HDFS-12943
Commit: 64e739e344ac474046d4f4ecf0865dd92be13762
Parents: 8d3c068
Author: Anu Engineer 
Authored: Fri Jul 27 06:11:56 2018 -0700
Committer: Anu Engineer 
Committed: Fri Jul 27 06:11:56 2018 -0700

--
 .../main/java/org/apache/hadoop/hdfs/tools/DiskBalancerCLI.java  | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/64e739e3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DiskBalancerCLI.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DiskBalancerCLI.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DiskBalancerCLI.java
index 00e6f04..34bd68b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DiskBalancerCLI.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DiskBalancerCLI.java
@@ -172,7 +172,9 @@ public class DiskBalancerCLI extends Configured implements 
Tool {
 try {
   res = ToolRunner.run(shell, argv);
 } catch (Exception ex) {
-  LOG.error(ex.toString());
+  String msg = String.format("Exception thrown while running %s.",
+  DiskBalancerCLI.class.getSimpleName());
+  LOG.error(msg, ex);
   res = 1;
 }
 System.exit(res);


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[28/50] hadoop git commit: HDFS-12716. 'dfs.datanode.failed.volumes.tolerated' to support minimum number of volumes to be available. Contributed by Ranith Sardar and usharani

2018-08-01 Thread xkrogen
HDFS-12716. 'dfs.datanode.failed.volumes.tolerated' to support minimum number 
of volumes to be available. Contributed by Ranith Sardar and usharani


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3108d27e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3108d27e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3108d27e

Branch: refs/heads/HDFS-12943
Commit: 3108d27edde941d153a58f71fb1096cce2995531
Parents: 63e08ec
Author: Brahma Reddy Battula 
Authored: Mon Jul 30 15:50:04 2018 +0530
Committer: Brahma Reddy Battula 
Committed: Mon Jul 30 15:50:04 2018 +0530

--
 .../hadoop/hdfs/server/datanode/DataNode.java   |  7 +++-
 .../datanode/checker/DatasetVolumeChecker.java  |  6 ++-
 .../checker/StorageLocationChecker.java | 28 ++
 .../datanode/fsdataset/impl/FsDatasetImpl.java  | 40 
 .../src/main/resources/hdfs-default.xml |  2 +
 .../TestDataNodeVolumeFailureToleration.java|  6 ++-
 6 files changed, 68 insertions(+), 21 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3108d27e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index 7df92f6..1e9c57a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -293,6 +293,8 @@ public class DataNode extends ReconfigurableBase
   "  and rolling upgrades.";
 
   static final int CURRENT_BLOCK_FORMAT_VERSION = 1;
+  public static final int MAX_VOLUME_FAILURE_TOLERATED_LIMIT = -1;
+  public static final String MAX_VOLUME_FAILURES_TOLERATED_MSG = "should be 
greater than -1";
 
   /** A list of property that are reconfigurable at runtime. */
   private static final List RECONFIGURABLE_PROPERTIES =
@@ -1389,10 +1391,11 @@ public class DataNode extends ReconfigurableBase
 
 int volFailuresTolerated = dnConf.getVolFailuresTolerated();
 int volsConfigured = dnConf.getVolsConfigured();
-if (volFailuresTolerated < 0 || volFailuresTolerated >= volsConfigured) {
+if (volFailuresTolerated < MAX_VOLUME_FAILURE_TOLERATED_LIMIT
+|| volFailuresTolerated >= volsConfigured) {
   throw new DiskErrorException("Invalid value configured for "
   + "dfs.datanode.failed.volumes.tolerated - " + volFailuresTolerated
-  + ". Value configured is either less than 0 or >= "
+  + ". Value configured is either greater than -1 or >= "
   + "to the number of configured volumes (" + volsConfigured + ").");
 }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3108d27e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/DatasetVolumeChecker.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/DatasetVolumeChecker.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/DatasetVolumeChecker.java
index 3889e23..30602c0 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/DatasetVolumeChecker.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/DatasetVolumeChecker.java
@@ -28,6 +28,7 @@ import com.google.common.util.concurrent.ListenableFuture;
 import com.google.common.util.concurrent.ThreadFactoryBuilder;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
@@ -153,10 +154,11 @@ public class DatasetVolumeChecker {
 
 lastAllVolumesCheck = timer.monotonicNow() - minDiskCheckGapMs;
 
-if (maxVolumeFailuresTolerated < 0) {
+if (maxVolumeFailuresTolerated < 
DataNode.MAX_VOLUME_FAILURE_TOLERATED_LIMIT) {
   throw new DiskErrorException("Invalid value configured for "
   + DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY + " - "
-  + maxVolumeFailuresTolerated + " (should be non-negative)");
+  + maxVolumeFailuresTolerated + " "
+  + DataNode.MAX_VOLUME_FAILURES_TOLERATED_MSG);
 }
 

[35/50] hadoop git commit: HDDS-305. Datanode StateContext#addContainerActionIfAbsent will add container action even if there already is a ContainerAction. Contributed by Nanda kumar.

2018-08-01 Thread xkrogen
HDDS-305. Datanode StateContext#addContainerActionIfAbsent will add container 
action even if there already is a ContainerAction. Contributed by Nanda kumar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7631e0ad
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7631e0ad
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7631e0ad

Branch: refs/heads/HDFS-12943
Commit: 7631e0adaefcccdbee693089b4c391bea4107a19
Parents: 3e06a5d
Author: Nanda kumar 
Authored: Tue Jul 31 17:27:51 2018 +0530
Committer: Nanda kumar 
Committed: Tue Jul 31 17:27:51 2018 +0530

--
 .../ozone/container/common/impl/HddsDispatcher.java| 13 +
 .../main/proto/StorageContainerDatanodeProtocol.proto  |  2 +-
 .../states/endpoint/TestHeartbeatEndpointTask.java |  5 +
 3 files changed, 3 insertions(+), 17 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7631e0ad/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
--
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
index ee232db..d92eb17 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
@@ -168,19 +168,8 @@ public class HddsDispatcher implements ContainerDispatcher 
{
   double containerUsedPercentage = 1.0f * containerData.getBytesUsed() /
   StorageUnit.GB.toBytes(containerData.getMaxSizeGB());
   if (containerUsedPercentage >= containerCloseThreshold) {
-
-ContainerInfo containerInfo = ContainerInfo.newBuilder()
-.setContainerID(containerData.getContainerID())
-.setReadCount(containerData.getReadCount())
-.setWriteCount(containerData.getWriteCount())
-.setReadBytes(containerData.getReadBytes())
-.setWriteBytes(containerData.getWriteBytes())
-.setUsed(containerData.getBytesUsed())
-.setState(HddsProtos.LifeCycleState.OPEN)
-.build();
-
 ContainerAction action = ContainerAction.newBuilder()
-.setContainer(containerInfo)
+.setContainerID(containerData.getContainerID())
 .setAction(ContainerAction.Action.CLOSE)
 .setReason(ContainerAction.Reason.CONTAINER_FULL)
 .build();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7631e0ad/hadoop-hdds/container-service/src/main/proto/StorageContainerDatanodeProtocol.proto
--
diff --git 
a/hadoop-hdds/container-service/src/main/proto/StorageContainerDatanodeProtocol.proto
 
b/hadoop-hdds/container-service/src/main/proto/StorageContainerDatanodeProtocol.proto
index 0c52efb..71c41e3 100644
--- 
a/hadoop-hdds/container-service/src/main/proto/StorageContainerDatanodeProtocol.proto
+++ 
b/hadoop-hdds/container-service/src/main/proto/StorageContainerDatanodeProtocol.proto
@@ -157,7 +157,7 @@ message ContainerAction {
 CONTAINER_FULL = 1;
   }
 
-  required ContainerInfo container = 1;
+  required int64 containerID = 1;
   required Action action = 2;
   optional Reason reason = 3;
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7631e0ad/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/states/endpoint/TestHeartbeatEndpointTask.java
--
diff --git 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/states/endpoint/TestHeartbeatEndpointTask.java
 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/states/endpoint/TestHeartbeatEndpointTask.java
index b4d718d..13de11f 100644
--- 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/states/endpoint/TestHeartbeatEndpointTask.java
+++ 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/states/endpoint/TestHeartbeatEndpointTask.java
@@ -289,10 +289,7 @@ public class TestHeartbeatEndpointTask {
 
   private ContainerAction getContainerAction() {
 ContainerAction.Builder builder = ContainerAction.newBuilder();
-ContainerInfo containerInfo = ContainerInfo.newBuilder()
-.setContainerID(1L)
-.build();
-builder.setContainer(containerInfo)
+builder.setContainerID(1L)
 .setAction(ContainerAction.Action.CLOSE)
 .setR

[04/50] hadoop git commit: YARN-8330. Improved publishing ALLOCATED events to ATS. Contributed by Suma Shivaprasad

2018-08-01 Thread xkrogen
YARN-8330.  Improved publishing ALLOCATED events to ATS.
Contributed by Suma Shivaprasad


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f93ecf5c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f93ecf5c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f93ecf5c

Branch: refs/heads/HDFS-12943
Commit: f93ecf5c1e0b3db27424963814fc01ec43eb76e0
Parents: e95c5e9
Author: Eric Yang 
Authored: Wed Jul 25 18:49:30 2018 -0400
Committer: Eric Yang 
Committed: Wed Jul 25 18:49:30 2018 -0400

--
 .../rmcontainer/RMContainerImpl.java| 64 +++-
 .../rmcontainer/TestRMContainerImpl.java| 11 +++-
 2 files changed, 43 insertions(+), 32 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f93ecf5c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java
index efac666..945e7cb 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java
@@ -244,23 +244,13 @@ public class RMContainerImpl implements RMContainer {
 this.readLock = lock.readLock();
 this.writeLock = lock.writeLock();
 
-saveNonAMContainerMetaInfo = rmContext.getYarnConfiguration().getBoolean(
-   YarnConfiguration.APPLICATION_HISTORY_SAVE_NON_AM_CONTAINER_META_INFO,
-   YarnConfiguration
- .DEFAULT_APPLICATION_HISTORY_SAVE_NON_AM_CONTAINER_META_INFO);
+saveNonAMContainerMetaInfo =
+shouldPublishNonAMContainerEventstoATS(rmContext);
 
 if (container.getId() != null) {
   rmContext.getRMApplicationHistoryWriter().containerStarted(this);
 }
 
-// If saveNonAMContainerMetaInfo is true, store system metrics for all
-// containers. If false, and if this container is marked as the AM, metrics
-// will still be published for this container, but that calculation happens
-// later.
-if (saveNonAMContainerMetaInfo && null != container.getId()) {
-  rmContext.getSystemMetricsPublisher().containerCreated(
-  this, this.creationTime);
-}
 if (this.container != null) {
   this.allocationTags = this.container.getAllocationTags();
 }
@@ -590,8 +580,12 @@ public class RMContainerImpl implements RMContainer {
   container.getNodeId(), container.getContainerId(),
   container.getAllocationTags());
 
-  container.eventHandler.handle(new RMAppAttemptEvent(
-  container.appAttemptId, RMAppAttemptEventType.CONTAINER_ALLOCATED));
+  container.eventHandler.handle(
+  new RMAppAttemptEvent(container.appAttemptId,
+  RMAppAttemptEventType.CONTAINER_ALLOCATED));
+
+  publishNonAMContainerEventstoATS(container);
+
 }
   }
 
@@ -610,9 +604,11 @@ public class RMContainerImpl implements RMContainer {
   // Tell the app
   container.eventHandler.handle(new RMAppRunningOnNodeEvent(container
   .getApplicationAttemptId().getApplicationId(), container.nodeId));
+
+  publishNonAMContainerEventstoATS(container);
 }
   }
-  
+
   private static final class ContainerAcquiredWhileRunningTransition extends
   BaseTransition {
 
@@ -718,17 +714,12 @@ public class RMContainerImpl implements RMContainer {
 container);
 
   boolean saveNonAMContainerMetaInfo =
-  container.rmContext.getYarnConfiguration().getBoolean(
-  YarnConfiguration
-.APPLICATION_HISTORY_SAVE_NON_AM_CONTAINER_META_INFO,
-  YarnConfiguration
-.DEFAULT_APPLICATION_HISTORY_SAVE_NON_AM_CONTAINER_META_INFO);
+  shouldPublishNonAMContainerEventstoATS(container.rmContext);
 
   if (saveNonAMContainerMetaInfo || container.isAMContainer()) {
 container.rmContext.getSystemMetricsPublisher().containerFinished(
 container, container.finishTime);
   }
-
 }
 
 private static void updateAttemptMetrics(RMContainerImpl container) {
@@ -754,6 +745,29 @@ public class RMContainerImpl implements

[18/50] hadoop git commit: YARN-8517. getContainer and getContainers ResourceManager REST API methods are not documented (snemeth via rkanter)

2018-08-01 Thread xkrogen
YARN-8517. getContainer and getContainers ResourceManager REST API methods are 
not documented (snemeth via rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2cccf406
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2cccf406
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2cccf406

Branch: refs/heads/HDFS-12943
Commit: 2cccf4061cc4021c48e29879700dbc94f832b7d1
Parents: fecbac4
Author: Robert Kanter 
Authored: Fri Jul 27 14:35:03 2018 -0700
Committer: Robert Kanter 
Committed: Fri Jul 27 14:35:03 2018 -0700

--
 .../InvalidResourceRequestException.java|  36 ++
 .../resourcemanager/DefaultAMSProcessor.java|  23 +-
 .../scheduler/SchedulerUtils.java   |  55 +-
 .../scheduler/TestSchedulerUtils.java   | 630 ++-
 4 files changed, 430 insertions(+), 314 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2cccf406/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/exceptions/InvalidResourceRequestException.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/exceptions/InvalidResourceRequestException.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/exceptions/InvalidResourceRequestException.java
index f4fd2fa..1ea9eef 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/exceptions/InvalidResourceRequestException.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/exceptions/InvalidResourceRequestException.java
@@ -30,19 +30,55 @@ import org.apache.hadoop.yarn.api.records.ResourceRequest;
  * 
  */
 public class InvalidResourceRequestException extends YarnException {
+  public static final String LESS_THAN_ZERO_RESOURCE_MESSAGE_TEMPLATE =
+  "Invalid resource request! Cannot allocate containers as "
+  + "requested resource is less than 0! "
+  + "Requested resource type=[%s], " + "Requested resource=%s";
+
+  public static final String GREATER_THAN_MAX_RESOURCE_MESSAGE_TEMPLATE =
+  "Invalid resource request! Cannot allocate containers as "
+  + "requested resource is greater than " +
+  "maximum allowed allocation. "
+  + "Requested resource type=[%s], "
+  + "Requested resource=%s, maximum allowed allocation=%s, "
+  + "please note that maximum allowed allocation is calculated 
"
+  + "by scheduler based on maximum resource of registered "
+  + "NodeManagers, which might be less than configured "
+  + "maximum allocation=%s";
+
+  public static final String UNKNOWN_REASON_MESSAGE_TEMPLATE =
+  "Invalid resource request! "
+  + "Cannot allocate containers for an unknown reason! "
+  + "Requested resource type=[%s], Requested resource=%s";
+
+  public enum InvalidResourceType {
+LESS_THAN_ZERO, GREATER_THEN_MAX_ALLOCATION, UNKNOWN;
+  }
 
   private static final long serialVersionUID = 13498237L;
+  private final InvalidResourceType invalidResourceType;
 
   public InvalidResourceRequestException(Throwable cause) {
 super(cause);
+this.invalidResourceType = InvalidResourceType.UNKNOWN;
   }
 
   public InvalidResourceRequestException(String message) {
+this(message, InvalidResourceType.UNKNOWN);
+  }
+
+  public InvalidResourceRequestException(String message,
+  InvalidResourceType invalidResourceType) {
 super(message);
+this.invalidResourceType = invalidResourceType;
   }
 
   public InvalidResourceRequestException(String message, Throwable cause) {
 super(message, cause);
+this.invalidResourceType = InvalidResourceType.UNKNOWN;
   }
 
+  public InvalidResourceType getInvalidResourceType() {
+return invalidResourceType;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2cccf406/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/DefaultAMSProcessor.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/DefaultAMSProcessor.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/DefaultAMSProcessor.java
index 71558a7..43f73e4 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-serv

[07/50] hadoop git commit: HDFS-13622. mkdir should print the parent directory in the error message when parent directories do not exist. Contributed by Shweta.

2018-08-01 Thread xkrogen
HDFS-13622. mkdir should print the parent directory in the error message when 
parent directories do not exist. Contributed by Shweta.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/be150a17
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/be150a17
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/be150a17

Branch: refs/heads/HDFS-12943
Commit: be150a17b15d15f5de6d4839d5e805e8d6c57850
Parents: a192295
Author: Xiao Chen 
Authored: Thu Jul 26 10:23:30 2018 -0700
Committer: Xiao Chen 
Committed: Thu Jul 26 10:24:32 2018 -0700

--
 .../main/java/org/apache/hadoop/fs/shell/Mkdir.java| 13 -
 .../test/java/org/apache/hadoop/hdfs/TestDFSShell.java |  8 
 .../hadoop-hdfs/src/test/resources/testHDFSConf.xml|  4 ++--
 3 files changed, 18 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/be150a17/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Mkdir.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Mkdir.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Mkdir.java
index 9f39da2..5828b0b 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Mkdir.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Mkdir.java
@@ -68,11 +68,14 @@ class Mkdir extends FsCommand {
 
   @Override
   protected void processNonexistentPath(PathData item) throws IOException {
-// check if parent exists. this is complicated because getParent(a/b/c/) 
returns a/b/c, but
-// we want a/b
-if (!createParents &&
-!item.fs.exists(new Path(item.path.toString()).getParent())) {
-  throw new PathNotFoundException(item.toString());
+if (!createParents) {
+  // check if parent exists. this is complicated because getParent(a/b/c/) 
returns a/b/c, but
+  // we want a/b
+  final Path itemPath = new Path(item.path.toString());
+  final Path itemParentPath = itemPath.getParent();
+  if (!item.fs.exists(itemParentPath)) {
+throw new PathNotFoundException(itemParentPath.toString());
+  }
 }
 if (!item.fs.mkdirs(item.path)) {
   throw new PathIOException(item.toString());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/be150a17/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
index b19bdea..1d2042e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
@@ -721,6 +721,14 @@ public class TestDFSShell {
   assertTrue(" -mkdir returned this is a file ",
   (returned.lastIndexOf("not a directory") != -1));
   out.reset();
+  argv[0] = "-mkdir";
+  argv[1] = "/testParent/testChild";
+  ret = ToolRunner.run(shell, argv);
+  returned = out.toString();
+  assertEquals(" -mkdir returned 1", 1, ret);
+  assertTrue(" -mkdir returned there is No file or directory but has 
testChild in the path",
+  (returned.lastIndexOf("testChild") == -1));
+  out.reset();
   argv = new String[3];
   argv[0] = "-mv";
   argv[1] = "/testfile";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/be150a17/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml
index a13c441..4ab093b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml
@@ -6183,11 +6183,11 @@
   
 
   RegexpComparator
-  mkdir: `dir0/dir1': No such file or 
directory
+  .*mkdir:.*dir0': No such file or 
directory$
 
   
 
-
+
  
   mkdir: Test recreate of existing directory 
fails
   


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[32/50] hadoop git commit: YARN-8584. Several typos in Log Aggregation related classes. Contributed by Szilard Nemeth.

2018-08-01 Thread xkrogen
YARN-8584. Several typos in Log Aggregation related classes. Contributed by 
Szilard Nemeth.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2b39ad26
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2b39ad26
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2b39ad26

Branch: refs/heads/HDFS-12943
Commit: 2b39ad26984d641bad57db2cfcc0b7515ef95f46
Parents: e8f952e
Author: bibinchundatt 
Authored: Mon Jul 30 23:25:19 2018 +0530
Committer: bibinchundatt 
Committed: Mon Jul 30 23:25:19 2018 +0530

--
 .../AggregatedLogDeletionService.java   |  4 +--
 .../logaggregation/AggregatedLogFormat.java |  8 +++---
 .../LogAggregationFileController.java   |  6 ++---
 .../ifile/IndexedFileAggregatedLogsBlock.java   |  6 ++---
 .../LogAggregationIndexedFileController.java| 26 ++--
 .../tfile/LogAggregationTFileController.java|  2 +-
 .../TestAggregatedLogDeletionService.java   |  6 ++---
 .../logaggregation/AppLogAggregatorImpl.java|  2 +-
 .../logaggregation/LogAggregationService.java   |  6 ++---
 .../tracker/NMLogAggregationStatusTracker.java  |  4 +--
 10 files changed, 35 insertions(+), 35 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b39ad26/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogDeletionService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogDeletionService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogDeletionService.java
index 562bd2c..841b870 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogDeletionService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogDeletionService.java
@@ -258,7 +258,7 @@ public class AggregatedLogDeletionService extends 
AbstractService {
   return;
 }
 setLogAggCheckIntervalMsecs(retentionSecs);
-task = new LogDeletionTask(conf, retentionSecs, creatRMClient());
+task = new LogDeletionTask(conf, retentionSecs, createRMClient());
 timer = new Timer();
 timer.scheduleAtFixedRate(task, 0, checkIntervalMsecs);
   }
@@ -281,7 +281,7 @@ public class AggregatedLogDeletionService extends 
AbstractService {
   // We have already marked ApplicationClientProtocol.getApplicationReport
   // as @Idempotent, it will automatically take care of RM restart/failover.
   @VisibleForTesting
-  protected ApplicationClientProtocol creatRMClient() throws IOException {
+  protected ApplicationClientProtocol createRMClient() throws IOException {
 return ClientRMProxy.createRMProxy(getConfig(),
   ApplicationClientProtocol.class);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b39ad26/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogFormat.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogFormat.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogFormat.java
index 4ee5c8a..d9b4c1e4 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogFormat.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogFormat.java
@@ -178,7 +178,7 @@ public class AggregatedLogFormat {
  * The set of log files that are older than retention policy that will
  * not be uploaded but ready for deletion.
  */
-private final Set obseleteRetentionLogFiles = new HashSet();
+private final Set obsoleteRetentionLogFiles = new HashSet();
 
 // TODO Maybe add a version string here. Instead of changing the version of
 // the entire k-v format
@@ -324,7 +324,7 @@ public class AggregatedLogFormat {
   // if log files are older than retention policy, do not upload them.
   // but schedule them for deletion.
   if(logRetentionContext != null && 
!logRetentionContext.shouldRetainLog()){
-obseleteRetentionLogFiles.addAll(candidates);
+obsoleteRetentionLogFiles.addAll(candidates);
 candidates.clear();
 return candidates;
   }
@@ -396,9 +396,9 @@ public class AggregatedLogFormat {
   return info;
 }
 
-public Set getObseleteRetentio

[49/50] hadoop git commit: Merge branch 'trunk' into HDFS-12943

2018-08-01 Thread xkrogen
Merge branch 'trunk' into HDFS-12943


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cad93969
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cad93969
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cad93969

Branch: refs/heads/HDFS-12943
Commit: cad939695611cd00fad3224f7310e54cdfa40bea
Parents: 9b55946 67c65da
Author: Erik Krogen 
Authored: Wed Aug 1 10:05:56 2018 -0700
Committer: Erik Krogen 
Committed: Wed Aug 1 10:05:56 2018 -0700

--
 .../src/main/conf/hadoop-policy.xml |  20 +
 .../org/apache/hadoop/crypto/key/KeyShell.java  |  32 +-
 .../key/kms/LoadBalancingKMSClientProvider.java |  19 +-
 .../java/org/apache/hadoop/fs/ChecksumFs.java   |  37 ++
 .../java/org/apache/hadoop/fs/CreateFlag.java   |   9 +-
 .../org/apache/hadoop/fs/LocalDirAllocator.java |  28 +-
 .../java/org/apache/hadoop/fs/shell/Mkdir.java  |  13 +-
 .../hadoop/io/file/tfile/Compression.java   |  31 +-
 .../apache/hadoop/ipc/DecayRpcScheduler.java|   8 +
 .../ipc/WeightedRoundRobinMultiplexer.java  |   3 +
 .../hadoop/security/UserGroupInformation.java   | 179 +++--
 .../authorize/DefaultImpersonationProvider.java |   4 +-
 .../apache/hadoop/service/AbstractService.java  |   2 +-
 .../org/apache/hadoop/tools/CommandShell.java   |   6 +-
 .../kms/TestLoadBalancingKMSClientProvider.java |  79 +++
 .../fs/FileContextMainOperationsBaseTest.java   |  38 ++
 .../hadoop/io/file/tfile/TestCompression.java   |  34 +-
 .../hadoop/security/TestGroupsCaching.java  |  17 +-
 .../security/TestUserGroupInformation.java  |  38 ++
 .../TestDefaultImpersonationProvider.java   | 100 +++
 .../hadoop/hdds/scm/XceiverClientRatis.java |  30 +-
 .../org/apache/hadoop/hdds/HddsConfigKeys.java  |  32 +-
 .../java/org/apache/hadoop/hdds/HddsUtils.java  |  33 +
 .../org/apache/hadoop/hdds/client/BlockID.java  |  20 +-
 .../apache/hadoop/hdds/scm/ScmConfigKeys.java   |  17 +-
 .../container/common/helpers/ContainerInfo.java |   7 +-
 .../scm/container/common/helpers/Pipeline.java  |   7 +
 .../scm/storage/ContainerProtocolCalls.java |  63 +-
 .../apache/hadoop/ozone/OzoneConfigKeys.java|   6 +-
 .../org/apache/hadoop/ozone/OzoneConsts.java|   7 +-
 .../org/apache/hadoop/ozone/common/Storage.java |   2 +-
 .../ozone/container/common/helpers/KeyData.java |  89 ++-
 .../apache/hadoop/ozone/lease/LeaseManager.java |  14 +-
 .../org/apache/hadoop/utils/LevelDBStore.java   |   5 +
 .../hadoop/utils/LevelDBStoreIterator.java  |  64 ++
 .../apache/hadoop/utils/MetaStoreIterator.java  |  39 ++
 .../org/apache/hadoop/utils/MetadataStore.java  |  55 ++
 .../org/apache/hadoop/utils/RocksDBStore.java   |   5 +
 .../hadoop/utils/RocksDBStoreIterator.java  |  66 ++
 .../main/proto/DatanodeContainerProtocol.proto  | 106 +--
 .../common/src/main/resources/ozone-default.xml |  58 +-
 .../hadoop/ozone/lease/TestLeaseManager.java|  21 +-
 .../apache/hadoop/utils/TestMetadataStore.java  | 206 +++---
 .../apache/hadoop/hdds/scm/HddsServerUtil.java  |  13 +-
 .../hadoop/ozone/HddsDatanodeService.java   |   2 +
 .../common/helpers/ContainerUtils.java  | 114 ++--
 .../container/common/impl/ContainerData.java| 128 ++--
 .../common/impl/ContainerDataYaml.java  |  98 ++-
 .../container/common/impl/HddsDispatcher.java   | 103 +--
 .../common/impl/OpenContainerBlockMap.java  | 148 +
 .../common/interfaces/BlockIterator.java|  57 ++
 .../container/common/interfaces/Container.java  |   6 +
 .../report/CommandStatusReportPublisher.java|  24 +-
 .../common/report/ContainerReportPublisher.java |  25 +-
 .../common/report/NodeReportPublisher.java  |  32 +-
 .../common/report/ReportPublisher.java  |  14 +-
 .../statemachine/DatanodeStateMachine.java  |   2 +-
 .../statemachine/EndpointStateMachine.java  |   4 +-
 .../statemachine/SCMConnectionManager.java  |   2 +-
 .../common/statemachine/StateContext.java   |  71 +-
 .../CloseContainerCommandHandler.java   |   8 +-
 .../commandhandler/CommandHandler.java  |   6 +-
 .../DeleteBlocksCommandHandler.java |  12 +-
 .../states/endpoint/HeartbeatEndpointTask.java  |  33 +-
 .../states/endpoint/RegisterEndpointTask.java   |   3 +-
 .../states/endpoint/VersionEndpointTask.java|  27 +-
 .../server/ratis/ContainerStateMachine.java |  20 +-
 .../container/common/utils/HddsVolumeUtil.java  |  56 ++
 .../container/common/volume/HddsVolume.java | 128 +++-
 .../container/common/volume/VolumeInfo.java |   8 +
 .../container/common/volume/VolumeSet.java  |  60 +-
 .../container/common/volume/VolumeUsage.java|  17 -
 .../keyvalue/KeyValueBlockIterator.java | 148 +
 .../container/keyvalue/KeyValueContainer.java   | 129 ++--
 .../keyvalue/KeyValueContainerData.java |  95 +--
 .../container/keyvalue/KeyV

[11/50] hadoop git commit: HADOOP-15593. Fixed NPE in UGI spawnAutoRenewalThreadForUserCreds. Contributed by Gabor Bota

2018-08-01 Thread xkrogen
HADOOP-15593.  Fixed NPE in UGI spawnAutoRenewalThreadForUserCreds.
   Contributed by Gabor Bota


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/77721f39
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/77721f39
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/77721f39

Branch: refs/heads/HDFS-12943
Commit: 77721f39e26b630352a1f4087524a3fbd21ff06e
Parents: 40fad32
Author: Eric Yang 
Authored: Thu Jul 26 18:35:36 2018 -0400
Committer: Eric Yang 
Committed: Thu Jul 26 18:35:36 2018 -0400

--
 .../hadoop/security/UserGroupInformation.java   | 179 ---
 .../security/TestUserGroupInformation.java  |  38 
 2 files changed, 148 insertions(+), 69 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/77721f39/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
index 29b9fea..6ce72edb 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
@@ -40,6 +40,7 @@ import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
+import java.util.Date;
 import java.util.EnumMap;
 import java.util.HashMap;
 import java.util.Iterator;
@@ -851,81 +852,121 @@ public class UserGroupInformation {
 }
 
 //spawn thread only if we have kerb credentials
-Thread t = new Thread(new Runnable() {
+KerberosTicket tgt = getTGT();
+if (tgt == null) {
+  return;
+}
+String cmd = conf.get("hadoop.kerberos.kinit.command", "kinit");
+long nextRefresh = getRefreshTime(tgt);
+Thread t =
+new Thread(new AutoRenewalForUserCredsRunnable(tgt, cmd, nextRefresh));
+t.setDaemon(true);
+t.setName("TGT Renewer for " + getUserName());
+t.start();
+  }
+
+  @VisibleForTesting
+  class AutoRenewalForUserCredsRunnable implements Runnable {
+private KerberosTicket tgt;
+private RetryPolicy rp;
+private String kinitCmd;
+private long nextRefresh;
+private boolean runRenewalLoop = true;
+
+AutoRenewalForUserCredsRunnable(KerberosTicket tgt, String kinitCmd,
+long nextRefresh){
+  this.tgt = tgt;
+  this.kinitCmd = kinitCmd;
+  this.nextRefresh = nextRefresh;
+  this.rp = null;
+}
+
+public void setRunRenewalLoop(boolean runRenewalLoop) {
+  this.runRenewalLoop = runRenewalLoop;
+}
 
-  @Override
-  public void run() {
-String cmd = conf.get("hadoop.kerberos.kinit.command", "kinit");
-KerberosTicket tgt = getTGT();
-if (tgt == null) {
+@Override
+public void run() {
+  do {
+try {
+  long now = Time.now();
+  if (LOG.isDebugEnabled()) {
+LOG.debug("Current time is " + now);
+LOG.debug("Next refresh is " + nextRefresh);
+  }
+  if (now < nextRefresh) {
+Thread.sleep(nextRefresh - now);
+  }
+  String output = Shell.execCommand(kinitCmd, "-R");
+  if (LOG.isDebugEnabled()) {
+LOG.debug("Renewed ticket. kinit output: {}", output);
+  }
+  reloginFromTicketCache();
+  tgt = getTGT();
+  if (tgt == null) {
+LOG.warn("No TGT after renewal. Aborting renew thread for " +
+getUserName());
+return;
+  }
+  nextRefresh = Math.max(getRefreshTime(tgt),
+  now + kerberosMinSecondsBeforeRelogin);
+  metrics.renewalFailures.set(0);
+  rp = null;
+} catch (InterruptedException ie) {
+  LOG.warn("Terminating renewal thread");
   return;
-}
-long nextRefresh = getRefreshTime(tgt);
-RetryPolicy rp = null;
-while (true) {
+} catch (IOException ie) {
+  metrics.renewalFailuresTotal.incr();
+  final long now = Time.now();
+
+  if (tgt.isDestroyed()) {
+LOG.error("TGT is destroyed. Aborting renew thread for {}.",
+getUserName());
+return;
+  }
+
+  long tgtEndTime;
+  // As described in HADOOP-15593 we need to handle the case when
+  // tgt.getEndTime() throws NPE because of JDK issue JDK-8147772
+  // NPE is only possible if this issue is not fixed in the JDK
+  // currently used
   try {

[17/50] hadoop git commit: YARN-8566. Add diagnostic message for unschedulable containers (snemeth via rkanter)

2018-08-01 Thread xkrogen
YARN-8566. Add diagnostic message for unschedulable containers (snemeth via 
rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fecbac49
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fecbac49
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fecbac49

Branch: refs/heads/HDFS-12943
Commit: fecbac499e2ae6b3334773a997d454a518f43e01
Parents: b429f19
Author: Robert Kanter 
Authored: Fri Jul 27 14:32:34 2018 -0700
Committer: Robert Kanter 
Committed: Fri Jul 27 14:32:34 2018 -0700

--
 .../src/site/markdown/ResourceManagerRest.md| 285 +++
 1 file changed, 285 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fecbac49/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerRest.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerRest.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerRest.md
index a30677c..24c2319 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerRest.md
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerRest.md
@@ -2326,6 +2326,291 @@ Response Body:
 
 ```
 
+Containers for an Application Attempt API
+-
+
+With Containers for an Application Attempt API you can obtain the list of 
containers, which belongs to an Application Attempt.
+
+### URI
+
+  * 
http://rm-http-address:port/ws/v1/cluster/apps/{appid}/appattempts/{appAttemptId}/containers
+
+### HTTP Operations Supported
+
+  * GET
+
+### Query Parameters Supported
+
+  None
+
+### Elements of the *containers* object
+
+When you make a request for the list of containers, the information will be 
returned as an array of container objects.
+
+containers:
+
+| Item | Data Type | Description |
+|: |: |: |
+| containers | array of app container objects(JSON)/zero or more container 
objects(XML) | The collection of app container objects |
+
+### Elements of the *container* object
+
+| Item | Data Type | Description |
+|: |: |: |
+| containerId | string | The container id |
+| allocatedMB | long | The amount of memory allocated for the container in MB |
+| allocatedVCores | int | The amount of virtual cores allocated for the 
container |
+| assignedNodeId | string | The node id of the node the attempt ran on |
+| priority | int | Allocated priority of the container |
+| startedTime | long | The start time of the attempt (in ms since epoch) |
+| finishedTime | long | The finish time of the attempt (in ms since epoch) 0 
if not finished |
+| elapsedTime | long | The elapsed time in ms since the startedTime |
+| logUrl | string | The web URL that can be used to check the log for the 
container |
+| containerExitStatus | int | Final exit status of the container |
+| containerState | string | State of the container, can be NEW, RUNNING, or 
COMPLETE |
+| nodeHttpAddress | string | The node http address of the node the attempt ran 
on ||
+| nodeId | string | The node id of the node the attempt ran on |
+| allocatedResources |array of resource(JSON)/zero or more resource 
objects(XML) | Allocated resources for the container |
+
+### Elements of the *resource* object
+| Item | Data Type | Description |
+|: |: |: |
+| memory | int | The maximum memory for the container |
+| vCores | int | The maximum number of vcores for the container |
+
+**JSON response**
+
+HTTP Request:
+
+  GET 
http://rm-http-address:port/ws/v1/cluster/apps/{appid}/appattempts/{appAttemptId}/containers
+
+Response Header:
+
+  HTTP/1.1 200 OK
+  Content-Type: application/json
+  Transfer-Encoding: chunked
+  Server: Jetty(6.1.26)
+
+Response Body:
+
+```json
+{
+  "containers" : {
+"container": [
+  {
+  "containerId": "container_1531404209605_0008_01_01",
+  "allocatedMB": "1536",
+  "allocatedVCores": "1",
+  "assignedNodeId": "host.domain.com:37814",
+  "priority": "0",
+  "startedTime": "1531405909444",
+  "finishedTime": "0",
+  "elapsedTime": "4112",
+  "logUrl": 
"http://host.domain.com:8042/node/containerlogs/container_1531404209605_0008_01_01/systest";,
+  "containerExitStatus": "0",
+  "containerState": "RUNNING",
+  "nodeHttpAddress": "http://host.domain.com:8042";,
+  "nodeId": "host.domain.com:37814",
+  "allocatedResources": [
+ {
+"key": "memory-mb",
+"value": "1536"
+ },
+ {
+"key": "vcores",
+"value": "1"
+ }
+   ]
+  }
+]
+  }
+}
+```
+
+**XML response**
+
+HTTP Request:

[29/50] hadoop git commit: HDDS-302. Fix javadoc and add implementation details in ContainerStateMachine. Contributed by Shashikant Banerjee.

2018-08-01 Thread xkrogen
HDDS-302. Fix javadoc and add implementation details in ContainerStateMachine. 
Contributed by Shashikant Banerjee.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/952dc2fd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/952dc2fd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/952dc2fd

Branch: refs/heads/HDFS-12943
Commit: 952dc2fd557f9aaf0f144ee32d0b7731a84bad73
Parents: 3108d27
Author: Mukul Kumar Singh 
Authored: Mon Jul 30 18:45:58 2018 +0530
Committer: Mukul Kumar Singh 
Committed: Mon Jul 30 18:45:58 2018 +0530

--
 .../hadoop/hdds/scm/XceiverClientRatis.java | 30 ++
 .../java/org/apache/hadoop/hdds/HddsUtils.java  | 33 
 .../server/ratis/ContainerStateMachine.java | 14 -
 3 files changed, 49 insertions(+), 28 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/952dc2fd/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
--
diff --git 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
index 0effa8f..2541415 100644
--- 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
+++ 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
@@ -19,6 +19,7 @@
 package org.apache.hadoop.hdds.scm;
 
 import com.google.common.base.Preconditions;
+import org.apache.hadoop.hdds.HddsUtils;
 import org.apache.ratis.shaded.com.google.protobuf
 .InvalidProtocolBufferException;
 import org.apache.hadoop.conf.Configuration;
@@ -183,34 +184,9 @@ public final class XceiverClientRatis extends 
XceiverClientSpi {
 return Objects.requireNonNull(client.get(), "client is null");
   }
 
-  private boolean isReadOnly(ContainerCommandRequestProto proto) {
-switch (proto.getCmdType()) {
-case ReadContainer:
-case ReadChunk:
-case ListKey:
-case GetKey:
-case GetSmallFile:
-case ListContainer:
-case ListChunk:
-  return true;
-case CloseContainer:
-case WriteChunk:
-case UpdateContainer:
-case CompactChunk:
-case CreateContainer:
-case DeleteChunk:
-case DeleteContainer:
-case DeleteKey:
-case PutKey:
-case PutSmallFile:
-default:
-  return false;
-}
-  }
-
   private RaftClientReply sendRequest(ContainerCommandRequestProto request)
   throws IOException {
-boolean isReadOnlyRequest = isReadOnly(request);
+boolean isReadOnlyRequest = HddsUtils.isReadOnly(request);
 ByteString byteString = request.toByteString();
 LOG.debug("sendCommand {} {}", isReadOnlyRequest, request);
 final RaftClientReply reply =  isReadOnlyRequest ?
@@ -222,7 +198,7 @@ public final class XceiverClientRatis extends 
XceiverClientSpi {
 
   private CompletableFuture sendRequestAsync(
   ContainerCommandRequestProto request) throws IOException {
-boolean isReadOnlyRequest = isReadOnly(request);
+boolean isReadOnlyRequest = HddsUtils.isReadOnly(request);
 ByteString byteString = request.toByteString();
 LOG.debug("sendCommandAsync {} {}", isReadOnlyRequest, request);
 return isReadOnlyRequest ? getClient().sendReadOnlyAsync(() -> byteString) 
:

http://git-wip-us.apache.org/repos/asf/hadoop/blob/952dc2fd/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java
index 48c6dce..33bf90c 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java
@@ -24,6 +24,7 @@ import com.google.common.net.HostAndPort;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.net.DNS;
 import org.apache.hadoop.net.NetUtils;
@@ -315,4 +316,36 @@ public final class HddsUtils {
 return name;
   }
 
+  /**
+   * Checks if the container command is read only or not.
+   * @param proto ContainerCommand Request proto
+   * @return True if its readOnly , false otherwise.
+   */
+  public static boolean isReadOnly(
+  ContainerProtos.ContainerCommandRequestProto proto) {
+switch (proto.getCmdType()) {
+case ReadContainer:
+case ReadChunk:
+case ListKey:
+case GetKey

[44/50] hadoop git commit: HDDS-226. Client should update block length in OM while committing the key. Contributed by Shashikant Banerjee.

2018-08-01 Thread xkrogen
HDDS-226. Client should update block length in OM while committing the key. 
Contributed by Shashikant Banerjee.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f4db753b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f4db753b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f4db753b

Branch: refs/heads/HDFS-12943
Commit: f4db753bb6b4648c583722dbe8108973c23ba06f
Parents: 6310c0d
Author: Mukul Kumar Singh 
Authored: Wed Aug 1 09:02:43 2018 +0530
Committer: Mukul Kumar Singh 
Committed: Wed Aug 1 09:03:00 2018 +0530

--
 .../ozone/client/io/ChunkGroupOutputStream.java | 22 +++-
 .../hadoop/ozone/om/helpers/OmKeyArgs.java  | 26 ---
 .../hadoop/ozone/om/helpers/OmKeyInfo.java  | 29 ++--
 .../ozone/om/helpers/OmKeyLocationInfo.java |  6 +++-
 ...neManagerProtocolClientSideTranslatorPB.java |  8 -
 .../src/main/proto/OzoneManagerProtocol.proto   |  1 +
 .../ozone/client/rpc/TestOzoneRpcClient.java| 35 
 .../hadoop/ozone/om/TestOmBlockVersioning.java  | 13 +++-
 .../apache/hadoop/ozone/om/KeyManagerImpl.java  |  4 +++
 ...neManagerProtocolServerSideTranslatorPB.java |  5 ++-
 10 files changed, 138 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f4db753b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupOutputStream.java
--
diff --git 
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupOutputStream.java
 
b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupOutputStream.java
index 9443317..83b4dfd 100644
--- 
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupOutputStream.java
+++ 
b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupOutputStream.java
@@ -76,7 +76,7 @@ public class ChunkGroupOutputStream extends OutputStream {
   private final int chunkSize;
   private final String requestID;
   private boolean closed;
-
+  private List<OmKeyLocationInfo> locationInfoList;
   /**
* A constructor for testing purpose only.
*/
@@ -91,6 +91,7 @@ public class ChunkGroupOutputStream extends OutputStream {
 chunkSize = 0;
 requestID = null;
 closed = false;
+locationInfoList = null;
   }
 
   /**
@@ -133,6 +134,7 @@ public class ChunkGroupOutputStream extends OutputStream {
 this.xceiverClientManager = xceiverClientManager;
 this.chunkSize = chunkSize;
 this.requestID = requestId;
+this.locationInfoList = new ArrayList<>();
 LOG.debug("Expecting open key with one block, but got" +
 info.getKeyLocationVersions().size());
   }
@@ -196,8 +198,19 @@ public class ChunkGroupOutputStream extends OutputStream {
 streamEntries.add(new ChunkOutputStreamEntry(subKeyInfo.getBlockID(),
 keyArgs.getKeyName(), xceiverClientManager, xceiverClient, requestID,
 chunkSize, subKeyInfo.getLength()));
+// reset the original length to zero here. It will be updated as and when
+// the data gets written.
+subKeyInfo.setLength(0);
+locationInfoList.add(subKeyInfo);
   }
 
+  private void incrementBlockLength(int index, long length) {
+if (locationInfoList != null) {
+  OmKeyLocationInfo locationInfo = locationInfoList.get(index);
+  long originalLength = locationInfo.getLength();
+  locationInfo.setLength(originalLength + length);
+}
+  }
 
   @VisibleForTesting
   public long getByteOffset() {
@@ -222,6 +235,7 @@ public class ChunkGroupOutputStream extends OutputStream {
 }
 ChunkOutputStreamEntry entry = streamEntries.get(currentStreamIndex);
 entry.write(b);
+incrementBlockLength(currentStreamIndex, 1);
 if (entry.getRemaining() <= 0) {
   currentStreamIndex += 1;
 }
@@ -276,6 +290,7 @@ public class ChunkGroupOutputStream extends OutputStream {
   ChunkOutputStreamEntry current = streamEntries.get(currentStreamIndex);
   int writeLen = Math.min(len, (int)current.getRemaining());
   current.write(b, off, writeLen);
+  incrementBlockLength(currentStreamIndex, writeLen);
   if (current.getRemaining() <= 0) {
 currentStreamIndex += 1;
   }
@@ -328,8 +343,13 @@ public class ChunkGroupOutputStream extends OutputStream {
 }
 if (keyArgs != null) {
   // in test, this could be null
+  long length =
+  locationInfoList.parallelStream().mapToLong(e -> 
e.getLength()).sum();
+  Preconditions.checkState(byteOffset == length);
   keyArgs.setDataSize(byteOffset);
+  keyArgs.setLocationInfoList(locationInfoList);
   omClient.commitKey(keyArgs, openID);
+  locationInfoList = null;
 } else {
   LOG.warn("Closing ChunkGroup

[37/50] hadoop git commit: YARN-8175. Add support for Node Labels in SLS. Contributed by Abhishek Modi.

2018-08-01 Thread xkrogen
YARN-8175. Add support for Node Labels in SLS. Contributed by Abhishek Modi.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9fea5c9e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9fea5c9e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9fea5c9e

Branch: refs/heads/HDFS-12943
Commit: 9fea5c9ee76bd36f273ae93afef5f3ef3c477a53
Parents: b28bdc7
Author: Inigo Goiri 
Authored: Tue Jul 31 09:36:34 2018 -0700
Committer: Inigo Goiri 
Committed: Tue Jul 31 09:36:34 2018 -0700

--
 .../org/apache/hadoop/yarn/sls/SLSRunner.java   | 93 +++-
 .../hadoop/yarn/sls/appmaster/AMSimulator.java  |  9 +-
 .../yarn/sls/appmaster/MRAMSimulator.java   |  5 +-
 .../yarn/sls/appmaster/StreamAMSimulator.java   |  5 +-
 .../hadoop/yarn/sls/conf/SLSConfiguration.java  |  1 +
 .../yarn/sls/nodemanager/NMSimulator.java   | 13 ++-
 .../apache/hadoop/yarn/sls/utils/SLSUtils.java  | 58 
 .../yarn/sls/appmaster/TestAMSimulator.java | 35 +++-
 .../hadoop/yarn/sls/utils/TestSLSUtils.java | 64 ++
 .../test/resources/nodes-with-resources.json|  8 +-
 .../hadoop/yarn/client/cli/RMAdminCLI.java  | 71 +--
 .../yarn/client/util/YarnClientUtils.java   | 77 
 12 files changed, 301 insertions(+), 138 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9fea5c9e/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java
--
diff --git 
a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java
 
b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java
index e859732..1e83e40 100644
--- 
a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java
+++ 
b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java
@@ -60,6 +60,7 @@ import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
 import org.apache.hadoop.yarn.api.records.ExecutionType;
 import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.NodeLabel;
 import org.apache.hadoop.yarn.api.records.NodeState;
 import org.apache.hadoop.yarn.api.records.ReservationId;
 import org.apache.hadoop.yarn.api.records.Resource;
@@ -298,30 +299,20 @@ public class SLSRunner extends Configured implements Tool 
{
 SLSConfiguration.NM_RESOURCE_UTILIZATION_RATIO,
 SLSConfiguration.NM_RESOURCE_UTILIZATION_RATIO_DEFAULT);
 // nm information (fetch from topology file, or from sls/rumen json file)
-Map nodeResourceMap = new HashMap<>();
-Set nodeSet;
+Set<NodeDetails> nodeSet = null;
 if (nodeFile.isEmpty()) {
   for (String inputTrace : inputTraces) {
 switch (inputType) {
 case SLS:
   nodeSet = SLSUtils.parseNodesFromSLSTrace(inputTrace);
-  for (String node : nodeSet) {
-nodeResourceMap.put(node, null);
-  }
   break;
 case RUMEN:
   nodeSet = SLSUtils.parseNodesFromRumenTrace(inputTrace);
-  for (String node : nodeSet) {
-nodeResourceMap.put(node, null);
-  }
   break;
 case SYNTH:
   stjp = new SynthTraceJobProducer(getConf(), new 
Path(inputTraces[0]));
   nodeSet = SLSUtils.generateNodes(stjp.getNumNodes(),
   stjp.getNumNodes()/stjp.getNodesPerRack());
-  for (String node : nodeSet) {
-nodeResourceMap.put(node, null);
-  }
   break;
 default:
   throw new YarnException("Input configuration not recognized, "
@@ -329,11 +320,11 @@ public class SLSRunner extends Configured implements Tool 
{
 }
   }
 } else {
-  nodeResourceMap = SLSUtils.parseNodesFromNodeFile(nodeFile,
+  nodeSet = SLSUtils.parseNodesFromNodeFile(nodeFile,
   nodeManagerResource);
 }
 
-if (nodeResourceMap.size() == 0) {
+if (nodeSet == null || nodeSet.isEmpty()) {
   throw new YarnException("No node! Please configure nodes.");
 }
 
@@ -344,20 +335,21 @@ public class SLSRunner extends Configured implements Tool 
{
 SLSConfiguration.RUNNER_POOL_SIZE_DEFAULT);
 ExecutorService executorService = Executors.
 newFixedThreadPool(threadPoolSize);
-for (Map.Entry entry : nodeResourceMap.entrySet()) {
+for (NodeDetails nodeDetails : nodeSet) {
   executorService.submit(new Runnable() {
 @Override public void run() {
   try {
 // we randomize the heartbeat start time from zero to 1 interval
 NMSimulator nm = new NMSimulator();
 Resource nmResource = nodeManagerResource;
-String hostName = entry.getKey();
-  

[21/50] hadoop git commit: HADOOP-15636. Follow-up from HADOOP-14918; restoring test under new name. Contributed by Gabor Bota.

2018-08-01 Thread xkrogen
HADOOP-15636. Follow-up from HADOOP-14918; restoring test under new name. 
Contributed by Gabor Bota.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/59adeb8d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/59adeb8d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/59adeb8d

Branch: refs/heads/HDFS-12943
Commit: 59adeb8d7f2f04bc56d37b2a2e65596fee6e4894
Parents: ed9d60e
Author: Sean Mackrory 
Authored: Thu Jul 26 10:25:47 2018 -0600
Committer: Sean Mackrory 
Committed: Fri Jul 27 18:23:29 2018 -0600

--
 .../s3a/s3guard/ITestDynamoDBMetadataStore.java | 649 +++
 1 file changed, 649 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/59adeb8d/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestDynamoDBMetadataStore.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestDynamoDBMetadataStore.java
 
b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestDynamoDBMetadataStore.java
new file mode 100644
index 000..a597858
--- /dev/null
+++ 
b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestDynamoDBMetadataStore.java
@@ -0,0 +1,649 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.s3a.s3guard;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.net.URI;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
+
+import com.amazonaws.services.dynamodbv2.document.DynamoDB;
+import com.amazonaws.services.dynamodbv2.document.Item;
+import com.amazonaws.services.dynamodbv2.document.PrimaryKey;
+import com.amazonaws.services.dynamodbv2.document.Table;
+import 
com.amazonaws.services.dynamodbv2.model.ProvisionedThroughputDescription;
+import com.amazonaws.services.dynamodbv2.model.ResourceNotFoundException;
+import com.amazonaws.services.dynamodbv2.model.TableDescription;
+
+import com.google.common.collect.Lists;
+import org.apache.commons.collections.CollectionUtils;
+import org.apache.hadoop.fs.contract.s3a.S3AContract;
+import org.apache.hadoop.fs.s3a.Constants;
+import org.apache.hadoop.fs.s3a.Tristate;
+
+import org.apache.hadoop.io.IOUtils;
+import org.junit.AfterClass;
+import org.junit.Assume;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.s3a.S3AFileStatus;
+import org.apache.hadoop.fs.s3a.S3AFileSystem;
+import org.apache.hadoop.security.UserGroupInformation;
+
+import static org.apache.hadoop.fs.s3a.Constants.*;
+import static org.apache.hadoop.fs.s3a.S3ATestUtils.*;
+import static 
org.apache.hadoop.fs.s3a.s3guard.PathMetadataDynamoDBTranslation.*;
+import static org.apache.hadoop.fs.s3a.s3guard.DynamoDBMetadataStore.*;
+import static org.apache.hadoop.test.LambdaTestUtils.*;
+
+/**
+ * Test that {@link DynamoDBMetadataStore} implements {@link MetadataStore}.
+ *
+ * In this integration test, we use a real AWS DynamoDB. A
+ * {@link DynamoDBMetadataStore} object is created in the @BeforeClass method
+ * and shared for all tests in the class. You will be charged
+ * bills for AWS S3 or DynamoDB when you run these tests.
+ *
+ * According to the base class, every test case will have an independent contract
+ * to create a new {@link S3AFileSystem} instance and initialize it.
+ * A table will be created and shared between the tests.
+ */
+public class ITestDynamoDBMetadataStore extends MetadataStoreTestBase {
+  private static final Logger LOG =
+  LoggerFactory.getLogger(ITestDynamoDBMetadataStore.class);
+  public static final PrimaryKey
+  VERSION_MARKER_PRIMARY_KEY = createVersionMarkerPrimaryKey(
+  DynamoDBMetadataStore.VERSION_MARKER);
+
+  private S3AFileSystem fileSystem

[08/50] hadoop git commit: HDDS-277. PipelineStateMachine should handle closure of pipelines in SCM. Contributed by Mukul Kumar Singh.

2018-08-01 Thread xkrogen
HDDS-277. PipelineStateMachine should handle closure of pipelines in SCM. 
Contributed by Mukul Kumar Singh.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fd31cb6c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fd31cb6c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fd31cb6c

Branch: refs/heads/HDFS-12943
Commit: fd31cb6cfeef0c7e9bb0a054cb0f78853df8976f
Parents: be150a1
Author: Xiaoyu Yao 
Authored: Thu Jul 26 13:15:27 2018 -0700
Committer: Xiaoyu Yao 
Committed: Thu Jul 26 13:15:55 2018 -0700

--
 .../container/common/helpers/ContainerInfo.java |   7 +-
 .../container/CloseContainerEventHandler.java   |  28 ++--
 .../hdds/scm/container/ContainerMapping.java|  16 +-
 .../scm/container/ContainerStateManager.java|  11 ++
 .../scm/container/states/ContainerStateMap.java |   2 +-
 .../hdds/scm/pipelines/Node2PipelineMap.java|  33 ++--
 .../hdds/scm/pipelines/PipelineManager.java |  31 ++--
 .../hdds/scm/pipelines/PipelineSelector.java|  70 +++--
 .../scm/pipelines/ratis/RatisManagerImpl.java   |  14 +-
 .../standalone/StandaloneManagerImpl.java   |  13 +-
 .../scm/server/StorageContainerManager.java |   2 +-
 .../hadoop/hdds/scm/block/TestBlockManager.java |   4 +-
 .../TestCloseContainerEventHandler.java |  13 +-
 .../scm/container/TestContainerMapping.java |   4 +-
 .../container/closer/TestContainerCloser.java   |   4 +-
 .../hdds/scm/node/TestContainerPlacement.java   |   3 +-
 .../hdds/scm/pipeline/TestPipelineClose.java| 152 +++
 .../hadoop/ozone/scm/TestContainerSQLCli.java   |   4 +-
 18 files changed, 331 insertions(+), 80 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fd31cb6c/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerInfo.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerInfo.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerInfo.java
index 4074b21..b194c14 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerInfo.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerInfo.java
@@ -459,12 +459,13 @@ public class ContainerInfo implements 
Comparator,
 
   /**
* Check if a container is in open state, this will check if the
-   * container is either open or allocated or creating. Any containers in
-   * these states is managed as an open container by SCM.
+   * container is either open, allocated, creating or closing.
+   * Any containers in these states are managed as open containers by SCM.
*/
   public boolean isContainerOpen() {
 return state == HddsProtos.LifeCycleState.ALLOCATED ||
 state == HddsProtos.LifeCycleState.CREATING ||
-state == HddsProtos.LifeCycleState.OPEN;
+state == HddsProtos.LifeCycleState.OPEN ||
+state == HddsProtos.LifeCycleState.CLOSING;
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fd31cb6c/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/CloseContainerEventHandler.java
--
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/CloseContainerEventHandler.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/CloseContainerEventHandler.java
index 859e5d5..949eb13 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/CloseContainerEventHandler.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/CloseContainerEventHandler.java
@@ -21,7 +21,6 @@ import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
 import 
org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
-import org.apache.hadoop.hdds.scm.exceptions.SCMException;
 import org.apache.hadoop.hdds.server.events.EventHandler;
 import org.apache.hadoop.hdds.server.events.EventPublisher;
 import org.apache.hadoop.ozone.protocol.commands.CloseContainerCommand;
@@ -63,13 +62,13 @@ public class CloseContainerEventHandler implements 
EventHandler {
   containerManager.getContainerWithPipeline(containerID.getId());
   info = containerWithPipeline.getContainerInfo();
   if (info == null) {
-LOG.info("Failed to update the container state. Container with id : {} 
"
+LOG.error("Failed to update the container state. Container wit

[22/50] hadoop git commit: YARN-8558. NM recovery level db not cleaned up properly on container finish. Contributed by Bibin A Chundatt.

2018-08-01 Thread xkrogen
YARN-8558. NM recovery level db not cleaned up properly on container finish. 
Contributed by Bibin A Chundatt.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3d586841
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3d586841
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3d586841

Branch: refs/heads/HDFS-12943
Commit: 3d586841aba99c7df98b2b4d3e48ec0144bad086
Parents: 59adeb8
Author: bibinchundatt 
Authored: Sat Jul 28 20:52:39 2018 +0530
Committer: bibinchundatt 
Committed: Sat Jul 28 20:52:39 2018 +0530

--
 .../recovery/NMLeveldbStateStoreService.java  | 14 ++
 .../recovery/TestNMLeveldbStateStoreService.java  |  7 +++
 2 files changed, 17 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3d586841/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMLeveldbStateStoreService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMLeveldbStateStoreService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMLeveldbStateStoreService.java
index 44f5e18..67f642d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMLeveldbStateStoreService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMLeveldbStateStoreService.java
@@ -143,9 +143,9 @@ public class NMLeveldbStateStoreService extends 
NMStateStoreService {
   NM_TOKENS_KEY_PREFIX + PREV_MASTER_KEY_SUFFIX;
   private static final String CONTAINER_TOKENS_KEY_PREFIX =
   "ContainerTokens/";
-  private static final String CONTAINER_TOKENS_CURRENT_MASTER_KEY =
+  private static final String CONTAINER_TOKEN_SECRETMANAGER_CURRENT_MASTER_KEY 
=
   CONTAINER_TOKENS_KEY_PREFIX + CURRENT_MASTER_KEY_SUFFIX;
-  private static final String CONTAINER_TOKENS_PREV_MASTER_KEY =
+  private static final String CONTAINER_TOKEN_SECRETMANAGER_PREV_MASTER_KEY =
   CONTAINER_TOKENS_KEY_PREFIX + PREV_MASTER_KEY_SUFFIX;
 
   private static final String LOG_DELETER_KEY_PREFIX = "LogDeleters/";
@@ -658,6 +658,12 @@ public class NMLeveldbStateStoreService extends 
NMStateStoreService {
 batch.delete(bytes(keyPrefix + CONTAINER_KILLED_KEY_SUFFIX));
 batch.delete(bytes(keyPrefix + CONTAINER_EXIT_CODE_KEY_SUFFIX));
 batch.delete(bytes(keyPrefix + CONTAINER_UPDATE_TOKEN_SUFFIX));
+batch.delete(bytes(keyPrefix + CONTAINER_START_TIME_KEY_SUFFIX));
+batch.delete(bytes(keyPrefix + CONTAINER_LOG_DIR_KEY_SUFFIX));
+batch.delete(bytes(keyPrefix + CONTAINER_VERSION_KEY_SUFFIX));
+batch.delete(bytes(keyPrefix + CONTAINER_REMAIN_RETRIES_KEY_SUFFIX));
+batch.delete(bytes(keyPrefix + CONTAINER_RESTART_TIMES_SUFFIX));
+batch.delete(bytes(keyPrefix + CONTAINER_WORK_DIR_KEY_SUFFIX));
 List unknownKeysForContainer = containerUnknownKeySuffixes
 .removeAll(containerId);
 for (String unknownKeySuffix : unknownKeysForContainer) {
@@ -1169,13 +1175,13 @@ public class NMLeveldbStateStoreService extends 
NMStateStoreService {
   @Override
   public void storeContainerTokenCurrentMasterKey(MasterKey key)
   throws IOException {
-storeMasterKey(CONTAINER_TOKENS_CURRENT_MASTER_KEY, key);
+storeMasterKey(CONTAINER_TOKEN_SECRETMANAGER_CURRENT_MASTER_KEY, key);
   }
 
   @Override
   public void storeContainerTokenPreviousMasterKey(MasterKey key)
   throws IOException {
-storeMasterKey(CONTAINER_TOKENS_PREV_MASTER_KEY, key);
+storeMasterKey(CONTAINER_TOKEN_SECRETMANAGER_PREV_MASTER_KEY, key);
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3d586841/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/recovery/TestNMLeveldbStateStoreService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/recovery/TestNMLeveldbStateStoreService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/recovery/TestNMLeveldbStateStoreService.java
ind

[48/50] hadoop git commit: YARN-8403. Change the log level for fail to download resource from INFO to ERROR. Contributed by Eric Yang

2018-08-01 Thread xkrogen
YARN-8403. Change the log level for fail to download resource from INFO to 
ERROR. Contributed by Eric Yang


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/67c65da2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/67c65da2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/67c65da2

Branch: refs/heads/HDFS-12943
Commit: 67c65da261464a0dccb63dc27668109a52e05714
Parents: d920b9d
Author: Billie Rinaldi 
Authored: Wed Aug 1 08:51:18 2018 -0700
Committer: Billie Rinaldi 
Committed: Wed Aug 1 08:51:40 2018 -0700

--
 .../localizer/ResourceLocalizationService.java  | 16 +++-
 .../localizer/TestResourceLocalizationService.java  |  3 +++
 2 files changed, 14 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/67c65da2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
index 4ca6720..3834ece 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
@@ -969,11 +969,17 @@ public class ResourceLocalizationService extends 
CompositeService
 .getDU(new File(local.toUri();
   assoc.getResource().unlock();
 } catch (ExecutionException e) {
-  LOG.info("Failed to download resource " + assoc.getResource(),
-  e.getCause());
-  LocalResourceRequest req = assoc.getResource().getRequest();
-  publicRsrc.handle(new ResourceFailedLocalizationEvent(req,
-  e.getMessage()));
+  String user = assoc.getContext().getUser();
+  ApplicationId applicationId = 
assoc.getContext().getContainerId().getApplicationAttemptId().getApplicationId();
+  LocalResourcesTracker tracker =
+getLocalResourcesTracker(LocalResourceVisibility.APPLICATION, 
user, applicationId);
+  final String diagnostics = "Failed to download resource " +
+  assoc.getResource() + " " + e.getCause();
+  tracker.handle(new ResourceFailedLocalizationEvent(
+  assoc.getResource().getRequest(), diagnostics));
+  publicRsrc.handle(new ResourceFailedLocalizationEvent(
+  assoc.getResource().getRequest(), diagnostics));
+  LOG.error(diagnostics);
   assoc.getResource().unlock();
 } catch (CancellationException e) {
   // ignore; shutting down

http://git-wip-us.apache.org/repos/asf/hadoop/blob/67c65da2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceLocalizationService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceLocalizationService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceLocalizationService.java
index 4d03f15..2b9148e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceLocalizationService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceLocalizationService.java
@@ -2398,6 +2398,9 @@ public class TestResourceLocalizationService {
   // Waiting for resource to change into FAILED state.
   Assert.assertTrue(waitForResourceState(lr, spyService, req,
 LocalResourceVisibility.

[09/50] hadoop git commit: HDDS-291. Initialize hadoop metrics system in standalone hdds datanodes. Contributed by Elek Marton.

2018-08-01 Thread xkrogen
HDDS-291. Initialize hadoop metrics system in standalone hdds datanodes. 
Contributed by Elek Marton.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d70d8457
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d70d8457
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d70d8457

Branch: refs/heads/HDFS-12943
Commit: d70d84570575574b7e3ad0f00baf54f1dde76d97
Parents: fd31cb6
Author: Xiaoyu Yao 
Authored: Thu Jul 26 13:17:37 2018 -0700
Committer: Xiaoyu Yao 
Committed: Thu Jul 26 13:17:37 2018 -0700

--
 .../src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java | 2 ++
 .../ozone/container/common/statemachine/SCMConnectionManager.java  | 2 +-
 2 files changed, 3 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d70d8457/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java
--
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java
index ddeec87..f359e72 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java
@@ -26,6 +26,7 @@ import org.apache.hadoop.hdds.HddsUtils;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
 import org.apache.hadoop.ozone.container.common.statemachine
 .DatanodeStateMachine;
@@ -241,6 +242,7 @@ public class HddsDatanodeService implements ServicePlugin {
 System.exit(1);
   }
   StringUtils.startupShutdownMessage(HddsDatanodeService.class, args, LOG);
+  DefaultMetricsSystem.initialize("HddsDatanode");
   HddsDatanodeService hddsDatanodeService =
   createHddsDatanodeService(conf);
   hddsDatanodeService.start(null);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d70d8457/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/SCMConnectionManager.java
--
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/SCMConnectionManager.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/SCMConnectionManager.java
index 19722f0..85fb580 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/SCMConnectionManager.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/SCMConnectionManager.java
@@ -67,7 +67,7 @@ public class SCMConnectionManager
 this.rpcTimeout = timeOut.intValue();
 this.scmMachines = new HashMap<>();
 this.conf = conf;
-jmxBean = MBeans.register("OzoneDataNode",
+jmxBean = MBeans.register("HddsDatanode",
 "SCMConnectionManager",
 this);
   }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[47/50] hadoop git commit: YARN-8595. [UI2] Container diagnostic information is missing from container page. Contributed by Akhil PB.

2018-08-01 Thread xkrogen
YARN-8595. [UI2] Container diagnostic information is missing from container 
page. Contributed by Akhil PB.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d920b9db
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d920b9db
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d920b9db

Branch: refs/heads/HDFS-12943
Commit: d920b9db77be44adc4f8a2a0c2df889af82be04f
Parents: a48a0cc
Author: Sunil G 
Authored: Wed Aug 1 14:27:54 2018 +0530
Committer: Sunil G 
Committed: Wed Aug 1 14:27:54 2018 +0530

--
 .../main/webapp/app/models/yarn-app-attempt.js  |  1 +
 .../app/models/yarn-timeline-container.js   |  1 +
 .../webapp/app/serializers/yarn-app-attempt.js  |  3 +-
 .../app/serializers/yarn-timeline-container.js  |  6 +--
 .../src/main/webapp/app/styles/app.scss |  9 
 .../templates/components/app-attempt-table.hbs  |  6 +++
 .../app/templates/components/timeline-view.hbs  | 44 ++--
 7 files changed, 51 insertions(+), 19 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d920b9db/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-app-attempt.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-app-attempt.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-app-attempt.js
index cffe198..f483695 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-app-attempt.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-app-attempt.js
@@ -32,6 +32,7 @@ export default DS.Model.extend({
   logsLink: DS.attr('string'),
   state: DS.attr('string'),
   appAttemptId: DS.attr('string'),
+  diagnosticsInfo: DS.attr('string'),
 
   appId: Ember.computed("id",function () {
 var id = this.get("id");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d920b9db/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-timeline-container.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-timeline-container.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-timeline-container.js
index 7482a2f..9384418 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-timeline-container.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-timeline-container.js
@@ -31,6 +31,7 @@ export default DS.Model.extend({
   containerState: DS.attr('string'),
   nodeHttpAddress: DS.attr('string'),
   nodeId: DS.attr('string'),
+  diagnosticsInfo: DS.attr('string'),
 
   startTs: function() {
 return Converter.dateToTimeStamp(this.get("startedTime"));

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d920b9db/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-app-attempt.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-app-attempt.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-app-attempt.js
index f8f598b..55f484b 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-app-attempt.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-app-attempt.js
@@ -40,7 +40,8 @@ export default DS.JSONAPISerializer.extend({
   hosts: payload.host,
   state: payload.appAttemptState,
   logsLink: payload.logsLink,
-  appAttemptId: payload.appAttemptId
+  appAttemptId: payload.appAttemptId,
+  diagnosticsInfo: payload.diagnosticsInfo
 }
   };
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d920b9db/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-timeline-container.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-timeline-container.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-timeline-container.js
index 1322972..99ab6c4 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-timeline-container.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-timeline-container.js
@@ -22,11 +22,6 @@ import Converter from 'yarn-ui/utils/converter';
 
 export default DS.JSONAPISerializer.extend({
   intern

[39/50] hadoop git commit: YARN-8418. App local logs could be leaked if log aggregation fails to initialize for the app. (Bibin A Chundatt via wangda)

2018-08-01 Thread xkrogen
YARN-8418. App local logs could be leaked if log aggregation fails to initialize 
for the app. (Bibin A Chundatt via wangda)

Change-Id: I29a23ca4b219b48c92e7975cd44cddb8b0e04104


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4b540bbf
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4b540bbf
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4b540bbf

Branch: refs/heads/HDFS-12943
Commit: 4b540bbfcf02d828052999215c6135603d98f5db
Parents: 8aa93a5
Author: Wangda Tan 
Authored: Tue Jul 31 12:07:51 2018 -0700
Committer: Wangda Tan 
Committed: Tue Jul 31 12:08:00 2018 -0700

--
 .../LogAggregationFileController.java   |  7 ++
 .../nodemanager/NodeStatusUpdaterImpl.java  |  1 +
 .../containermanager/ContainerManager.java  |  1 +
 .../containermanager/ContainerManagerImpl.java  | 13 ++-
 .../logaggregation/AppLogAggregator.java|  8 ++
 .../logaggregation/AppLogAggregatorImpl.java| 15 
 .../logaggregation/LogAggregationService.java   | 83 
 .../containermanager/loghandler/LogHandler.java |  7 ++
 .../loghandler/NonAggregatingLogHandler.java|  9 +++
 .../loghandler/event/LogHandlerEventType.java   |  4 +-
 .../event/LogHandlerTokenUpdatedEvent.java  | 26 ++
 .../nodemanager/DummyContainerManager.java  |  7 ++
 .../TestLogAggregationService.java  | 34 +---
 13 files changed, 187 insertions(+), 28 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4b540bbf/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/LogAggregationFileController.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/LogAggregationFileController.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/LogAggregationFileController.java
index b047b1c..6b3c9a4 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/LogAggregationFileController.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/LogAggregationFileController.java
@@ -43,11 +43,14 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.token.SecretManager;
 import org.apache.hadoop.yarn.api.records.ApplicationAccessType;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
 import org.apache.hadoop.yarn.logaggregation.LogAggregationUtils;
 import org.apache.hadoop.yarn.webapp.View.ViewContext;
@@ -365,6 +368,10 @@ public abstract class LogAggregationFileController {
 }
   });
 } catch (Exception e) {
+  if (e instanceof RemoteException) {
+throw new YarnRuntimeException(((RemoteException) e)
+.unwrapRemoteException(SecretManager.InvalidToken.class));
+  }
   throw new YarnRuntimeException(e);
 }
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4b540bbf/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java
index 8154723..faf7adb 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java
@@ -1135,6 +1135,7 @@ public class NodeStatusUpdaterImpl extends 
AbstractService implements
 if (systemCredentials != null && !systemCredentials.isEmpty()) {
   ((NMContext) context).

[50/50] hadoop git commit: HDFS-13688. [SBN read] Introduce msync API call. Contributed by Chen Liang.

2018-08-01 Thread xkrogen
HDFS-13688. [SBN read] Introduce msync API call. Contributed by Chen Liang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2dad24f7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2dad24f7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2dad24f7

Branch: refs/heads/HDFS-12943
Commit: 2dad24f73474833d70c11908b0ff893f4d547348
Parents: cad9396
Author: Erik Krogen 
Authored: Wed Aug 1 09:58:04 2018 -0700
Committer: Erik Krogen 
Committed: Wed Aug 1 10:06:40 2018 -0700

--
 .../main/java/org/apache/hadoop/hdfs/DFSClient.java   | 14 ++
 .../apache/hadoop/hdfs/protocol/ClientProtocol.java   | 11 +++
 .../ClientNamenodeProtocolTranslatorPB.java   | 11 +++
 .../src/main/proto/ClientNamenodeProtocol.proto   |  8 
 .../org/apache/hadoop/hdfs/protocol/TestReadOnly.java |  3 ++-
 .../server/federation/router/RouterRpcServer.java |  5 +
 .../ClientNamenodeProtocolServerSideTranslatorPB.java | 13 +
 .../hdfs/server/namenode/NameNodeRpcServer.java   |  5 +
 8 files changed, 69 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2dad24f7/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 85d6512..71f7401 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -3148,4 +3148,18 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
 checkOpen();
 return new OpenFilesIterator(namenode, tracer, openFilesTypes, path);
   }
+
+  /**
+   * A blocking call to wait for Observer NameNode state ID to reach to the
+   * current client state ID. Current client state ID is given by the client
+   * alignment context.
+   * An assumption is that client alignment context has the state ID set at 
this
+   * point. This is because ObserverReadProxyProvider sets up the initial state
+   * ID when it is being created.
+   *
+   * @throws IOException
+   */
+  public void msync() throws IOException {
+namenode.msync();
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2dad24f7/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
index f2fc530..84a875a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
@@ -1787,4 +1787,15 @@ public interface ClientProtocol {
   @ReadOnly
   BatchedEntries listOpenFiles(long prevId,
   EnumSet openFilesTypes, String path) throws IOException;
+
+  /**
+   * Called by client to wait until the server has reached the state id of the
+   * client. The client and server state id are given by client side and server
+   * side alignment context respectively. This can be a blocking call.
+   *
+   * @throws IOException
+   */
+  @Idempotent
+  @ReadOnly
+  void msync() throws IOException;
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2dad24f7/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
index e7ae6fd..442a59f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
@@ -158,6 +158,8 @@ import 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSa
 import 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsRequestProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtoc

[36/50] hadoop git commit: HDDS-279. DeleteBlocks command should not be sent for open containers. Contributed by Lokesh Jain.

2018-08-01 Thread xkrogen
HDDS-279. DeleteBlocks command should not be sent for open containers. 
Contributed by Lokesh Jain.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b28bdc7e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b28bdc7e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b28bdc7e

Branch: refs/heads/HDFS-12943
Commit: b28bdc7e8b488ef0df62a92bcfe7eb74bbe177c1
Parents: 7631e0a
Author: Mukul Kumar Singh 
Authored: Tue Jul 31 19:50:40 2018 +0530
Committer: Mukul Kumar Singh 
Committed: Tue Jul 31 19:50:40 2018 +0530

--
 .../block/DatanodeDeletedBlockTransactions.java | 18 ++--
 .../hdds/scm/block/DeletedBlockLogImpl.java |  8 +-
 .../org/apache/hadoop/ozone/OzoneTestUtils.java | 92 
 .../ozone/TestStorageContainerManager.java  |  8 ++
 .../common/TestBlockDeletingService.java| 17 +++-
 .../commandhandler/TestBlockDeletion.java   | 47 --
 .../hadoop/ozone/web/client/TestKeys.java   |  3 +
 7 files changed, 152 insertions(+), 41 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b28bdc7e/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DatanodeDeletedBlockTransactions.java
--
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DatanodeDeletedBlockTransactions.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DatanodeDeletedBlockTransactions.java
index e33a700..25420fe 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DatanodeDeletedBlockTransactions.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DatanodeDeletedBlockTransactions.java
@@ -28,6 +28,8 @@ import java.util.List;
 import java.util.Set;
 import java.util.UUID;
 import java.util.stream.Collectors;
+
+import 
org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
 import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
 
 /**
@@ -53,21 +55,26 @@ public class DatanodeDeletedBlockTransactions {
 this.nodeNum = nodeNum;
   }
 
-  public void addTransaction(DeletedBlocksTransaction tx,
-  Set dnsWithTransactionCommitted) throws IOException {
+  public boolean addTransaction(DeletedBlocksTransaction tx,
+  Set dnsWithTransactionCommitted) {
 Pipeline pipeline = null;
 try {
-  pipeline = mappingService.getContainerWithPipeline(tx.getContainerID())
-  .getPipeline();
+  ContainerWithPipeline containerWithPipeline =
+  mappingService.getContainerWithPipeline(tx.getContainerID());
+  if (containerWithPipeline.getContainerInfo().isContainerOpen()) {
+return false;
+  }
+  pipeline = containerWithPipeline.getPipeline();
 } catch (IOException e) {
   SCMBlockDeletingService.LOG.warn("Got container info error.", e);
+  return false;
 }
 
 if (pipeline == null) {
   SCMBlockDeletingService.LOG.warn(
   "Container {} not found, continue to process next",
   tx.getContainerID());
-  return;
+  return false;
 }
 
 for (DatanodeDetails dd : pipeline.getMachines()) {
@@ -78,6 +85,7 @@ public class DatanodeDeletedBlockTransactions {
 addTransactionToDN(dnID, tx);
   }
 }
+return true;
   }
 
   private void addTransactionToDN(UUID dnID, DeletedBlocksTransaction tx) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b28bdc7e/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java
--
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java
index 752c9c7..ca4e1d0 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java
@@ -386,9 +386,11 @@ public class DeletedBlockLogImpl implements 
DeletedBlockLog {
   .parseFrom(value);
 
   if (block.getCount() > -1 && block.getCount() <= maxRetry) {
-Set dnsWithTransactionCommitted = transactionToDNsCommitMap
-.putIfAbsent(block.getTxID(), new ConcurrentHashSet<>());
-transactions.addTransaction(block, dnsWithTransactionCommitted);
+if (transactions.addTransaction(block,
+transactionToDNsCommitMap.get(block.getTxID( {
+  transactionToDNsCommitMap
+  .putIfAbsent(block.getTxID(), new ConcurrentHashSet<>());
+}
   }
   

[20/50] hadoop git commit: YARN-8508. Release GPU resource for killed container. Contributed by Chandni Singh

2018-08-01 Thread xkrogen
YARN-8508.  Release GPU resource for killed container.
Contributed by Chandni Singh


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ed9d60e8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ed9d60e8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ed9d60e8

Branch: refs/heads/HDFS-12943
Commit: ed9d60e888d0acfd748fda7f66249f5b79a3ed6d
Parents: 79091cf
Author: Eric Yang 
Authored: Fri Jul 27 19:33:58 2018 -0400
Committer: Eric Yang 
Committed: Fri Jul 27 19:33:58 2018 -0400

--
 .../nodemanager/LinuxContainerExecutor.java | 34 ++--
 .../nodemanager/TestLinuxContainerExecutor.java |  9 +-
 2 files changed, 25 insertions(+), 18 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed9d60e8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
index 03b88a4..4253f2f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
@@ -573,15 +573,7 @@ public class LinuxContainerExecutor extends 
ContainerExecutor {
   return handleExitCode(e, container, containerId);
 } finally {
   resourcesHandler.postExecute(containerId);
-
-  try {
-if (resourceHandlerChain != null) {
-  resourceHandlerChain.postComplete(containerId);
-}
-  } catch (ResourceHandlerException e) {
-LOG.warn("ResourceHandlerChain.postComplete failed for " +
-"containerId: " + containerId + ". Exception: " + e);
-  }
+  postComplete(containerId);
 }
 
 return 0;
@@ -721,14 +713,7 @@ public class LinuxContainerExecutor extends 
ContainerExecutor {
   return super.reacquireContainer(ctx);
 } finally {
   resourcesHandler.postExecute(containerId);
-  if (resourceHandlerChain != null) {
-try {
-  resourceHandlerChain.postComplete(containerId);
-} catch (ResourceHandlerException e) {
-  LOG.warn("ResourceHandlerChain.postComplete failed for " +
-  "containerId: " + containerId + " Exception: " + e);
-}
-  }
+  postComplete(containerId);
 }
   }
 
@@ -798,6 +783,8 @@ public class LinuxContainerExecutor extends 
ContainerExecutor {
   logOutput(e.getOutput());
   throw new IOException("Error in reaping container "
   + container.getContainerId().toString() + " exit = " + retCode, e);
+} finally {
+  postComplete(container.getContainerId());
 }
 return true;
   }
@@ -968,4 +955,17 @@ public class LinuxContainerExecutor extends 
ContainerExecutor {
   LOG.warn("Unable to remove docker container: " + containerId);
 }
   }
+
+  @VisibleForTesting
+  void postComplete(final ContainerId containerId) {
+try {
+  if (resourceHandlerChain != null) {
+LOG.debug("{} post complete", containerId);
+resourceHandlerChain.postComplete(containerId);
+  }
+} catch (ResourceHandlerException e) {
+  LOG.warn("ResourceHandlerChain.postComplete failed for " +
+  "containerId: {}. Exception: ", containerId, e);
+}
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed9d60e8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutor.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutor.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutor.java
index ddbf3b9..6d77fc4 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutor.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodem

[15/50] hadoop git commit: HDFS-13765. Fix javadoc for FSDirMkdirOp#createParentDirectories. Contributed by Lokesh Jain.

2018-08-01 Thread xkrogen
HDFS-13765. Fix javadoc for FSDirMkdirOp#createParentDirectories. Contributed 
by Lokesh Jain.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1c40bc28
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1c40bc28
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1c40bc28

Branch: refs/heads/HDFS-12943
Commit: 1c40bc283645db5a661dc9f004a0bf34832a0902
Parents: 3cc7ce8
Author: Arpit Agarwal 
Authored: Fri Jul 27 10:14:01 2018 -0700
Committer: Arpit Agarwal 
Committed: Fri Jul 27 10:14:01 2018 -0700

--
 .../org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java| 5 +
 1 file changed, 1 insertion(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1c40bc28/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java
index 45bb6b4..2f0a0fc 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java
@@ -110,10 +110,7 @@ class FSDirMkdirOp {
* Create all ancestor directories and return the parent inodes.
*
* @param fsd FSDirectory
-   * @param existing The INodesInPath instance containing all the existing
-   * ancestral INodes
-   * @param children The relative path from the parent towards children,
-   * starting with "/"
+   * @param iip inodes in path to the fs directory
* @param perm the permission of the directory. Note that all ancestors
* created along the path has implicit {@code u+wx} permissions.
* @param inheritPerms if the ancestor directories should inherit permissions


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[12/50] hadoop git commit: YARN-8429. Improve diagnostic message when artifact is not set properly. Contributed by Gour Saha

2018-08-01 Thread xkrogen
YARN-8429. Improve diagnostic message when artifact is not set properly.
   Contributed by Gour Saha


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8d3c068e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8d3c068e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8d3c068e

Branch: refs/heads/HDFS-12943
Commit: 8d3c068e59f18e3f8260713fee83c458aa1d
Parents: 77721f3
Author: Eric Yang 
Authored: Thu Jul 26 20:02:13 2018 -0400
Committer: Eric Yang 
Committed: Thu Jul 26 20:02:13 2018 -0400

--
 .../exceptions/RestApiErrorMessages.java|  6 +-
 .../provider/AbstractClientProvider.java| 14 ++---
 .../defaultImpl/DefaultClientProvider.java  | 22 ---
 .../provider/docker/DockerClientProvider.java   | 15 ++---
 .../provider/tarball/TarballClientProvider.java | 27 
 .../yarn/service/utils/ServiceApiUtil.java  |  4 +-
 .../hadoop/yarn/service/TestServiceApiUtil.java |  9 ++-
 .../providers/TestAbstractClientProvider.java   | 29 -
 .../providers/TestDefaultClientProvider.java| 66 
 9 files changed, 138 insertions(+), 54 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8d3c068e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/RestApiErrorMessages.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/RestApiErrorMessages.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/RestApiErrorMessages.java
index 5b3c72c..f10d884 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/RestApiErrorMessages.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/RestApiErrorMessages.java
@@ -50,6 +50,10 @@ public interface RestApiErrorMessages {
   "Artifact id (like docker image name) is either empty or not provided";
   String ERROR_ARTIFACT_ID_FOR_COMP_INVALID =
   ERROR_ARTIFACT_ID_INVALID + ERROR_SUFFIX_FOR_COMPONENT;
+  String ERROR_ARTIFACT_PATH_FOR_COMP_INVALID = "For component %s with %s "
+  + "artifact, path does not exist: %s";
+  String ERROR_CONFIGFILE_DEST_FILE_FOR_COMP_NOT_ABSOLUTE = "For component %s "
+  + "with %s artifact, dest_file must be a relative path: %s";
 
   String ERROR_RESOURCE_INVALID = "Resource is not provided";
   String ERROR_RESOURCE_FOR_COMP_INVALID =
@@ -89,7 +93,7 @@ public interface RestApiErrorMessages {
   String ERROR_ABSENT_NUM_OF_INSTANCE =
   "Num of instances should appear either globally or per component";
   String ERROR_ABSENT_LAUNCH_COMMAND =
-  "Launch_command is required when type is not DOCKER";
+  "launch_command is required when type is not DOCKER";
 
   String ERROR_QUICKLINKS_FOR_COMP_INVALID = "Quicklinks specified at"
   + " component level, needs corresponding values set at service level";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8d3c068e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/AbstractClientProvider.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/AbstractClientProvider.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/AbstractClientProvider.java
index 672c435..ae79619 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/AbstractClientProvider.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/AbstractClientProvider.java
@@ -68,18 +68,18 @@ public abstract class AbstractClientProvider {
* Validate the artifact.
* @param artifact
*/
-  public abstract void validateArtifact(Artifact artifact, FileSystem
-  fileSystem) throws IOException;
+  public abstract void validateArtifact(Artifact art

[01/50] hadoop git commit: HADOOP-15395. DefaultImpersonationProvider fails to parse proxy user config if username has . in it. Contributed by Ajay Kumar.

2018-08-01 Thread xkrogen
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-12943 9b55946e0 -> 2dad24f73


HADOOP-15395. DefaultImpersonationProvider fails to parse proxy user config if 
username has . in it. Contributed by Ajay Kumar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5f0b9243
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5f0b9243
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5f0b9243

Branch: refs/heads/HDFS-12943
Commit: 5f0b924360b345f491c2d6693882f1069c7f3508
Parents: 3c4fbc6
Author: Mukul Kumar Singh 
Authored: Wed Jul 25 21:09:11 2018 +0530
Committer: Mukul Kumar Singh 
Committed: Wed Jul 25 21:09:11 2018 +0530

--
 .../authorize/DefaultImpersonationProvider.java |   4 +-
 .../TestDefaultImpersonationProvider.java   | 100 +++
 2 files changed, 102 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5f0b9243/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/DefaultImpersonationProvider.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/DefaultImpersonationProvider.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/DefaultImpersonationProvider.java
index 26cd7ab..b766d5c 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/DefaultImpersonationProvider.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/DefaultImpersonationProvider.java
@@ -75,9 +75,9 @@ public class DefaultImpersonationProvider implements 
ImpersonationProvider {
 //   $configPrefix.[ANY].hosts
 //
 String prefixRegEx = configPrefix.replace(".", "\\.");
-String usersGroupsRegEx = prefixRegEx + "[^.]*(" +
+String usersGroupsRegEx = prefixRegEx + "[\\S]*(" +
 Pattern.quote(CONF_USERS) + "|" + Pattern.quote(CONF_GROUPS) + ")";
-String hostsRegEx = prefixRegEx + "[^.]*" + Pattern.quote(CONF_HOSTS);
+String hostsRegEx = prefixRegEx + "[\\S]*" + Pattern.quote(CONF_HOSTS);
 
   // get list of users and groups per proxyuser
 Map allMatchKeys = 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5f0b9243/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/authorize/TestDefaultImpersonationProvider.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/authorize/TestDefaultImpersonationProvider.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/authorize/TestDefaultImpersonationProvider.java
new file mode 100644
index 000..ef86697
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/authorize/TestDefaultImpersonationProvider.java
@@ -0,0 +1,100 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.security.authorize;
+
+import static org.mockito.Mockito.when;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.test.LambdaTestUtils;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.Timeout;
+import org.mockito.Mockito;
+
+/**
+ * Test class for @DefaultImpersonationProvider
+ */
+public class TestDefaultImpersonationProvider {
+
+  private String proxyUser;
+  private String user;
+  private DefaultImpersonationProvider provider;
+  private UserGroupInformation userGroupInformation = Mockito
+  .mock(UserGroupInformation.class);
+  private UserGroupInformation realUserUGI = Mockito
+  .mock(UserGroupInformation.class);
+  private Configuration conf;
+  @Rule
+  public Timeout globalTimeout = new Timeout(1);
+
+  @Before
+  public void setup() {
+conf = new Configuration()

hadoop git commit: YARN-8155. Improve ATSv2 client logging in RM and NM publisher. Contributed by Abhishek Modi.

2018-08-01 Thread rohithsharmaks
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 a7163886c -> 21e416ad2


YARN-8155. Improve ATSv2 client logging in RM and NM publisher. Contributed by 
Abhishek Modi.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/21e416ad
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/21e416ad
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/21e416ad

Branch: refs/heads/branch-2
Commit: 21e416ad27a6e23ac77ead8f79440df841387af3
Parents: a716388
Author: Rohith Sharma K S 
Authored: Wed Aug 1 22:25:53 2018 +0530
Committer: Rohith Sharma K S 
Committed: Wed Aug 1 22:25:53 2018 +0530

--
 .../timelineservice/NMTimelinePublisher.java| 43 +---
 .../metrics/TimelineServiceV2Publisher.java |  8 +++-
 .../collector/TimelineCollectorWebService.java  | 13 --
 3 files changed, 53 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/21e416ad/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/timelineservice/NMTimelinePublisher.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/timelineservice/NMTimelinePublisher.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/timelineservice/NMTimelinePublisher.java
index bba5670..08e3651 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/timelineservice/NMTimelinePublisher.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/timelineservice/NMTimelinePublisher.java
@@ -189,9 +189,20 @@ public class NMTimelinePublisher extends CompositeService {
   LOG.error("Seems like client has been removed before the container"
   + " metric could be published for " + 
container.getContainerId());
 }
-  } catch (IOException | YarnException e) {
+  } catch (IOException e) {
 LOG.error("Failed to publish Container metrics for container "
-+ container.getContainerId(), e);
++ container.getContainerId());
+if (LOG.isDebugEnabled()) {
+  LOG.debug("Failed to publish Container metrics for container "
+  + container.getContainerId(), e);
+}
+  } catch (YarnException e) {
+LOG.error("Failed to publish Container metrics for container "
++ container.getContainerId() + " Error: " + e.getMessage());
+if (LOG.isDebugEnabled()) {
+  LOG.debug("Failed to publish Container metrics for container "
+  + container.getContainerId(), e);
+}
   }
 }
   }
@@ -283,9 +294,20 @@ public class NMTimelinePublisher extends CompositeService {
 LOG.error("Seems like client has been removed before the event could 
be"
 + " published for " + container.getContainerId());
   }
-} catch (IOException | YarnException e) {
+} catch (IOException e) {
+  LOG.error("Failed to publish Container metrics for container "
+  + container.getContainerId());
+  if (LOG.isDebugEnabled()) {
+LOG.debug("Failed to publish Container metrics for container "
++ container.getContainerId(), e);
+  }
+} catch (YarnException e) {
   LOG.error("Failed to publish Container metrics for container "
-  + container.getContainerId(), e);
+  + container.getContainerId() + " Error: " +  e.getMessage());
+  if (LOG.isDebugEnabled()) {
+LOG.debug("Failed to publish Container metrics for container "
++ container.getContainerId(), e);
+  }
 }
   }
 
@@ -314,8 +336,17 @@ public class NMTimelinePublisher extends CompositeService {
 LOG.error("Seems like client has been removed before the entity "
 + "could be published for " + entity);
   }
-} catch (Exception e) {
-  LOG.error("Error when publishing entity " + entity, e);
+} catch (IOException e) {
+  LOG.error("Error when publishing entity " + entity);
+  if (LOG.isDebugEnabled()) {
+LOG.debug("Error when publishing entity " + entity, e);
+  }
+} catch (YarnException e) {
+  LOG.error("Error when publishing entity " + entity + " Error: " +
+  e.getMessage());
+  if (LOG.isDebugEnabled()) {
+LOG.debug("Error when publishing entity " + entity, e);
+  }
 }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop

hadoop git commit: YARN-8403. Change the log level for fail to download resource from INFO to ERROR. Contributed by Eric Yang

2018-08-01 Thread billie
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 a6525e074 -> 2a94823f3


YARN-8403. Change the log level for fail to download resource from INFO to 
ERROR. Contributed by Eric Yang

(cherry picked from commit 67c65da261464a0dccb63dc27668109a52e05714)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2a94823f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2a94823f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2a94823f

Branch: refs/heads/branch-3.1
Commit: 2a94823f32f65351a57a5fac5556508beb251fc5
Parents: a6525e0
Author: Billie Rinaldi 
Authored: Wed Aug 1 08:51:18 2018 -0700
Committer: Billie Rinaldi 
Committed: Wed Aug 1 08:58:15 2018 -0700

--
 .../localizer/ResourceLocalizationService.java  | 16 +++-
 .../localizer/TestResourceLocalizationService.java  |  3 +++
 2 files changed, 14 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2a94823f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
index 12251a2..142387e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
@@ -972,11 +972,17 @@ public class ResourceLocalizationService extends 
CompositeService
 .getDU(new File(local.toUri();
   assoc.getResource().unlock();
 } catch (ExecutionException e) {
-  LOG.info("Failed to download resource " + assoc.getResource(),
-  e.getCause());
-  LocalResourceRequest req = assoc.getResource().getRequest();
-  publicRsrc.handle(new ResourceFailedLocalizationEvent(req,
-  e.getMessage()));
+  String user = assoc.getContext().getUser();
+  ApplicationId applicationId = 
assoc.getContext().getContainerId().getApplicationAttemptId().getApplicationId();
+  LocalResourcesTracker tracker =
+getLocalResourcesTracker(LocalResourceVisibility.APPLICATION, 
user, applicationId);
+  final String diagnostics = "Failed to download resource " +
+  assoc.getResource() + " " + e.getCause();
+  tracker.handle(new ResourceFailedLocalizationEvent(
+  assoc.getResource().getRequest(), diagnostics));
+  publicRsrc.handle(new ResourceFailedLocalizationEvent(
+  assoc.getResource().getRequest(), diagnostics));
+  LOG.error(diagnostics);
   assoc.getResource().unlock();
 } catch (CancellationException e) {
   // ignore; shutting down

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2a94823f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceLocalizationService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceLocalizationService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceLocalizationService.java
index 4d03f15..2b9148e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceLocalizationService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceLocalizationService.java
@@ -2398,6 +2398,9 @@ public class TestResourceLocalizationService {

hadoop git commit: YARN-8403. Change the log level for fail to download resource from INFO to ERROR. Contributed by Eric Yang

2018-08-01 Thread billie
Repository: hadoop
Updated Branches:
  refs/heads/trunk d920b9db7 -> 67c65da26


YARN-8403. Change the log level for fail to download resource from INFO to 
ERROR. Contributed by Eric Yang


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/67c65da2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/67c65da2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/67c65da2

Branch: refs/heads/trunk
Commit: 67c65da261464a0dccb63dc27668109a52e05714
Parents: d920b9d
Author: Billie Rinaldi 
Authored: Wed Aug 1 08:51:18 2018 -0700
Committer: Billie Rinaldi 
Committed: Wed Aug 1 08:51:40 2018 -0700

--
 .../localizer/ResourceLocalizationService.java  | 16 +++-
 .../localizer/TestResourceLocalizationService.java  |  3 +++
 2 files changed, 14 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/67c65da2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
index 4ca6720..3834ece 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
@@ -969,11 +969,17 @@ public class ResourceLocalizationService extends 
CompositeService
 .getDU(new File(local.toUri();
   assoc.getResource().unlock();
 } catch (ExecutionException e) {
-  LOG.info("Failed to download resource " + assoc.getResource(),
-  e.getCause());
-  LocalResourceRequest req = assoc.getResource().getRequest();
-  publicRsrc.handle(new ResourceFailedLocalizationEvent(req,
-  e.getMessage()));
+  String user = assoc.getContext().getUser();
+  ApplicationId applicationId = 
assoc.getContext().getContainerId().getApplicationAttemptId().getApplicationId();
+  LocalResourcesTracker tracker =
+getLocalResourcesTracker(LocalResourceVisibility.APPLICATION, 
user, applicationId);
+  final String diagnostics = "Failed to download resource " +
+  assoc.getResource() + " " + e.getCause();
+  tracker.handle(new ResourceFailedLocalizationEvent(
+  assoc.getResource().getRequest(), diagnostics));
+  publicRsrc.handle(new ResourceFailedLocalizationEvent(
+  assoc.getResource().getRequest(), diagnostics));
+  LOG.error(diagnostics);
   assoc.getResource().unlock();
 } catch (CancellationException e) {
   // ignore; shutting down

http://git-wip-us.apache.org/repos/asf/hadoop/blob/67c65da2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceLocalizationService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceLocalizationService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceLocalizationService.java
index 4d03f15..2b9148e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceLocalizationService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceLocalizationService.java
@@ -2398,6 +2398,9 @@ public class TestResourceLocalizationService {
   // Waiting for resource to change into FAILED state.
   Assert.assertTrue

hadoop git commit: YARN-8595. [UI2] Container diagnostic information is missing from container page. Contributed by Akhil PB.

2018-08-01 Thread sunilg
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 ff35f0c30 -> a6525e074


YARN-8595. [UI2] Container diagnostic information is missing from container 
page. Contributed by Akhil PB.

(cherry picked from commit d920b9db77be44adc4f8a2a0c2df889af82be04f)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a6525e07
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a6525e07
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a6525e07

Branch: refs/heads/branch-3.1
Commit: a6525e07442658f6d3a9ac22626ebbf4b54e0756
Parents: ff35f0c
Author: Sunil G 
Authored: Wed Aug 1 14:27:54 2018 +0530
Committer: Sunil G 
Committed: Wed Aug 1 14:28:35 2018 +0530

--
 .../main/webapp/app/models/yarn-app-attempt.js  |  1 +
 .../app/models/yarn-timeline-container.js   |  1 +
 .../webapp/app/serializers/yarn-app-attempt.js  |  3 +-
 .../app/serializers/yarn-timeline-container.js  |  6 +--
 .../src/main/webapp/app/styles/app.scss |  9 
 .../templates/components/app-attempt-table.hbs  |  6 +++
 .../app/templates/components/timeline-view.hbs  | 44 ++--
 7 files changed, 51 insertions(+), 19 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a6525e07/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-app-attempt.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-app-attempt.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-app-attempt.js
index cffe198..f483695 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-app-attempt.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-app-attempt.js
@@ -32,6 +32,7 @@ export default DS.Model.extend({
   logsLink: DS.attr('string'),
   state: DS.attr('string'),
   appAttemptId: DS.attr('string'),
+  diagnosticsInfo: DS.attr('string'),
 
   appId: Ember.computed("id",function () {
 var id = this.get("id");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a6525e07/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-timeline-container.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-timeline-container.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-timeline-container.js
index 7482a2f..9384418 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-timeline-container.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-timeline-container.js
@@ -31,6 +31,7 @@ export default DS.Model.extend({
   containerState: DS.attr('string'),
   nodeHttpAddress: DS.attr('string'),
   nodeId: DS.attr('string'),
+  diagnosticsInfo: DS.attr('string'),
 
   startTs: function() {
 return Converter.dateToTimeStamp(this.get("startedTime"));

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a6525e07/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-app-attempt.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-app-attempt.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-app-attempt.js
index f8f598b..55f484b 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-app-attempt.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-app-attempt.js
@@ -40,7 +40,8 @@ export default DS.JSONAPISerializer.extend({
   hosts: payload.host,
   state: payload.appAttemptState,
   logsLink: payload.logsLink,
-  appAttemptId: payload.appAttemptId
+  appAttemptId: payload.appAttemptId,
+  diagnosticsInfo: payload.diagnosticsInfo
 }
   };
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a6525e07/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-timeline-container.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-timeline-container.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-timeline-container.js
index 1322972..99ab6c4 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-timeline-container.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializ

hadoop git commit: YARN-8595. [UI2] Container diagnostic information is missing from container page. Contributed by Akhil PB.

2018-08-01 Thread sunilg
Repository: hadoop
Updated Branches:
  refs/heads/trunk a48a0cc7f -> d920b9db7


YARN-8595. [UI2] Container diagnostic information is missing from container 
page. Contributed by Akhil PB.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d920b9db
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d920b9db
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d920b9db

Branch: refs/heads/trunk
Commit: d920b9db77be44adc4f8a2a0c2df889af82be04f
Parents: a48a0cc
Author: Sunil G 
Authored: Wed Aug 1 14:27:54 2018 +0530
Committer: Sunil G 
Committed: Wed Aug 1 14:27:54 2018 +0530

--
 .../main/webapp/app/models/yarn-app-attempt.js  |  1 +
 .../app/models/yarn-timeline-container.js   |  1 +
 .../webapp/app/serializers/yarn-app-attempt.js  |  3 +-
 .../app/serializers/yarn-timeline-container.js  |  6 +--
 .../src/main/webapp/app/styles/app.scss |  9 
 .../templates/components/app-attempt-table.hbs  |  6 +++
 .../app/templates/components/timeline-view.hbs  | 44 ++--
 7 files changed, 51 insertions(+), 19 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d920b9db/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-app-attempt.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-app-attempt.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-app-attempt.js
index cffe198..f483695 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-app-attempt.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-app-attempt.js
@@ -32,6 +32,7 @@ export default DS.Model.extend({
   logsLink: DS.attr('string'),
   state: DS.attr('string'),
   appAttemptId: DS.attr('string'),
+  diagnosticsInfo: DS.attr('string'),
 
   appId: Ember.computed("id",function () {
 var id = this.get("id");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d920b9db/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-timeline-container.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-timeline-container.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-timeline-container.js
index 7482a2f..9384418 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-timeline-container.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-timeline-container.js
@@ -31,6 +31,7 @@ export default DS.Model.extend({
   containerState: DS.attr('string'),
   nodeHttpAddress: DS.attr('string'),
   nodeId: DS.attr('string'),
+  diagnosticsInfo: DS.attr('string'),
 
   startTs: function() {
 return Converter.dateToTimeStamp(this.get("startedTime"));

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d920b9db/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-app-attempt.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-app-attempt.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-app-attempt.js
index f8f598b..55f484b 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-app-attempt.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-app-attempt.js
@@ -40,7 +40,8 @@ export default DS.JSONAPISerializer.extend({
   hosts: payload.host,
   state: payload.appAttemptState,
   logsLink: payload.logsLink,
-  appAttemptId: payload.appAttemptId
+  appAttemptId: payload.appAttemptId,
+  diagnosticsInfo: payload.diagnosticsInfo
 }
   };
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d920b9db/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-timeline-container.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-timeline-container.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-timeline-container.js
index 1322972..99ab6c4 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-timeline-container.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-timeline-container.js
@@ -22,11 +22,6 @@ import Converter from 'yarn-ui

hadoop git commit: YARN-8606. Opportunistic scheduling does not work post RM failover. Contributed by Bibin A Chundatt.

2018-08-01 Thread sunilg
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1.1 0f66a0d82 -> e4f530a9a


YARN-8606. Opportunistic scheduling does not work post RM failover. Contributed 
by Bibin A Chundatt.

(cherry picked from commit a48a0cc7fd8e7ac1c07b260e6078077824f27c35)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e4f530a9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e4f530a9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e4f530a9

Branch: refs/heads/branch-3.1.1
Commit: e4f530a9afeee1222c323e1ef3f6cc99eddc1a41
Parents: 0f66a0d
Author: Sunil G 
Authored: Wed Aug 1 12:17:18 2018 +0530
Committer: Sunil G 
Committed: Wed Aug 1 12:38:57 2018 +0530

--
 ...pportunisticContainerAllocatorAMService.java |  4 +-
 .../server/resourcemanager/ResourceManager.java | 37 ++--
 .../yarn/server/resourcemanager/TestRMHA.java   | 44 
 3 files changed, 72 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e4f530a9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/OpportunisticContainerAllocatorAMService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/OpportunisticContainerAllocatorAMService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/OpportunisticContainerAllocatorAMService.java
index ce425df..9c9b0eb 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/OpportunisticContainerAllocatorAMService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/OpportunisticContainerAllocatorAMService.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.yarn.server.resourcemanager;
 
+import com.google.common.annotations.VisibleForTesting;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -406,7 +407,8 @@ public class OpportunisticContainerAllocatorAMService
 return nodeMonitor.getThresholdCalculator();
   }
 
-  private synchronized List getLeastLoadedNodes() {
+  @VisibleForTesting
+  synchronized List getLeastLoadedNodes() {
 long currTime = System.currentTimeMillis();
 if ((currTime - lastCacheUpdateTime > cacheRefreshInterval)
 || (cachedNodes == null)) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e4f530a9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
index 0b7e87c..f14d440 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
@@ -757,9 +757,11 @@ public class ResourceManager extends CompositeService 
implements Recoverable {
   }
 
   masterService = createApplicationMasterService();
+  createAndRegisterOpportunisticDispatcher(masterService);
   addService(masterService) ;
   rmContext.setApplicationMasterService(masterService);
 
+
   applicationACLsManager = new ApplicationACLsManager(conf);
 
   queueACLsManager = createQueueACLsManager(scheduler, conf);
@@ -807,6 +809,23 @@ public class ResourceManager extends CompositeService 
implements Recoverable {
   super.serviceInit(conf);
 }
 
+private void createAndRegisterOpportunisticDispatcher(
+ApplicationMasterService service) {
+  if (!isOpportunisticSchedulingEnabled(conf)) {
+return;
+  }
+  EventDispatcher oppContainerAllocEventDispatcher = new EventDispatcher(
+  (OpportunisticContainerAllocatorAMService) service,
+  OpportunisticContainerAllocatorAMService.class.getName());

hadoop git commit: YARN-8591. [ATSv2] NPE while checking for entity acl in non-secure cluster. Contributed by Rohith Sharma K S.

2018-08-01 Thread sunilg
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1.1 433eb1462 -> 0f66a0d82


YARN-8591. [ATSv2] NPE while checking for entity acl in non-secure cluster. 
Contributed by Rohith Sharma K S.

(cherry picked from commit 63e08ec071852640babea9e39780327a0907712a)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0f66a0d8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0f66a0d8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0f66a0d8

Branch: refs/heads/branch-3.1.1
Commit: 0f66a0d825a079f0758bac6354732c67705803cd
Parents: 433eb14
Author: Sunil G 
Authored: Mon Jul 30 14:48:04 2018 +0530
Committer: Sunil G 
Committed: Wed Aug 1 12:36:56 2018 +0530

--
 .../server/timelineservice/reader/TimelineReaderWebServices.java | 3 ++-
 .../reader/TestTimelineReaderWebServicesBasicAcl.java| 4 
 2 files changed, 6 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0f66a0d8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServices.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServices.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServices.java
index 7f96bfb..b10b705 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServices.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServices.java
@@ -3532,7 +3532,8 @@ public class TimelineReaderWebServices {
   static boolean checkAccess(TimelineReaderManager readerManager,
   UserGroupInformation ugi, String entityUser) {
 if (isDisplayEntityPerUserFilterEnabled(readerManager.getConfig())) {
-  if (!validateAuthUserWithEntityUser(readerManager, ugi, entityUser)) {
+  if (ugi != null && !validateAuthUserWithEntityUser(readerManager, ugi,
+  entityUser)) {
 String userName = ugi.getShortUserName();
 String msg = "User " + userName
 + " is not allowed to read TimelineService V2 data.";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0f66a0d8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesBasicAcl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesBasicAcl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesBasicAcl.java
index 4239bf0..6651457 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesBasicAcl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesBasicAcl.java
@@ -88,6 +88,10 @@ public class TestTimelineReaderWebServicesBasicAcl {
 Assert.assertFalse(TimelineReaderWebServices
 .validateAuthUserWithEntityUser(manager, null, user1));
 
+// true because ugi is null
+Assert.assertTrue(
+TimelineReaderWebServices.checkAccess(manager, null, user1));
+
 // incoming ugi is admin asking for entity owner user1
 Assert.assertTrue(
 TimelineReaderWebServices.checkAccess(manager, adminUgi, user1));


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org