hadoop git commit: HDFS-11491. Ozone: SCM: Add close container RPC. Contributed by Anu Engineer.

2017-03-14 Thread xyao
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7240 72f4e3f34 -> 65487b579


HDFS-11491. Ozone: SCM: Add close container RPC. Contributed by Anu Engineer.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/65487b57
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/65487b57
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/65487b57

Branch: refs/heads/HDFS-7240
Commit: 65487b579e0a68ccf0860ed479c2de965251cbe3
Parents: 72f4e3f
Author: Xiaoyu Yao 
Authored: Tue Mar 14 21:28:23 2017 -0700
Committer: Xiaoyu Yao 
Committed: Tue Mar 14 21:28:44 2017 -0700

--
 .../main/proto/DatanodeContainerProtocol.proto  |  20 
 .../container/common/helpers/ContainerData.java |  50 
 .../common/impl/ContainerManagerImpl.java   |  53 -
 .../ozone/container/common/impl/Dispatcher.java |  80 +++--
 .../common/interfaces/ContainerManager.java |  19 +++
 .../ozone/container/ContainerTestHelper.java|  18 +++
 .../container/ozoneimpl/TestOzoneContainer.java | 119 ++-
 7 files changed, 342 insertions(+), 17 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/65487b57/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/DatanodeContainerProtocol.proto
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/DatanodeContainerProtocol.proto
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/DatanodeContainerProtocol.proto
index 6566a71..dfd4bc5 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/DatanodeContainerProtocol.proto
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/DatanodeContainerProtocol.proto
@@ -67,6 +67,12 @@ import "hdfs.proto";
  *  13. ListChunk - Given a Container/Key returns the list of Chunks.
  *
  *  14. CompactChunk - Re-writes a chunk based on Offsets.
+ *
+ *  15. PutSmallFile - A single RPC that combines both putKey and WriteChunk.
+ *
+ *  16. GetSmallFile - A single RPC that combines both getKey and ReadChunk.
+ *
+ *  17. CloseContainer - Closes an open container and makes it immutable.
  */
 
 enum Type {
@@ -90,6 +96,7 @@ enum Type {
   /** Combines Key and Chunk Operation into Single RPC. */
   PutSmallFile = 15;
   GetSmallFile = 16;
+  CloseContainer = 17;
 
 }
 
@@ -116,6 +123,7 @@ enum Result {
   INVALID_ARGUMENT = 19;
   PUT_SMALL_FILE_ERROR = 20;
   GET_SMALL_FILE_ERROR = 21;
+  CLOSED_CONTAINER_IO = 22;
 }
 
 message ContainerCommandRequestProto {
@@ -147,6 +155,7 @@ message ContainerCommandRequestProto {
 
   optional   PutSmallFileRequestProto putSmallFile = 16;
   optional   GetSmallFileRequestProto getSmallFile = 17;
+  optional   CloseContainerRequestProto closeContainer = 18;
 }
 
 message ContainerCommandResponseProto {
@@ -174,6 +183,7 @@ message ContainerCommandResponseProto {
 
   optional PutSmallFileResponseProto putSmallFile = 19;
   optional GetSmallFileResponseProto getSmallFile = 20;
+  optional CloseContainerResponseProto closeContainer = 21;
 
 }
 
@@ -194,6 +204,8 @@ message ContainerData {
   repeated KeyValue metadata = 2;
   optional string dbPath = 3;
   optional string containerPath = 4;
+  optional bool open = 5 [default = true];
+  optional string hash = 6;
 }
 
 message ContainerMeta {
@@ -246,6 +258,14 @@ message  ListContainerResponseProto {
   repeated ContainerData containerData = 1;
 }
 
+message CloseContainerRequestProto {
+  required Pipeline pipeline = 1;
+}
+
+message CloseContainerResponseProto {
+  optional Pipeline pipeline = 1;
+  optional string hash = 2;
+}
 
 message KeyData {
   required string containerName = 1;

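The two new messages above are enough to sketch the client side of the RPC. A minimal, hedged example using the Java builders protobuf generates from this file; the ContainerProtos outer class name, the pipeline.getProtobufMessage() accessor, the client.sendCommand() transport, and any other required envelope fields (e.g. a trace ID) are assumptions for illustration, not part of this patch:

    // Inner request: the Pipeline is the only field CloseContainer needs.
    ContainerProtos.CloseContainerRequestProto closeRequest =
        ContainerProtos.CloseContainerRequestProto.newBuilder()
            .setPipeline(pipeline.getProtobufMessage())   // assumed accessor
            .build();

    // Envelope: tag the command with the new enum value (17) and attach
    // the inner request via the new optional field (18).
    ContainerProtos.ContainerCommandRequestProto request =
        ContainerProtos.ContainerCommandRequestProto.newBuilder()
            .setCmdType(ContainerProtos.Type.CloseContainer)
            .setCloseContainer(closeRequest)
            .build();

    ContainerProtos.ContainerCommandResponseProto response =
        client.sendCommand(request);                      // assumed transport

Writes that arrive after a successful close should come back with the new CLOSED_CONTAINER_IO result code.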
http://git-wip-us.apache.org/repos/asf/hadoop/blob/65487b57/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerData.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerData.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerData.java
index c6c432b..91f7cbe 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerData.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerData.java
@@ -38,6 +38,8 @@ public class ContainerData {
   private String dbPath;  // Path to Level DB Store.
   // Path to Physical file system where container and checksum are stored.
   private String containerFilePath;
+  private boolean open;
+  private String hash;
 
   /**
* Constructs a  ContainerData Object.
@@ -71,6 +73,15 @@ public class 

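The archive truncates this hunk before the new methods. As a hedged sketch of what the open/hash fields imply, with accessor names guessed in the conventional style (none of them are confirmed by the visible diff):

    // Sketch only: state transitions implied by the proto's `open`
    // (default = true) and `hash` fields.
    public synchronized boolean isOpen() {
      return open;            // containers start out open and writable
    }

    public synchronized void closeContainer() {
      this.open = false;      // a closed container is immutable
    }

    public synchronized void setHash(String hash) {
      this.hash = hash;       // checksum captured at close time
    }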
hadoop git commit: HDFS-11487. Ozone: Exclude container protobuf files from findbugs check (amends previous fix). Contributed by Yuanbo Liu.

2017-03-14 Thread xyao
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7240 39058dd60 -> 72f4e3f34


HDFS-11487. Ozone: Exclude container protobuf files from findbugs check (amends previous fix). Contributed by Yuanbo Liu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/72f4e3f3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/72f4e3f3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/72f4e3f3

Branch: refs/heads/HDFS-7240
Commit: 72f4e3f34788bce26908693da3c0b35ce0fbf45a
Parents: 39058dd
Author: Xiaoyu Yao 
Authored: Tue Mar 14 21:18:44 2017 -0700
Committer: Xiaoyu Yao 
Committed: Tue Mar 14 21:18:44 2017 -0700

--
 .../hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/72f4e3f3/hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml 
b/hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml
index 699ed89..30525f1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml
@@ -62,6 +62,6 @@
   
 
   
-
+
   
 





hadoop git commit: HDFS-9705. Refine the behaviour of getFileChecksum when length = 0. Contributed by Kai Zheng and SammiChen.

2017-03-14 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/trunk 4c66a8d19 -> cc1292e73


HDFS-9705. Refine the behaviour of getFileChecksum when length = 0. Contributed by Kai Zheng and SammiChen.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cc1292e7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cc1292e7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cc1292e7

Branch: refs/heads/trunk
Commit: cc1292e73acd39c1f1023ad4841ffe30176f7daf
Parents: 4c66a8d
Author: Andrew Wang 
Authored: Tue Mar 14 16:41:10 2017 -0700
Committer: Andrew Wang 
Committed: Tue Mar 14 16:41:10 2017 -0700

--
 .../java/org/apache/hadoop/hdfs/DFSClient.java  | 10 +++--
 .../apache/hadoop/hdfs/FileChecksumHelper.java  | 42 
 .../server/datanode/BlockChecksumHelper.java|  6 +--
 .../hadoop/hdfs/TestDistributedFileSystem.java  | 22 +-
 4 files changed, 48 insertions(+), 32 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cc1292e7/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index ae1d821..aaf8bdd 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -1731,10 +1731,14 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
 checkOpen();
 Preconditions.checkArgument(length >= 0);
 
-LocatedBlocks blockLocations = getBlockLocations(src, length);
+LocatedBlocks blockLocations = null;
+FileChecksumHelper.FileChecksumComputer maker = null;
+ErasureCodingPolicy ecPolicy = null;
+if (length > 0) {
+  blockLocations = getBlockLocations(src, length);
+  ecPolicy = blockLocations.getErasureCodingPolicy();
+}
 
-FileChecksumHelper.FileChecksumComputer maker;
-ErasureCodingPolicy ecPolicy = blockLocations.getErasureCodingPolicy();
 maker = ecPolicy != null ?
 new FileChecksumHelper.StripedFileNonStripedChecksumComputer(src,
 length, blockLocations, namenode, this, ecPolicy) :

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cc1292e7/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/FileChecksumHelper.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/FileChecksumHelper.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/FileChecksumHelper.java
index fe462f2..689d46d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/FileChecksumHelper.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/FileChecksumHelper.java
@@ -95,11 +95,13 @@ final class FileChecksumHelper {
   this.client = client;
 
   this.remaining = length;
-  if (src.contains(HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR_SEPARATOR)) {
-this.remaining = Math.min(length, blockLocations.getFileLength());
-  }
 
-  this.locatedBlocks = blockLocations.getLocatedBlocks();
+  if (blockLocations != null) {
+if (src.contains(HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR_SEPARATOR)) {
+  this.remaining = Math.min(length, blockLocations.getFileLength());
+}
+this.locatedBlocks = blockLocations.getLocatedBlocks();
+  }
 }
 
 String getSrc() {
@@ -203,9 +205,23 @@ final class FileChecksumHelper {
  * @throws IOException
  */
 void compute() throws IOException {
-  checksumBlocks();
-
-  fileChecksum = makeFinalResult();
+  /**
+   * request length is 0 or the file is empty, return one with the
+   * magic entry that matches what previous hdfs versions return.
+   */
+  if (locatedBlocks == null || locatedBlocks.isEmpty()) {
+// Explicitly specified here in case the default DataOutputBuffer
+// buffer length value is changed in future. This matters because the
+// fixed value 32 has to be used to repeat the magic value for previous
+// HDFS version.
+final int lenOfZeroBytes = 32;
+byte[] emptyBlockMd5 = new byte[lenOfZeroBytes];
+MD5Hash fileMD5 = MD5Hash.digest(emptyBlockMd5);
+fileChecksum =  new MD5MD5CRC32GzipFileChecksum(0, 0, fileMD5);
+  } else {
+checksumBlocks();
+fileChecksum = makeFinalResult();
+  }

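The change is easiest to see from the caller's side. A hedged, test-style sketch (FileSystem.getFileChecksum(Path) is the stable public API; the path and the fs handle are illustrative):

    // A zero-length file, or an explicit length of 0, now yields the fixed
    // "magic" MD5MD5CRC32 checksum that older HDFS releases returned (the
    // MD5 of 32 zero bytes) instead of dereferencing block locations that
    // are no longer fetched for length == 0.
    Path empty = new Path("/tmp/empty");               // illustrative path
    fs.create(empty).close();                          // zero-length file
    FileChecksum checksum = fs.getFileChecksum(empty); // no NPE any more
    assertNotNull(checksum);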
[32/33] hadoop git commit: YARN-5946: Create YarnConfigurationStore interface and InMemoryConfigurationStore class. Contributed by Jonathan Hung

2017-03-14 Thread jhung
YARN-5946: Create YarnConfigurationStore interface and InMemoryConfigurationStore class. Contributed by Jonathan Hung


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/874b9b81
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/874b9b81
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/874b9b81

Branch: refs/heads/YARN-5734
Commit: 874b9b8150478762e59d5c9c0bdf29f27fc6
Parents: dcbebf5
Author: Xuan 
Authored: Fri Feb 24 15:58:12 2017 -0800
Committer: Jonathan Hung 
Committed: Tue Mar 14 16:29:01 2017 -0700

--
 .../conf/InMemoryConfigurationStore.java|  86 +++
 .../capacity/conf/YarnConfigurationStore.java   | 154 +++
 .../conf/TestYarnConfigurationStore.java|  70 +
 3 files changed, 310 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/874b9b81/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/InMemoryConfigurationStore.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/InMemoryConfigurationStore.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/InMemoryConfigurationStore.java
new file mode 100644
index 000..a208fb9
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/conf/InMemoryConfigurationStore.java
@@ -0,0 +1,86 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.conf;
+
+import org.apache.hadoop.conf.Configuration;
+
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * A default implementation of {@link YarnConfigurationStore}. Doesn't offer
+ * persistent configuration storage, just stores the configuration in memory.
+ */
+public class InMemoryConfigurationStore implements YarnConfigurationStore {
+
+  private Configuration schedConf;
+  private LinkedList<LogMutation> pendingMutations;
+  private long pendingId;
+
+  @Override
+  public void initialize(Configuration conf, Configuration schedConf) {
+this.schedConf = schedConf;
+this.pendingMutations = new LinkedList<>();
+this.pendingId = 0;
+  }
+
+  @Override
+  public synchronized long logMutation(LogMutation logMutation) {
+logMutation.setId(++pendingId);
+pendingMutations.add(logMutation);
+return pendingId;
+  }
+
+  @Override
+  public synchronized boolean confirmMutation(long id, boolean isValid) {
+LogMutation mutation = pendingMutations.poll();
+// If confirmMutation is called out of order, discard mutations until id
+// is reached.
+while (mutation != null) {
+  if (mutation.getId() == id) {
+if (isValid) {
+  Map<String, String> mutations = mutation.getUpdates();
+  for (Map.Entry<String, String> kv : mutations.entrySet()) {
+schedConf.set(kv.getKey(), kv.getValue());
+  }
+}
+return true;
+  }
+  mutation = pendingMutations.poll();
+}
+return false;
+  }
+
+  @Override
+  public synchronized Configuration retrieve() {
+return schedConf;
+  }
+
+  @Override
+  public synchronized List<LogMutation> getPendingMutations() {
+return pendingMutations;
+  }
+
+  @Override
+  public List<LogMutation> getConfirmedConfHistory(long fromId) {
+// Unimplemented.
+return null;
+  }
+}

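A hedged usage sketch of the store's log-then-confirm protocol, written as it might appear inside a test method (the LogMutation constructor signature is an assumption; only setId/getId/getUpdates are visible in this diff):

    Map<String, String> updates = new HashMap<>();
    updates.put("yarn.scheduler.capacity.root.queues", "default,test");

    InMemoryConfigurationStore store = new InMemoryConfigurationStore();
    store.initialize(new Configuration(), new Configuration());

    // Stage the mutation, then confirm it once validation succeeds.
    LogMutation mutation = new LogMutation(updates, "admin"); // assumed ctor
    long id = store.logMutation(mutation);    // logged, not yet applied
    store.confirmMutation(id, true);          // copies getUpdates() into schedConf
    Configuration current = store.retrieve(); // now reflects the new keys

Note the out-of-order handling in confirmMutation above: confirming id N discards any earlier mutations still pending in the queue.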

[28/33] hadoop git commit: Revert "HADOOP-14170. FileSystemContractBaseTest is not cleaning up test directory clearly. Contributed by Mingliang Liu"

2017-03-14 Thread jhung
Revert "HADOOP-14170. FileSystemContractBaseTest is not cleaning up test directory clearly. Contributed by Mingliang Liu"

This reverts commit b8c69557b7a23ff9c4c0b2c9d595338a08b873f1.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e6cda581
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e6cda581
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e6cda581

Branch: refs/heads/YARN-5734
Commit: e6cda5819b1f4bbdcb12487260b1e3b787e11879
Parents: 871dc42
Author: Mingliang Liu 
Authored: Tue Mar 14 12:54:46 2017 -0700
Committer: Mingliang Liu 
Committed: Tue Mar 14 13:03:42 2017 -0700

--
 .../hadoop/fs/FileSystemContractBaseTest.java   | 247 +++
 .../fs/TestRawLocalFileSystemContract.java  |  24 +-
 .../fs/s3a/ITestS3AFileSystemContract.java  |  39 ++-
 3 files changed, 132 insertions(+), 178 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e6cda581/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java
index 78ba1f9..6247959 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java
@@ -24,9 +24,8 @@ import java.util.ArrayList;
 
 import junit.framework.TestCase;
 
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.security.AccessControlException;
@@ -46,8 +45,8 @@ import org.apache.hadoop.util.StringUtils;
  * 
  */
 public abstract class FileSystemContractBaseTest extends TestCase {
-  private static final Logger LOG =
-  LoggerFactory.getLogger(FileSystemContractBaseTest.class);
+  private static final Log LOG =
+LogFactory.getLog(FileSystemContractBaseTest.class);
 
   protected final static String TEST_UMASK = "062";
   protected FileSystem fs;
@@ -55,46 +54,15 @@ public abstract class FileSystemContractBaseTest extends 
TestCase {
 
   @Override
   protected void tearDown() throws Exception {
-if (fs != null) {
-  // some cases use this absolute path
-  if (rootDirTestEnabled()) {
-cleanupDir(path("/FileSystemContractBaseTest"));
-  }
-  // others use this relative path against test base directory
-  cleanupDir(getTestBaseDir());
-}
-super.tearDown();
-  }
-
-  private void cleanupDir(Path p) {
 try {
-  LOG.info("Deleting " + p);
-  fs.delete(p, true);
+  if (fs != null) {
+fs.delete(path("/test"), true);
+  }
 } catch (IOException e) {
-  LOG.error("Error deleting test dir: " + p, e);
+  LOG.error("Error deleting /test: " + e, e);
 }
   }
-
-  /**
-   * Test base directory for resolving relative test paths.
-   *
-   * The default value is /user/$USER/FileSystemContractBaseTest. Subclass may
-   * set specific test base directory.
-   */
-  protected Path getTestBaseDir() {
-return new Path(fs.getWorkingDirectory(), "FileSystemContractBaseTest");
-  }
-
-  /**
-   * For absolute path return the fully qualified path while for relative path
-   * return the fully qualified path against {@link #getTestBaseDir()}.
-   */
-  protected final Path path(String pathString) {
-Path p = new Path(pathString).makeQualified(fs.getUri(), getTestBaseDir());
-LOG.info("Resolving {} -> {}", pathString, p);
-return p;
-  }
-
+  
   protected int getBlockSize() {
 return 1024;
   }
@@ -113,17 +81,6 @@ public abstract class FileSystemContractBaseTest extends 
TestCase {
   }
 
   /**
-   * Override this if the filesystem does not enable testing root directories.
-   *
-   * If this returns true, the test will create and delete test directories and
-   * files under root directory, which may have side effects, e.g. fail tests
-   * with PermissionDenied exceptions.
-   */
-  protected boolean rootDirTestEnabled() {
-return true;
-  }
-
-  /**
* Override this if the filesystem is not case sensitive
* @return true if the case detection/preservation tests should run
*/
@@ -145,24 +102,24 @@ public abstract class FileSystemContractBaseTest extends 
TestCase {
 Path workDir = path(getDefaultWorkingDirectory());
 assertEquals(workDir, fs.getWorkingDirectory());
 
-

[30/33] hadoop git commit: YARN-6331. Fix flakiness in TestFairScheduler#testDumpState. (Yufei Gu via rchiang)

2017-03-14 Thread jhung
YARN-6331. Fix flakiness in TestFairScheduler#testDumpState. (Yufei Gu via rchiang)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4c66a8d1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4c66a8d1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4c66a8d1

Branch: refs/heads/YARN-5734
Commit: 4c66a8d19b7d503095ad27aeed39d62238b9cb47
Parents: fa67a96
Author: Ray Chiang 
Authored: Tue Mar 14 14:37:18 2017 -0700
Committer: Ray Chiang 
Committed: Tue Mar 14 15:09:47 2017 -0700

--
 .../server/resourcemanager/scheduler/fair/TestFairScheduler.java  | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c66a8d1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
index 537d3d0..baf7434 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
@@ -5201,6 +5201,7 @@ public class TestFairScheduler extends 
FairSchedulerTestBase {
 FSParentQueue parent =
 scheduler.getQueueManager().getParentQueue("parent", false);
 parent.setMaxShare(resource);
+parent.updateDemand();
 
 String parentQueueString = "{Name: root.parent,"
 + " Weight: ,"
@@ -5210,7 +5211,7 @@ public class TestFairScheduler extends 
FairSchedulerTestBase {
 + " MaxShare: ,"
 + " MinShare: ,"
 + " ResourceUsage: ,"
-+ " Demand: ,"
++ " Demand: ,"
 + " MaxAMShare: 0.5,"
 + " Runnable: 0}";
 





[21/33] hadoop git commit: HDFS-11482. Add storage type demand into DFSNetworkTopology#chooseRandom. Contributed by Chen Liang.

2017-03-14 Thread jhung
HDFS-11482. Add storage type demand into DFSNetworkTopology#chooseRandom. Contributed by Chen Liang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9832ae0e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9832ae0e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9832ae0e

Branch: refs/heads/YARN-5734
Commit: 9832ae0ed8853d29072c9ea7031cd2373e6b16f9
Parents: 55796a0
Author: Chen Liang 
Authored: Mon Mar 13 17:30:10 2017 -0700
Committer: Arpit Agarwal 
Committed: Mon Mar 13 17:30:10 2017 -0700

--
 .../org/apache/hadoop/net/InnerNodeImpl.java|   8 +-
 .../net/NetworkTopologyWithNodeGroup.java   |   2 +-
 .../hadoop/hdfs/net/DFSNetworkTopology.java | 289 
 .../hadoop/hdfs/net/DFSTopologyNodeImpl.java| 275 
 .../blockmanagement/DatanodeDescriptor.java |   9 +
 .../apache/hadoop/hdfs/DFSNetworkTopology.java  |  36 --
 .../apache/hadoop/hdfs/DFSTopologyNodeImpl.java | 253 ---
 .../hadoop/hdfs/TestDFSNetworkTopology.java | 260 ---
 .../hadoop/hdfs/net/TestDFSNetworkTopology.java | 449 +++
 9 files changed, 1027 insertions(+), 554 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9832ae0e/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/InnerNodeImpl.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/InnerNodeImpl.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/InnerNodeImpl.java
index 81eaf7f..5a2931b 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/InnerNodeImpl.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/InnerNodeImpl.java
@@ -63,7 +63,7 @@ public class InnerNodeImpl extends NodeBase implements 
InnerNode {
   /** Judge if this node represents a rack
* @return true if it has no child or its children are not InnerNodes
*/
-  boolean isRack() {
+  public boolean isRack() {
 if (children.isEmpty()) {
   return true;
 }
@@ -81,7 +81,7 @@ public class InnerNodeImpl extends NodeBase implements 
InnerNode {
* @param n a node
* @return true if this node is an ancestor of n
*/
-  protected boolean isAncestor(Node n) {
+  public boolean isAncestor(Node n) {
 return getPath(this).equals(NodeBase.PATH_SEPARATOR_STR) ||
   (n.getNetworkLocation()+NodeBase.PATH_SEPARATOR_STR).
   startsWith(getPath(this)+NodeBase.PATH_SEPARATOR_STR);
@@ -92,12 +92,12 @@ public class InnerNodeImpl extends NodeBase implements 
InnerNode {
* @param n a node
* @return true if this node is the parent of n
*/
-  protected boolean isParent(Node n) {
+  public boolean isParent(Node n) {
 return n.getNetworkLocation().equals(getPath(this));
   }
 
   /* Return a child name of this node who is an ancestor of node n */
-  protected String getNextAncestorName(Node n) {
+  public String getNextAncestorName(Node n) {
 if (!isAncestor(n)) {
   throw new IllegalArgumentException(
  this + "is not an ancestor of " + n);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9832ae0e/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopologyWithNodeGroup.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopologyWithNodeGroup.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopologyWithNodeGroup.java
index a20d5fc..bec0fe1 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopologyWithNodeGroup.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopologyWithNodeGroup.java
@@ -308,7 +308,7 @@ public class NetworkTopologyWithNodeGroup extends 
NetworkTopology {
 }
 
 @Override
-boolean isRack() {
+public boolean isRack() {
   // it is node group
   if (getChildren().isEmpty()) {
 return false;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9832ae0e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/DFSNetworkTopology.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/DFSNetworkTopology.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/DFSNetworkTopology.java
new file mode 100644
index 000..ee83dba
--- /dev/null
+++ 

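The new file's contents are cut off by the archive. For orientation, a hedged sketch of the call the commit summary implies; the method name and signature below are assumptions modeled on NetworkTopology#chooseRandom, not confirmed by the visible diff:

    // Pick a random datanode under `scope` that can serve the requested
    // storage type, skipping excluded nodes. (assumed signature)
    Node target = dfsTopology.chooseRandomWithStorageType(
        "/rack1", excludedNodes, StorageType.SSD);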
[24/33] hadoop git commit: YARN-6314. Potential infinite redirection on YARN log redirection web service. Contributed by Xuan Gong.

2017-03-14 Thread jhung
YARN-6314. Potential infinite redirection on YARN log redirection web service. Contributed by Xuan Gong.

(cherry picked from commit 5a9dda796f0e73060ada794ad5752cc6a237ab2e)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/34424e98
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/34424e98
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/34424e98

Branch: refs/heads/YARN-5734
Commit: 34424e98a618a9fefce800746168be2b72e17de9
Parents: 023b941
Author: Junping Du 
Authored: Tue Mar 14 02:56:18 2017 -0700
Committer: Junping Du 
Committed: Tue Mar 14 02:58:07 2017 -0700

--
 .../webapp/AHSWebServices.java  | 32 +++-
 .../webapp/TestAHSWebServices.java  | 17 +++
 .../server/webapp/YarnWebServiceParams.java |  1 +
 .../nodemanager/webapp/NMWebServices.java   |  6 +++-
 .../nodemanager/webapp/TestNMWebServices.java   |  6 
 5 files changed, 54 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/34424e98/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebServices.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebServices.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebServices.java
index c296aaa..6195199 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebServices.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebServices.java
@@ -28,6 +28,7 @@ import java.util.Set;
 
 import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletResponse;
+import javax.ws.rs.DefaultValue;
 import javax.ws.rs.GET;
 import javax.ws.rs.Path;
 import javax.ws.rs.PathParam;
@@ -235,6 +236,8 @@ public class AHSWebServices extends WebServices {
*The container ID
* @param nmId
*The Node Manager NodeId
+   * @param redirected_from_node
+   *Whether this is a redirected request from NM
* @return
*The log file's name and current file size
*/
@@ -245,7 +248,9 @@ public class AHSWebServices extends WebServices {
   @Context HttpServletRequest req,
   @Context HttpServletResponse res,
   @PathParam(YarnWebServiceParams.CONTAINER_ID) String containerIdStr,
-  @QueryParam(YarnWebServiceParams.NM_ID) String nmId) {
+  @QueryParam(YarnWebServiceParams.NM_ID) String nmId,
+  @QueryParam(YarnWebServiceParams.REDIRECTED_FROM_NODE)
+  @DefaultValue("false") boolean redirected_from_node) {
 ContainerId containerId = null;
 init(res);
 try {
@@ -253,6 +258,7 @@ public class AHSWebServices extends WebServices {
 } catch (IllegalArgumentException e) {
   throw new BadRequestException("invalid container id, " + containerIdStr);
 }
+
 ApplicationId appId = containerId.getApplicationAttemptId()
 .getApplicationId();
 AppInfo appInfo;
@@ -297,9 +303,12 @@ public class AHSWebServices extends WebServices {
 // make sure nodeHttpAddress is not null and not empty. Otherwise,
 // we would only get log meta for aggregated logs instead of
 // re-directing the request
-if (nodeHttpAddress == null || nodeHttpAddress.isEmpty()) {
+if (nodeHttpAddress == null || nodeHttpAddress.isEmpty()
+|| redirected_from_node) {
   // return log meta for the aggregated logs if exists.
   // It will also return empty log meta for the local logs.
+  // If this is the redirect request from NM, we should not
+  // re-direct the request back. Simply output the aggregated log meta.
   return getContainerLogMeta(appId, appOwner, null,
   containerIdStr, true);
 }
@@ -338,6 +347,8 @@ public class AHSWebServices extends WebServices {
*the size of the log file
* @param nmId
*The Node Manager NodeId
+   * @param redirected_from_node
+   *Whether this is the redirect request from NM
* @return
*The contents of the container's log file
*/
@@ -352,9 +363,11 @@ public class AHSWebServices extends 

[14/33] hadoop git commit: YARN-6069. CORS support in timeline v2 (Rohith Sharma K S via Varun Saxena)

2017-03-14 Thread jhung
YARN-6069. CORS support in timeline v2 (Rohith Sharma K S via Varun Saxena)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/229c7c9f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/229c7c9f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/229c7c9f

Branch: refs/heads/YARN-5734
Commit: 229c7c9f8922f2b1bfd04b09b834e00d359046ff
Parents: 6d356b6
Author: Varun Saxena 
Authored: Sat Mar 11 02:11:49 2017 +0530
Committer: Varun Saxena 
Committed: Sat Mar 11 04:09:01 2017 +0530

--
 .../src/main/resources/yarn-default.xml  | 11 +++
 .../timelineservice/reader/TimelineReaderServer.java |  9 +
 .../src/site/markdown/TimelineServiceV2.md   |  9 +
 3 files changed, 29 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/229c7c9f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index 645a342..727e2c9 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -3068,4 +3068,15 @@
    <value>64</value>
  </property>

+  <property>
+    <description>
+      Flag to enable cross-origin (CORS) support for timeline service v1.x or
+      Timeline Reader in timeline service v2. For timeline service v2, also add
+      org.apache.hadoop.security.HttpCrossOriginFilterInitializer to the
+      configuration hadoop.http.filter.initializers in core-site.xml.
+    </description>
+    <name>yarn.timeline-service.http-cross-origin.enabled</name>
+    <value>false</value>
+  </property>
+
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/229c7c9f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderServer.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderServer.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderServer.java
index 2835c1b..2faf4b6 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderServer.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderServer.java
@@ -32,6 +32,7 @@ import 
org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.http.HttpServer2;
 import org.apache.hadoop.http.lib.StaticUserWebFilter;
+import org.apache.hadoop.security.HttpCrossOriginFilterInitializer;
 import org.apache.hadoop.service.CompositeService;
 import org.apache.hadoop.util.ExitUtil;
 import org.apache.hadoop.util.ReflectionUtils;
@@ -134,6 +135,14 @@ public class TimelineReaderServer extends CompositeService 
{
 YarnConfiguration.TIMELINE_SERVICE_BIND_HOST,
 WebAppUtils.getTimelineReaderWebAppURL(conf));
 LOG.info("Instantiating TimelineReaderWebApp at " + bindAddress);
+boolean enableCorsFilter = conf.getBoolean(
+YarnConfiguration.TIMELINE_SERVICE_HTTP_CROSS_ORIGIN_ENABLED,
+YarnConfiguration.TIMELINE_SERVICE_HTTP_CROSS_ORIGIN_ENABLED_DEFAULT);
+// setup CORS
+if (enableCorsFilter) {
+  conf.setBoolean(HttpCrossOriginFilterInitializer.PREFIX
+  + HttpCrossOriginFilterInitializer.ENABLED_SUFFIX, true);
+}
 try {
   HttpServer2.Builder builder = new HttpServer2.Builder()
 .setName("timeline")

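Putting the two halves of the description together as a hedged, programmatic sketch (the key strings are verbatim from this patch; setting them on a Configuration mirrors what the yarn-site.xml and core-site.xml entries would do):

    Configuration conf = new Configuration();
    // Enable CORS for the timeline reader (yarn-site.xml equivalent).
    conf.setBoolean("yarn.timeline-service.http-cross-origin.enabled", true);
    // For timeline service v2, the cross-origin filter must also be wired
    // into the common HTTP stack (core-site.xml equivalent).
    conf.set("hadoop.http.filter.initializers",
        "org.apache.hadoop.security.HttpCrossOriginFilterInitializer");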
http://git-wip-us.apache.org/repos/asf/hadoop/blob/229c7c9f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServiceV2.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServiceV2.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServiceV2.md
index dc16803..bcbe0b7 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServiceV2.md
+++ 

[09/33] hadoop git commit: YARN-6310. OutputStreams in AggregatedLogFormat.LogWriter can be left open upon exceptions. Contributed by Haibo Chen

2017-03-14 Thread jhung
YARN-6310. OutputStreams in AggregatedLogFormat.LogWriter can be left open upon exceptions. Contributed by Haibo Chen


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/deb9f569
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/deb9f569
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/deb9f569

Branch: refs/heads/YARN-5734
Commit: deb9f569465bb760e661e60a313dad1605635236
Parents: e06ff18
Author: Jason Lowe 
Authored: Fri Mar 10 11:07:19 2017 -0600
Committer: Jason Lowe 
Committed: Fri Mar 10 11:08:33 2017 -0600

--
 .../logaggregation/AggregatedLogFormat.java | 52 ++--
 1 file changed, 26 insertions(+), 26 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/deb9f569/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogFormat.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogFormat.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogFormat.java
index 02f7782..1b46007 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogFormat.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogFormat.java
@@ -486,34 +486,34 @@ public class AggregatedLogFormat {
 }
 
 private void writeVersion() throws IOException {
-  DataOutputStream out = this.writer.prepareAppendKey(-1);
-  VERSION_KEY.write(out);
-  out.close();
-  out = this.writer.prepareAppendValue(-1);
-  out.writeInt(VERSION);
-  out.close();
+  try (DataOutputStream out = this.writer.prepareAppendKey(-1)) {
+VERSION_KEY.write(out);
+  }
+  try (DataOutputStream out = this.writer.prepareAppendValue(-1)) {
+out.writeInt(VERSION);
+  }
 }
 
 public void writeApplicationOwner(String user) throws IOException {
-  DataOutputStream out = this.writer.prepareAppendKey(-1);
-  APPLICATION_OWNER_KEY.write(out);
-  out.close();
-  out = this.writer.prepareAppendValue(-1);
-  out.writeUTF(user);
-  out.close();
+  try (DataOutputStream out = this.writer.prepareAppendKey(-1)) {
+APPLICATION_OWNER_KEY.write(out);
+  }
+  try (DataOutputStream out = this.writer.prepareAppendValue(-1)) {
+out.writeUTF(user);
+  }
 }
 
    public void writeApplicationACLs(Map<ApplicationAccessType, String> appAcls)
 throws IOException {
-  DataOutputStream out = this.writer.prepareAppendKey(-1);
-  APPLICATION_ACL_KEY.write(out);
-  out.close();
-  out = this.writer.prepareAppendValue(-1);
-  for (Entry<ApplicationAccessType, String> entry : appAcls.entrySet()) {
-out.writeUTF(entry.getKey().toString());
-out.writeUTF(entry.getValue());
+  try (DataOutputStream out = this.writer.prepareAppendKey(-1)) {
+APPLICATION_ACL_KEY.write(out);
+  }
+  try (DataOutputStream out = this.writer.prepareAppendValue(-1)) {
+for (Entry<ApplicationAccessType, String> entry : appAcls.entrySet()) {
+  out.writeUTF(entry.getKey().toString());
+  out.writeUTF(entry.getValue());
+}
   }
-  out.close();
 }
 
 public void append(LogKey logKey, LogValue logValue) throws IOException {
@@ -522,12 +522,12 @@ public class AggregatedLogFormat {
   if (pendingUploadFiles.size() == 0) {
 return;
   }
-  DataOutputStream out = this.writer.prepareAppendKey(-1);
-  logKey.write(out);
-  out.close();
-  out = this.writer.prepareAppendValue(-1);
-  logValue.write(out, pendingUploadFiles);
-  out.close();
+  try (DataOutputStream out = this.writer.prepareAppendKey(-1)) {
+logKey.write(out);
+  }
+  try (DataOutputStream out = this.writer.prepareAppendValue(-1)) {
+logValue.write(out, pendingUploadFiles);
+  }
 }
 
 public void close() {

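The whole patch is one pattern applied repeatedly: try-with-resources closes the stream even when a write throws, which the old hand-rolled out.close() calls did not guarantee. A self-contained illustration (the file name is arbitrary):

    import java.io.DataOutputStream;
    import java.io.FileOutputStream;
    import java.io.IOException;

    public class TryWithResourcesDemo {
      public static void main(String[] args) throws IOException {
        // If writeInt threw here, close() would still run; in the old
        // style, an exception skipped the explicit out.close() entirely.
        try (DataOutputStream out =
                 new DataOutputStream(new FileOutputStream("version.bin"))) {
          out.writeInt(1);
        }
      }
    }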




[22/33] hadoop git commit: YARN-5496. Make Node Heatmap Chart categories clickable in new YARN UI. Contributed by Gergely Novák.

2017-03-14 Thread jhung
YARN-5496. Make Node Heatmap Chart categories clickable in new YARN UI. Contributed by Gergely Novák.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e5f2eedc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e5f2eedc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e5f2eedc

Branch: refs/heads/YARN-5734
Commit: e5f2eedcbfcbfe8fa6fdb6a57b1250f80b12c32f
Parents: 9832ae0
Author: Sunil G 
Authored: Tue Mar 14 11:47:11 2017 +0530
Committer: Sunil G 
Committed: Tue Mar 14 11:47:11 2017 +0530

--
 .../app/components/base-chart-component.js  |   4 +
 .../main/webapp/app/components/nodes-heatmap.js | 106 ++-
 .../src/main/webapp/app/styles/app.css  |  12 +++
 3 files changed, 93 insertions(+), 29 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e5f2eedc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/base-chart-component.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/base-chart-component.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/base-chart-component.js
index d11a532..aa41893 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/base-chart-component.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/base-chart-component.js
@@ -141,4 +141,8 @@ export default Ember.Component.extend({
 };
 return layout;
   },
+
+  willDestroy: function() {
+this.tooltip.remove();
+  }
 });

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e5f2eedc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/nodes-heatmap.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/nodes-heatmap.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/nodes-heatmap.js
index 5652834..ef6e46e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/nodes-heatmap.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/nodes-heatmap.js
@@ -26,17 +26,18 @@ export default BaseChartComponent.extend({
   CELL_MARGIN: 2,
   RACK_MARGIN: 20,
   filter: "",
+  selectedCategory: 0,
 
-  bindTP: function(element) {
+  bindTP: function(element, cell) {
 element.on("mouseover", function() {
   this.tooltip
 .style("left", (d3.event.pageX) + "px")
 .style("top", (d3.event.pageY - 28) + "px");
-  element.style("opacity", 1.0);
+  cell.style("opacity", 1.0);
 }.bind(this))
   .on("mousemove", function() {
 // Handle pie chart case
-var text = element.attr("tooltiptext");
+var text = cell.attr("tooltiptext");
 
 this.tooltip.style("opacity", 0.9);
 this.tooltip.html(text)
@@ -45,10 +46,45 @@ export default BaseChartComponent.extend({
   }.bind(this))
   .on("mouseout", function() {
 this.tooltip.style("opacity", 0);
-element.style("opacity", 0.8);
+cell.style("opacity", 0.8);
   }.bind(this));
   },
 
+  bindSelectCategory: function(element, i) {
+element.on("click", function() {
+  if (this.selectedCategory == i) {
+// Remove selection for second click
+this.selectedCategory = 0;
+  } else {
+this.selectedCategory = i;
+  }
+  this.didInsertElement();
+}.bind(this));
+  },
+
+  isNodeSelected: function(node) {
+if (this.filter) {
+  var rack = node.get("rack");
+  var host = node.get("nodeHostName");
+  if (!rack.includes(this.filter) && !host.includes(this.filter)) {
+return false;
+  }
+}
+
+if (this.selectedCategory === 0) {
+  return true;
+}
+
+var usage = node.get("usedMemoryMB") /
+  (node.get("usedMemoryMB") + node.get("availMemoryMB"))
+var lowerLimit = (this.selectedCategory - 1) * 0.2;
+var upperLimit = this.selectedCategory * 0.2;
+if (lowerLimit <= usage && usage <= upperLimit) {
+  return true;
+}
+return false;
+  },
+
   // data:
   //[{label=label1, value=value1}, ...]
   //...
@@ -84,20 +120,32 @@ export default BaseChartComponent.extend({
 for (i = 1; i <= 5; i++) {
   var ratio = i * 0.2 - 0.1;
 
-  g.append("rect")
+  var rect = g.append("rect")
 .attr("x", sampleXOffset)
 .attr("y", sampleYOffset)
-.attr("fill", colorFunc(ratio))
+.attr("fill", this.selectedCategory === i ? "#2ca02c" : 
colorFunc(ratio))
 .attr("width", 

[15/33] hadoop git commit: YARN-6042. Dump scheduler and queue state information into FairScheduler DEBUG log. (Yufei Gu via rchiang)

2017-03-14 Thread jhung
YARN-6042. Dump scheduler and queue state information into FairScheduler DEBUG log. (Yufei Gu via rchiang)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4db9cc70
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4db9cc70
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4db9cc70

Branch: refs/heads/YARN-5734
Commit: 4db9cc70d0178703fb28f451eb84d97f2bf63af8
Parents: 229c7c9
Author: Ray Chiang 
Authored: Fri Mar 10 16:13:31 2017 -0800
Committer: Ray Chiang 
Committed: Fri Mar 10 16:13:31 2017 -0800

--
 .../src/main/conf/log4j.properties  |  9 +++
 .../scheduler/fair/FSAppAttempt.java| 49 +++---
 .../scheduler/fair/FSLeafQueue.java | 21 ++
 .../scheduler/fair/FSParentQueue.java   | 21 ++
 .../resourcemanager/scheduler/fair/FSQueue.java | 41 ++-
 .../scheduler/fair/FairScheduler.java   | 28 +---
 .../scheduler/fair/TestFairScheduler.java   | 71 
 7 files changed, 206 insertions(+), 34 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4db9cc70/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
--
diff --git a/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties 
b/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
index b8c84e7..6026763 100644
--- a/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
+++ b/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
@@ -321,3 +321,12 @@ 
log4j.appender.EWMA=org.apache.hadoop.yarn.util.Log4jWarningErrorMetricsAppender
 log4j.appender.EWMA.cleanupInterval=${yarn.ewma.cleanupInterval}
 log4j.appender.EWMA.messageAgeLimitSeconds=${yarn.ewma.messageAgeLimitSeconds}
 log4j.appender.EWMA.maxUniqueMessages=${yarn.ewma.maxUniqueMessages}
+
+# Fair scheduler requests log on state dump
+log4j.logger.org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler.statedump=DEBUG,FSLOGGER
+log4j.appender.FSLOGGER=org.apache.log4j.RollingFileAppender
+log4j.appender.FSLOGGER.File=${hadoop.log.dir}/fairscheduler-statedump.log
+log4j.appender.FSLOGGER.layout=org.apache.log4j.PatternLayout
+log4j.appender.FSLOGGER.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+log4j.appender.FSLOGGER.MaxFileSize=${hadoop.log.maxfilesize}
+log4j.appender.FSLOGGER.MaxBackupIndex=${hadoop.log.maxbackupindex}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4db9cc70/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
index 3a9c94e..ccfcffb 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
@@ -835,25 +835,27 @@ public class FSAppAttempt extends 
SchedulerApplicationAttempt
   return capability;
 }
 
+if (LOG.isDebugEnabled()) {
+  LOG.debug("Resource request: " + capability + " exceeds the available"
+  + " resources of the node.");
+}
+
 // The desired container won't fit here, so reserve
 if (isReservable(capability) &&
 reserve(pendingAsk.getPerAllocationResource(), node, reservedContainer,
 type, schedulerKey)) {
-  if (isWaitingForAMContainer()) {
-updateAMDiagnosticMsg(capability,
-" exceed the available resources of the node and the request is"
-+ " reserved");
+  updateAMDiagnosticMsg(capability, " exceeds the available resources of "
+  + "the node and the request is reserved)");
+  if (LOG.isDebugEnabled()) {
+LOG.debug(getName() + "'s resource request is reserved.");
   }
   return FairScheduler.CONTAINER_RESERVED;
 } else {
-  if (isWaitingForAMContainer()) {
-updateAMDiagnosticMsg(capability,
-" exceed the available resources of the node and the request 
cannot"
-+ " be 

[03/33] hadoop git commit: YARN-1047. Expose # of pre-emptions as a queue counter (Contributed by Karthik Kambatla via Daniel Templeton)

2017-03-14 Thread jhung
YARN-1047. Expose # of pre-emptions as a queue counter (Contributed by Karthik Kambatla via Daniel Templeton)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/846a0cd6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/846a0cd6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/846a0cd6

Branch: refs/heads/YARN-5734
Commit: 846a0cd678fba743220f28cef844ac9011a3f934
Parents: 819808a
Author: Daniel Templeton 
Authored: Thu Mar 9 17:51:47 2017 -0800
Committer: Daniel Templeton 
Committed: Thu Mar 9 17:51:47 2017 -0800

--
 .../server/resourcemanager/scheduler/QueueMetrics.java | 13 +
 .../resourcemanager/scheduler/fair/FSAppAttempt.java   |  5 +
 .../scheduler/fair/TestFairSchedulerPreemption.java| 12 +---
 3 files changed, 27 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/846a0cd6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueMetrics.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueMetrics.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueMetrics.java
index 4e364f7..007d2b3 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueMetrics.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueMetrics.java
@@ -71,6 +71,8 @@ public class QueueMetrics implements MetricsSource {
   @Metric("Aggregate # of allocated off-switch containers")
 MutableCounterLong aggregateOffSwitchContainersAllocated;
   @Metric("Aggregate # of released containers") MutableCounterLong 
aggregateContainersReleased;
+  @Metric("Aggregate # of preempted containers") MutableCounterLong
+  aggregateContainersPreempted;
   @Metric("Available memory in MB") MutableGaugeLong availableMB;
   @Metric("Available CPU in virtual cores") MutableGaugeInt availableVCores;
   @Metric("Pending memory allocation in MB") MutableGaugeLong pendingMB;
@@ -476,6 +478,13 @@ public class QueueMetrics implements MetricsSource {
 }
   }
 
+  public void preemptContainer() {
+aggregateContainersPreempted.incr();
+if (parent != null) {
+  parent.preemptContainer();
+}
+  }
+
   public void reserveResource(String user, Resource res) {
 reservedContainers.incr();
 reservedMB.incr(res.getMemorySize());
@@ -640,4 +649,8 @@ public class QueueMetrics implements MetricsSource {
   public long getAggegatedReleasedContainers() {
 return aggregateContainersReleased.value();
   }
+
+  public long getAggregatePreemptedContainers() {
+return aggregateContainersPreempted.value();
+  }
 }

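A hedged sketch of the new counter's behaviour: preemptContainer() walks up the parent chain, so a preemption on any leaf queue is visible in every ancestor's aggregate. The forQueue(...) factory call is an assumption for illustration; only the two methods in the hunk above are confirmed:

    QueueMetrics metrics =
        QueueMetrics.forQueue("root.default", null, false, conf); // assumed factory
    long before = metrics.getAggregatePreemptedContainers();
    metrics.preemptContainer();   // increments this queue and all its parents
    assert metrics.getAggregatePreemptedContainers() == before + 1;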
http://git-wip-us.apache.org/repos/asf/hadoop/blob/846a0cd6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
index 6c61b45..3a9c94e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
@@ -45,6 +45,7 @@ import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.Queue;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicationAttempt;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerUtils;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.PendingAsk;

[12/33] hadoop git commit: YARN-6321. TestResources test timeouts are too aggressive. Contributed by Eric Badger

2017-03-14 Thread jhung
YARN-6321. TestResources test timeouts are too aggressive. Contributed by Eric Badger


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9649c278
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9649c278
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9649c278

Branch: refs/heads/YARN-5734
Commit: 9649c27864a23ea156bae904368c1d3cf94c6e9d
Parents: 092ec39
Author: Jason Lowe 
Authored: Fri Mar 10 13:05:55 2017 -0600
Committer: Jason Lowe 
Committed: Fri Mar 10 13:06:54 2017 -0600

--
 .../org/apache/hadoop/yarn/util/resource/TestResources.java| 6 +++---
 .../yarn/server/resourcemanager/resource/TestResources.java| 4 ++--
 2 files changed, 5 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9649c278/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/resource/TestResources.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/resource/TestResources.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/resource/TestResources.java
index f8570a8..d79179a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/resource/TestResources.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/resource/TestResources.java
@@ -30,7 +30,7 @@ public class TestResources {
 return Resource.newInstance(memory, vCores);
   }
 
-  @Test(timeout=1000)
+  @Test(timeout=10000)
   public void testCompareToWithUnboundedResource() {
 assertTrue(Resources.unbounded().compareTo(
 createResource(Long.MAX_VALUE, Integer.MAX_VALUE)) == 0);
@@ -40,7 +40,7 @@ public class TestResources {
 createResource(0, Integer.MAX_VALUE)) > 0);
   }
 
-  @Test(timeout=1000)
+  @Test(timeout=10000)
   public void testCompareToWithNoneResource() {
 assertTrue(Resources.none().compareTo(createResource(0, 0)) == 0);
 assertTrue(Resources.none().compareTo(
@@ -49,7 +49,7 @@ public class TestResources {
 createResource(0, 1)) < 0);
   }
 
-  @Test
+  @Test(timeout=10000)
   public void testMultipleRoundUp() {
 final double by = 0.5;
 final String memoryErrorMsg = "Invalid memory size.";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9649c278/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resource/TestResources.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resource/TestResources.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resource/TestResources.java
index ae98660..2a10747 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resource/TestResources.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resource/TestResources.java
@@ -22,7 +22,7 @@ import static org.junit.Assert.*;
 import org.junit.Test;
 
 public class TestResources {
-  @Test(timeout=1000)
+  @Test(timeout=10000)
   public void testFitsIn() {
 assertTrue(fitsIn(createResource(1, 1), createResource(2, 2)));
 assertTrue(fitsIn(createResource(2, 2), createResource(2, 2)));
@@ -31,7 +31,7 @@ public class TestResources {
 assertFalse(fitsIn(createResource(2, 1), createResource(1, 2)));
   }
   
-  @Test(timeout=1000)
+  @Test(timeout=10000)
   public void testComponentwiseMin() {
 assertEquals(createResource(1, 1),
 componentwiseMin(createResource(1, 1), createResource(2, 2)));


[01/33] hadoop git commit: HDFS-11506. Move ErasureCodingPolicyManager#getSystemDefaultPolicy to test code. Contributed by Manoj Govindassamy. [Forced Update!]

2017-03-14 Thread jhung
Repository: hadoop
Updated Branches:
  refs/heads/YARN-5734 1d219f00e -> 25d2028be (forced update)


http://git-wip-us.apache.org/repos/asf/hadoop/blob/819808a0/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewerWithStripedBlocks.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewerWithStripedBlocks.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewerWithStripedBlocks.java
index e7794d6..0bfa054 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewerWithStripedBlocks.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewerWithStripedBlocks.java
@@ -31,12 +31,12 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.StripedFileTestUtil;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped;
-import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
 import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
 import org.apache.hadoop.hdfs.server.namenode.INodeFile;
@@ -46,7 +46,7 @@ import org.junit.Test;
 
 public class TestOfflineImageViewerWithStripedBlocks {
   private final ErasureCodingPolicy ecPolicy =
-  ErasureCodingPolicyManager.getSystemDefaultPolicy();
+  StripedFileTestUtil.getDefaultECPolicy();
   private int dataBlocks = ecPolicy.getNumDataUnits();
   private int parityBlocks = ecPolicy.getNumParityUnits();
 
@@ -64,7 +64,7 @@ public class TestOfflineImageViewerWithStripedBlocks {
 cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build();
 cluster.waitActive();
 cluster.getFileSystem().getClient().setErasureCodingPolicy("/",
-ErasureCodingPolicyManager.getSystemDefaultPolicy().getName());
+StripedFileTestUtil.getDefaultECPolicy().getName());
 fs = cluster.getFileSystem();
 Path eczone = new Path("/eczone");
 fs.mkdirs(eczone);
@@ -144,7 +144,7 @@ public class TestOfflineImageViewerWithStripedBlocks {
 // Verify space consumed present in BlockInfoStriped
 FSDirectory fsdir = cluster.getNamesystem().getFSDirectory();
 INodeFile fileNode = fsdir.getINode4Write(file.toString()).asFile();
-assertEquals(ErasureCodingPolicyManager.getSystemDefaultPolicy().getId(),
+assertEquals(StripedFileTestUtil.getDefaultECPolicy().getId(),
 fileNode.getErasureCodingPolicyID());
 assertTrue("Invalid block size", fileNode.getBlocks().length > 0);
 long actualFileSize = 0;


[26/33] hadoop git commit: YARN-6327. Removing queues from CapacitySchedulerQueueManager and ParentQueue should be done with iterator. Contributed by Jonathan Hung.

2017-03-14 Thread jhung
YARN-6327. Removing queues from CapacitySchedulerQueueManager and ParentQueue 
should be done with iterator. Contributed by Jonathan Hung.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0a3aa40f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0a3aa40f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0a3aa40f

Branch: refs/heads/YARN-5734
Commit: 0a3aa40fe7878c939dbf4e6b43466595159ff930
Parents: 7515e75
Author: Naganarasimha 
Authored: Wed Mar 15 01:22:25 2017 +0530
Committer: Naganarasimha 
Committed: Wed Mar 15 01:22:25 2017 +0530

--
 .../scheduler/capacity/CapacitySchedulerQueueManager.java | 7 +--
 .../resourcemanager/scheduler/capacity/ParentQueue.java   | 6 --
 2 files changed, 9 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0a3aa40f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerQueueManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerQueueManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerQueueManager.java
index 8cae6c3..76cb5d6 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerQueueManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerQueueManager.java
@@ -22,6 +22,7 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Comparator;
 import java.util.HashMap;
+import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
@@ -312,10 +313,12 @@ public class CapacitySchedulerQueueManager implements 
SchedulerQueueManager<CSQueue, CapacitySchedulerConfiguration> {
 existingQueues.put(queueName, queue);
   }
 }
-for (Map.Entry<String, CSQueue> e : existingQueues.entrySet()) {
+for (Iterator<Map.Entry<String, CSQueue>> itr = existingQueues.entrySet()
+.iterator(); itr.hasNext();) {
+  Map.Entry<String, CSQueue> e = itr.next();
   String queueName = e.getKey();
   if (!newQueues.containsKey(queueName)) {
-existingQueues.remove(queueName);
+itr.remove();
   }
 }
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0a3aa40f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
index 6f82fcc..f84b7a4 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
@@ -333,10 +333,12 @@ public class ParentQueue extends AbstractCSQueue {
   }
 
   // remove the deleted queue in the refreshed xml.
-  for (Map.Entry<String, CSQueue> e : currentChildQueues.entrySet()) {
+  for (Iterator<Map.Entry<String, CSQueue>> itr = currentChildQueues
+  .entrySet().iterator(); itr.hasNext();) {
+    Map.Entry<String, CSQueue> e = itr.next();
 String queueName = e.getKey();
 if (!newChildQueues.containsKey(queueName)) {
-  currentChildQueues.remove(queueName);
+  itr.remove();
 }
   }
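
The motivation is the usual java.util collection contract: removing entries from a map while iterating over its entry set risks a ConcurrentModificationException, while Iterator.remove() is the sanctioned way to delete during traversal. A minimal, self-contained sketch of the pattern (illustrative only, not code from the patch):

```java
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;

public class IteratorRemovalDemo {
  public static void main(String[] args) {
    Map<String, String> queues = new HashMap<>();
    queues.put("root.a", "a");
    queues.put("root.b", "b");

    // Calling queues.remove(key) inside the loop risks a
    // ConcurrentModificationException on the next itr.next() call;
    // Iterator.remove() deletes safely through the iterator.
    for (Iterator<Map.Entry<String, String>> itr =
        queues.entrySet().iterator(); itr.hasNext();) {
      Map.Entry<String, String> e = itr.next();
      if ("root.b".equals(e.getKey())) {
        itr.remove();
      }
    }
    System.out.println(queues); // prints {root.a=a}
  }
}
```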
 


[27/33] hadoop git commit: YARN-6313. YARN logs cli should provide logs for a completed container even when application is still running. Contributed by Xuan Gong.

2017-03-14 Thread jhung
YARN-6313. YARN logs cli should provide logs for a completed container even 
when application is still running. Contributed by Xuan Gong.

(cherry picked from commit b88f5e0f7858d1d89b79dfd325b767c34416052d)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/871dc420
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/871dc420
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/871dc420

Branch: refs/heads/YARN-5734
Commit: 871dc420f8a4f151189c0925e062c64859a8f275
Parents: 0a3aa40
Author: Junping Du 
Authored: Tue Mar 14 12:56:54 2017 -0700
Committer: Junping Du 
Committed: Tue Mar 14 12:58:12 2017 -0700

--
 .../apache/hadoop/yarn/client/cli/LogsCLI.java  | 172 +--
 .../hadoop/yarn/client/cli/TestLogsCLI.java |  31 
 .../yarn/logaggregation/LogCLIHelpers.java  |  11 +-
 3 files changed, 160 insertions(+), 54 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/871dc420/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/LogsCLI.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/LogsCLI.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/LogsCLI.java
index 3cb1c7d..8407b19 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/LogsCLI.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/LogsCLI.java
@@ -44,6 +44,7 @@ import org.apache.commons.cli.Options;
 import org.apache.commons.cli.ParseException;
 import org.apache.commons.io.IOUtils;
 import org.apache.commons.lang.StringUtils;
+import org.apache.commons.math3.util.Pair;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceAudience.Public;
 import org.apache.hadoop.classification.InterfaceStability.Evolving;
@@ -65,6 +66,7 @@ import 
org.apache.hadoop.yarn.logaggregation.ContainerLogsRequest;
 import org.apache.hadoop.yarn.logaggregation.LogCLIHelpers;
 import org.apache.hadoop.yarn.logaggregation.PerContainerLogFileInfo;
 import org.apache.hadoop.yarn.webapp.util.WebAppUtils;
+import org.apache.hadoop.yarn.webapp.util.YarnWebServiceUtils;
 import org.codehaus.jettison.json.JSONArray;
 import org.codehaus.jettison.json.JSONException;
 import org.codehaus.jettison.json.JSONObject;
@@ -409,10 +411,11 @@ public class LogsCLI extends Configured implements Tool {
 return false;
   }
 
-  private List<PerContainerLogFileInfo> getContainerLogFiles(
+  private List<Pair<PerContainerLogFileInfo, String>> getContainerLogFiles(
   Configuration conf, String containerIdStr, String nodeHttpAddress)
   throws IOException {
-List<PerContainerLogFileInfo> logFileInfos = new ArrayList<>();
+List<Pair<PerContainerLogFileInfo, String>> logFileInfos
+= new ArrayList<>();
 Client webServiceClient = Client.create();
 try {
   WebResource webResource = webServiceClient
@@ -438,16 +441,20 @@ public class LogsCLI extends Configured implements Tool {
   }
   for (int i = 0; i < array.length(); i++) {
 JSONObject log = array.getJSONObject(i);
+String aggregateType = log.has("logAggregationType") ?
+log.getString("logAggregationType") : "N/A";
 Object ob = log.get("containerLogInfo");
 if (ob instanceof JSONArray) {
   JSONArray obArray = (JSONArray)ob;
   for (int j = 0; j < obArray.length(); j++) {
-logFileInfos.add(generatePerContainerLogFileInfoFromJSON(
-obArray.getJSONObject(j)));
+logFileInfos.add(new Pair<PerContainerLogFileInfo, String>(
+generatePerContainerLogFileInfoFromJSON(
+obArray.getJSONObject(j)), aggregateType));
   }
 } else if (ob instanceof JSONObject) {
-  logFileInfos.add(generatePerContainerLogFileInfoFromJSON(
-  (JSONObject)ob));
+  logFileInfos.add(new Pair<PerContainerLogFileInfo, String>(
+  generatePerContainerLogFileInfoFromJSON(
+  (JSONObject)ob), aggregateType));
 }
   }
 } catch (Exception e) {
@@ -542,10 +549,8 @@ public class LogsCLI extends Configured implements Tool {
   IOUtils.closeQuietly(is);
 }
   }
-  // for the case, we have already uploaded partial logs in HDFS
-  int result = logCliHelper.dumpAContainerLogsForLogType(
-  newOptions, false);
-  if (result == 0 || 
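
The message is cut off in the archive at this point. For the JSON handling added earlier in the hunk, here is a small sketch of the tolerant field access (illustrative only): older NodeManagers may not emit "logAggregationType", so the CLI falls back to "N/A" rather than throwing.

```java
import org.codehaus.jettison.json.JSONException;
import org.codehaus.jettison.json.JSONObject;

final class AggregationTypeDemo {
  // Mirrors the pattern above: read an optional field with a default.
  static String aggregationType(JSONObject log) throws JSONException {
    return log.has("logAggregationType")
        ? log.getString("logAggregationType") : "N/A";
  }
}
```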

[33/33] hadoop git commit: YARN-5948. Implement MutableConfigurationManager for handling storage into configuration store

2017-03-14 Thread jhung
YARN-5948. Implement MutableConfigurationManager for handling storage into 
configuration store


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/25d2028b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/25d2028b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/25d2028b

Branch: refs/heads/YARN-5734
Commit: 25d2028be4ff2223489e149d63de66895f8449ec
Parents: 874b9b8
Author: Jonathan Hung 
Authored: Wed Mar 1 16:03:01 2017 -0800
Committer: Jonathan Hung 
Committed: Tue Mar 14 16:29:32 2017 -0700

--
 .../hadoop/yarn/conf/YarnConfiguration.java |  6 ++
 .../src/main/resources/yarn-default.xml | 12 +++
 .../scheduler/MutableConfigurationProvider.java | 35 
 .../scheduler/capacity/CapacityScheduler.java   | 14 ++-
 .../CapacitySchedulerConfiguration.java |  3 +
 .../capacity/conf/CSConfigurationProvider.java  |  3 +-
 .../conf/MutableCSConfigurationProvider.java| 94 
 .../conf/YarnConfigurationStoreFactory.java | 46 ++
 .../TestMutableCSConfigurationProvider.java | 83 +
 9 files changed, 291 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/25d2028b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index b366855..91728a3 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -602,6 +602,12 @@ public class YarnConfiguration extends Configuration {
   public static final String DEFAULT_RM_CONFIGURATION_PROVIDER_CLASS =
   "org.apache.hadoop.yarn.LocalConfigurationProvider";
 
+  public static final String SCHEDULER_CONFIGURATION_STORE_CLASS =
+  YARN_PREFIX + "scheduler.configuration.store.class";
+  public static final String MEMORY_CONFIGURATION_STORE = "memory";
+  public static final String DEFAULT_CONFIGURATION_STORE =
+  MEMORY_CONFIGURATION_STORE;
+
   public static final String YARN_AUTHORIZATION_PROVIDER = YARN_PREFIX
   + "authorization-provider";
  private static final List<String> RM_SERVICES_ADDRESS_CONF_KEYS_HTTP =

http://git-wip-us.apache.org/repos/asf/hadoop/blob/25d2028b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index 727e2c9..9a3fa8b 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -3079,4 +3079,16 @@
     <value>false</value>
   </property>
 
+  <property>
+    <description>
+      The type of configuration store to use for storing scheduler
+      configurations, if using a mutable configuration provider.
+      Keywords such as "memory" map to certain configuration store
+      implementations. If keyword is not found, try to load this
+      value as a class.
+    </description>
+    <name>yarn.scheduler.configuration.store.class</name>
+    <value>memory</value>
+  </property>
+
 
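
A short sketch of how a client of this property would select the in-memory store (assumed usage, based only on the constants added above):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class ConfigStoreSelection {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // "memory" is the keyword mapped by MEMORY_CONFIGURATION_STORE; per the
    // description above, any other value is treated as a class name.
    conf.set(YarnConfiguration.SCHEDULER_CONFIGURATION_STORE_CLASS,
        YarnConfiguration.MEMORY_CONFIGURATION_STORE);
    System.out.println(conf.get(
        YarnConfiguration.SCHEDULER_CONFIGURATION_STORE_CLASS));
  }
}
```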

http://git-wip-us.apache.org/repos/asf/hadoop/blob/25d2028b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfigurationProvider.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfigurationProvider.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfigurationProvider.java
new file mode 100644
index 000..da30a2b
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfigurationProvider.java
@@ -0,0 +1,35 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * 

[13/33] hadoop git commit: HDFS-11340. DataNode reconfigure for disks doesn't remove the failed volumes. (Manoj Govindassamy via lei)

2017-03-14 Thread jhung
HDFS-11340. DataNode reconfigure for disks doesn't remove the failed volumes. 
(Manoj Govindassamy via lei)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6d356b6b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6d356b6b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6d356b6b

Branch: refs/heads/YARN-5734
Commit: 6d356b6b4d8ccb32397cacfb5d0357b21f6035fc
Parents: 9649c27
Author: Lei Xu 
Authored: Fri Mar 10 14:36:51 2017 -0800
Committer: Lei Xu 
Committed: Fri Mar 10 14:37:13 2017 -0800

--
 .../hadoop/hdfs/server/datanode/DataNode.java   |  73 +---
 .../datanode/fsdataset/impl/FsDatasetImpl.java  |  14 ++-
 .../datanode/fsdataset/impl/FsVolumeList.java   |  13 ++-
 .../TestDataNodeVolumeFailureReporting.java | 116 +--
 4 files changed, 184 insertions(+), 32 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6d356b6b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index 6f24858..5a82850 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -652,48 +652,84 @@ public class DataNode extends ReconfigurableBase
   ChangedVolumes parseChangedVolumes(String newVolumes) throws IOException {
 Configuration conf = new Configuration();
 conf.set(DFS_DATANODE_DATA_DIR_KEY, newVolumes);
-List<StorageLocation> locations = getStorageLocations(conf);
+List<StorageLocation> newStorageLocations = getStorageLocations(conf);
 
-if (locations.isEmpty()) {
+if (newStorageLocations.isEmpty()) {
   throw new IOException("No directory is specified.");
 }
 
-// Use the existing StorageLocation to detect storage type changes.
-Map<String, StorageLocation> existingLocations = new HashMap<>();
+// Use the existing storage locations from the current conf
+// to detect new storage additions or removals.
+Map<String, StorageLocation> existingStorageLocations = new HashMap<>();
 for (StorageLocation loc : getStorageLocations(getConf())) {
-  existingLocations.put(loc.getNormalizedUri().toString(), loc);
+  existingStorageLocations.put(loc.getNormalizedUri().toString(), loc);
 }
 
 ChangedVolumes results = new ChangedVolumes();
-results.newLocations.addAll(locations);
+results.newLocations.addAll(newStorageLocations);
 
 for (Iterator<Storage.StorageDirectory> it = storage.dirIterator();
  it.hasNext(); ) {
   Storage.StorageDirectory dir = it.next();
   boolean found = false;
-  for (Iterator<StorageLocation> sl = results.newLocations.iterator();
-   sl.hasNext(); ) {
-StorageLocation location = sl.next();
-if (location.matchesStorageDirectory(dir)) {
-  sl.remove();
-  StorageLocation old = existingLocations.get(
-  location.getNormalizedUri().toString());
-  if (old != null &&
-  old.getStorageType() != location.getStorageType()) {
+  for (Iterator<StorageLocation> newLocationItr =
+   results.newLocations.iterator(); newLocationItr.hasNext();) {
+StorageLocation newLocation = newLocationItr.next();
+if (newLocation.matchesStorageDirectory(dir)) {
+  StorageLocation oldLocation = existingStorageLocations.get(
+  newLocation.getNormalizedUri().toString());
+  if (oldLocation != null &&
+  oldLocation.getStorageType() != newLocation.getStorageType()) {
 throw new IOException("Changing storage type is not allowed.");
   }
-  results.unchangedLocations.add(location);
+  // Update the unchanged locations as this location
+  // from the new conf is really not a new one.
+  newLocationItr.remove();
+  results.unchangedLocations.add(newLocation);
   found = true;
   break;
 }
   }
 
+  // New conf doesn't have the storage location which available in
+  // the current storage locations. Add to the deactivateLocations list.
   if (!found) {
+LOG.info("Deactivation request received for active volume: "
++ dir.getRoot().toString());
 results.deactivateLocations.add(
 StorageLocation.parse(dir.getRoot().toString()));
   }
 }
 
+// Use the failed storage locations from the current conf
+// to detect removals in the new conf.
+if 
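
The hunk is truncated here in the archive. The parsing above is essentially a set difference between the old and new volume lists; a toy sketch with strings standing in for StorageLocation (illustrative only):

```java
import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

final class VolumeDiffDemo {
  public static void main(String[] args) {
    Set<String> oldLocs = new HashSet<>(Arrays.asList("/data1", "/data2"));
    Set<String> newLocs = new HashSet<>(Arrays.asList("/data2", "/data3"));

    Set<String> toAdd = new HashSet<>(newLocs);
    toAdd.removeAll(oldLocs);          // only in the new conf: additions
    Set<String> toDeactivate = new HashSet<>(oldLocs);
    toDeactivate.removeAll(newLocs);   // only in the old conf: deactivations

    System.out.println("add=" + toAdd + " deactivate=" + toDeactivate);
  }
}
```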

[10/33] hadoop git commit: HADOOP-14062. ApplicationMasterProtocolPBClientImpl.allocate fails with EOFException when RPC privacy is enabled. Contributed by Steven Rand

2017-03-14 Thread jhung
HADOOP-14062. ApplicationMasterProtocolPBClientImpl.allocate fails with 
EOFException when RPC privacy is enabled. Contributed by Steven Rand


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4478273e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4478273e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4478273e

Branch: refs/heads/YARN-5734
Commit: 4478273e5fb731de93ff12e249a3137c38fcf46f
Parents: deb9f56
Author: Jian He 
Authored: Thu Mar 9 19:28:09 2017 -0800
Committer: Jian He 
Committed: Fri Mar 10 09:25:58 2017 -0800

--
 .../main/java/org/apache/hadoop/ipc/Client.java   |  4 +++-
 .../yarn/client/api/impl/TestAMRMClient.java  | 18 +-
 2 files changed, 20 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4478273e/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
index 70b902c..c0a5be9 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
@@ -1768,7 +1768,9 @@ public class Client implements AutoCloseable {
 }
 
 void setSaslClient(SaslRpcClient client) throws IOException {
-  setInputStream(client.getInputStream(in));
+  // Wrap the input stream in a BufferedInputStream to fill the buffer
+  // before reading its length (HADOOP-14062).
+  setInputStream(new BufferedInputStream(client.getInputStream(in)));
   setOutputStream(client.getOutputStream(out));
 }
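
A minimal sketch of the length-prefixed read pattern this buffering protects (illustrative only, not the actual Hadoop IPC framing): without buffering, a short read while decoding the length prefix of a large encrypted response can surface as an EOFException.

```java
import java.io.BufferedInputStream;
import java.io.DataInputStream;
import java.io.IOException;
import java.io.InputStream;

final class LengthPrefixedReader {
  static byte[] readFrame(InputStream raw) throws IOException {
    // BufferedInputStream fills its buffer before the length is decoded,
    // so readInt() does not observe a partial read from the SASL stream.
    DataInputStream in = new DataInputStream(new BufferedInputStream(raw));
    int length = in.readInt();     // 4-byte big-endian frame length
    byte[] payload = new byte[length];
    in.readFully(payload);         // blocks until the whole frame arrives
    return payload;
  }
}
```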
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4478273e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClient.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClient.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClient.java
index 43c0271..06ba137 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClient.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClient.java
@@ -40,6 +40,7 @@ import java.util.Set;
 import java.util.TreeSet;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.io.DataOutputBuffer;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.security.Credentials;
@@ -126,8 +127,12 @@ public class TestAMRMClient {
 
   @Before
   public void setup() throws Exception {
-// start minicluster
 conf = new YarnConfiguration();
+createClusterAndStartApplication();
+  }
+
+  private void createClusterAndStartApplication() throws Exception {
+// start minicluster
 conf.set(YarnConfiguration.RM_SCHEDULER, schedulerName);
 conf.setLong(
   YarnConfiguration.RM_AMRM_TOKEN_MASTER_KEY_ROLLING_INTERVAL_SECS,
@@ -866,6 +871,17 @@ public class TestAMRMClient {
 initAMRMClientAndTest(true);
   }
 
+  @Test (timeout=60000)
+  public void testAMRMClientWithSaslEncryption() throws Exception {
+// we have to create a new instance of MiniYARNCluster to avoid SASL qop
+// mismatches between client and server
+teardown();
+conf = new YarnConfiguration();
+conf.set(CommonConfigurationKeysPublic.HADOOP_RPC_PROTECTION, "privacy");
+createClusterAndStartApplication();
+initAMRMClientAndTest(false);
+  }
+
   private void initAMRMClientAndTest(boolean useAllocReqId)
   throws YarnException, IOException {
 AMRMClient<ContainerRequest> amClient = null;


[29/33] hadoop git commit: HADOOP-14170. FileSystemContractBaseTest is not cleaning up test directory clearly. Contributed by Mingliang Liu

2017-03-14 Thread jhung
HADOOP-14170. FileSystemContractBaseTest is not cleaning up test directory 
clearly. Contributed by Mingliang Liu


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fa67a96d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fa67a96d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fa67a96d

Branch: refs/heads/YARN-5734
Commit: fa67a96d7b0812a6557e40a6ef1eb16f19823e73
Parents: e6cda58
Author: Mingliang Liu 
Authored: Fri Mar 10 18:44:27 2017 -0800
Committer: Mingliang Liu 
Committed: Tue Mar 14 14:38:21 2017 -0700

--
 .../hadoop/fs/FileSystemContractBaseTest.java   | 246 +++
 .../fs/TestRawLocalFileSystemContract.java  |  24 +-
 .../fs/s3a/ITestS3AFileSystemContract.java  |  39 +--
 3 files changed, 176 insertions(+), 133 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fa67a96d/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java
index 6247959..040e9c8 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java
@@ -24,8 +24,9 @@ import java.util.ArrayList;
 
 import junit.framework.TestCase;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.security.AccessControlException;
@@ -45,8 +46,8 @@ import org.apache.hadoop.util.StringUtils;
  * 
  */
 public abstract class FileSystemContractBaseTest extends TestCase {
-  private static final Log LOG =
-LogFactory.getLog(FileSystemContractBaseTest.class);
+  private static final Logger LOG =
+  LoggerFactory.getLogger(FileSystemContractBaseTest.class);
 
   protected final static String TEST_UMASK = "062";
   protected FileSystem fs;
@@ -54,15 +55,46 @@ public abstract class FileSystemContractBaseTest extends 
TestCase {
 
   @Override
   protected void tearDown() throws Exception {
-try {
-  if (fs != null) {
-fs.delete(path("/test"), true);
+if (fs != null) {
+  // some cases use this absolute path
+  if (rootDirTestEnabled()) {
+cleanupDir(path("/FileSystemContractBaseTest"));
   }
+  // others use this relative path against test base directory
+  cleanupDir(getTestBaseDir());
+}
+super.tearDown();
+  }
+
+  private void cleanupDir(Path p) {
+try {
+  LOG.info("Deleting " + p);
+  fs.delete(p, true);
 } catch (IOException e) {
-  LOG.error("Error deleting /test: " + e, e);
+  LOG.error("Error deleting test dir: " + p, e);
 }
   }
-  
+
+  /**
+   * Test base directory for resolving relative test paths.
+   *
+   * The default value is /user/$USER/FileSystemContractBaseTest. Subclass may
+   * set specific test base directory.
+   */
+  protected Path getTestBaseDir() {
+return new Path(fs.getWorkingDirectory(), "FileSystemContractBaseTest");
+  }
+
+  /**
+   * For absolute path return the fully qualified path while for relative path
+   * return the fully qualified path against {@link #getTestBaseDir()}.
+   */
+  protected final Path path(String pathString) {
+Path p = new Path(pathString).makeQualified(fs.getUri(), getTestBaseDir());
+LOG.info("Resolving {} -> {}", pathString, p);
+return p;
+  }
+
   protected int getBlockSize() {
 return 1024;
   }
@@ -81,6 +113,17 @@ public abstract class FileSystemContractBaseTest extends 
TestCase {
   }
 
   /**
+   * Override this if the filesystem does not enable testing root directories.
+   *
+   * If this returns true, the test will create and delete test directories and
+   * files under root directory, which may have side effects, e.g. fail tests
+   * with PermissionDenied exceptions.
+   */
+  protected boolean rootDirTestEnabled() {
+return true;
+  }
+
+  /**
* Override this if the filesystem is not case sensitive
* @return true if the case detection/preservation tests should run
*/
@@ -102,24 +145,24 @@ public abstract class FileSystemContractBaseTest extends 
TestCase {
 Path workDir = path(getDefaultWorkingDirectory());
 assertEquals(workDir, fs.getWorkingDirectory());
 
-fs.setWorkingDirectory(path("."));
+
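
The message is truncated here. As a usage sketch of the new relative-path resolution (assumed behavior, local filesystem): a relative test path is qualified against the per-user test base directory, so cleanup no longer has to touch a shared absolute /test directory.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class QualifiedPathDemo {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.getLocal(new Configuration());
    Path base = new Path(fs.getWorkingDirectory(),
        "FileSystemContractBaseTest");
    // Relative paths resolve under the base dir; absolute ones do not.
    Path p = new Path("dir/file").makeQualified(fs.getUri(), base);
    System.out.println(p); // e.g. file:/<cwd>/FileSystemContractBaseTest/dir/file
  }
}
```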

[25/33] hadoop git commit: HDFS-11505. Do not enable any erasure coding policies by default. Contributed by Manoj Govindassamy.

2017-03-14 Thread jhung
HDFS-11505. Do not enable any erasure coding policies by default. Contributed 
by Manoj Govindassamy.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7515e751
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7515e751
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7515e751

Branch: refs/heads/YARN-5734
Commit: 7515e75103c06ce7139b305dd04d4fb2e94b12ad
Parents: 34424e9
Author: Andrew Wang 
Authored: Tue Mar 14 11:47:25 2017 -0700
Committer: Andrew Wang 
Committed: Tue Mar 14 11:47:25 2017 -0700

--
 .../java/org/apache/hadoop/hdfs/DFSConfigKeys.java   |  2 +-
 .../server/namenode/ErasureCodingPolicyManager.java  |  5 -
 .../hadoop-hdfs/src/main/resources/hdfs-default.xml  |  5 +++--
 .../src/site/markdown/HDFSErasureCoding.md   | 13 +
 .../hadoop/hdfs/TestDecommissionWithStriped.java |  2 ++
 .../hdfs/TestErasureCodeBenchmarkThroughput.java |  2 ++
 .../hdfs/TestErasureCodingPolicyWithSnapshot.java|  2 ++
 .../org/apache/hadoop/hdfs/TestFileChecksum.java |  2 ++
 .../hadoop/hdfs/TestFileStatusWithECPolicy.java  |  5 -
 .../apache/hadoop/hdfs/TestLeaseRecoveryStriped.java |  2 ++
 .../hadoop/hdfs/TestReadStripedFileWithDecoding.java |  2 ++
 .../hdfs/TestReadStripedFileWithMissingBlocks.java   |  2 ++
 .../hadoop/hdfs/TestReconstructStripedFile.java  |  2 ++
 .../hadoop/hdfs/TestSafeModeWithStripedFile.java |  2 ++
 .../apache/hadoop/hdfs/TestWriteReadStripedFile.java |  2 ++
 .../hadoop/hdfs/server/balancer/TestBalancer.java|  2 ++
 .../TestBlockTokenWithDFSStriped.java|  4 
 ...estReconstructStripedBlocksWithRackAwareness.java |  6 ++
 .../blockmanagement/TestSequentialBlockGroupId.java  |  2 ++
 .../datanode/TestDataNodeErasureCodingMetrics.java   |  2 ++
 .../apache/hadoop/hdfs/server/mover/TestMover.java   |  2 ++
 .../namenode/TestAddOverReplicatedStripedBlocks.java |  4 +++-
 .../server/namenode/TestAddStripedBlockInFBR.java|  3 +++
 .../hdfs/server/namenode/TestAddStripedBlocks.java   | 10 ++
 .../hdfs/server/namenode/TestEnabledECPolicies.java  | 15 +--
 .../hdfs/server/namenode/TestFSEditLogLoader.java|  4 
 .../apache/hadoop/hdfs/server/namenode/TestFsck.java |  8 
 .../hdfs/server/namenode/TestNameNodeMXBean.java |  2 ++
 .../server/namenode/TestQuotaWithStripedBlocks.java  |  2 ++
 .../namenode/TestReconstructStripedBlocks.java   |  6 ++
 .../hdfs/server/namenode/TestStripedINodeFile.java   |  4 
 .../TestOfflineImageViewerWithStripedBlocks.java |  2 ++
 32 files changed, 112 insertions(+), 16 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7515e751/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 3fc4980..06b33f9 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -563,7 +563,7 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   "10m";
 
   public static final String  DFS_NAMENODE_EC_POLICIES_ENABLED_KEY = 
"dfs.namenode.ec.policies.enabled";
-  public static final String  DFS_NAMENODE_EC_POLICIES_ENABLED_DEFAULT = 
"RS-6-3-64k";
+  public static final String  DFS_NAMENODE_EC_POLICIES_ENABLED_DEFAULT = "";
   public static final String  
DFS_DN_EC_RECONSTRUCTION_STRIPED_READ_THREADS_KEY = 
"dfs.datanode.ec.reconstruction.stripedread.threads";
   public static final int 
DFS_DN_EC_RECONSTRUCTION_STRIPED_READ_THREADS_DEFAULT = 20;
   public static final String  
DFS_DN_EC_RECONSTRUCTION_STRIPED_READ_BUFFER_SIZE_KEY = 
"dfs.datanode.ec.reconstruction.stripedread.buffer.size";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7515e751/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
index 29af207..c23b034 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
+++ 

[16/33] hadoop git commit: HADOOP-14156. Fix grammar error in ConfTest.java.

2017-03-14 Thread jhung
HADOOP-14156. Fix grammar error in ConfTest.java.

This closes #187

Signed-off-by: Akira Ajisaka 


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/04a5f5a6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/04a5f5a6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/04a5f5a6

Branch: refs/heads/YARN-5734
Commit: 04a5f5a6dc88769cca8b1a15057a0756712b5013
Parents: 4db9cc7
Author: Andrey Dyatlov 
Authored: Mon Feb 6 19:05:58 2017 +0100
Committer: Akira Ajisaka 
Committed: Mon Mar 13 16:15:53 2017 +0900

--
 .../src/main/java/org/apache/hadoop/util/ConfTest.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/04a5f5a6/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ConfTest.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ConfTest.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ConfTest.java
index 3f37f5a..1915e79 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ConfTest.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ConfTest.java
@@ -269,7 +269,7 @@ public final class ConfTest {
 } else {
   String confDirName = System.getenv(HADOOP_CONF_DIR);
   if (confDirName == null) {
-terminate(1, HADOOP_CONF_DIR + " does not defined");
+terminate(1, HADOOP_CONF_DIR + " is not defined");
   }
   File confDir = new File(confDirName);
   if (!confDir.isDirectory()) {


[31/33] hadoop git commit: YARN-5951. Changes to allow CapacityScheduler to use configuration store

2017-03-14 Thread jhung
YARN-5951. Changes to allow CapacityScheduler to use configuration store


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/dcbebf5f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/dcbebf5f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/dcbebf5f

Branch: refs/heads/YARN-5734
Commit: dcbebf5fdf480832668d6b5b43cfedb869973ba8
Parents: 4c66a8d
Author: Jonathan Hung 
Authored: Mon Jan 30 19:03:48 2017 -0800
Committer: Jonathan Hung 
Committed: Tue Mar 14 16:29:01 2017 -0700

--
 .../scheduler/capacity/CapacityScheduler.java   | 36 +--
 .../CapacitySchedulerConfiguration.java | 10 +++
 .../capacity/conf/CSConfigurationProvider.java  | 46 ++
 .../conf/FileBasedCSConfigurationProvider.java  | 67 
 .../scheduler/capacity/conf/package-info.java   | 29 +
 .../capacity/TestCapacityScheduler.java |  4 +-
 6 files changed, 170 insertions(+), 22 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/dcbebf5f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
index f6e7942..ed70b0b 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
@@ -19,7 +19,6 @@
 package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity;
 
 import java.io.IOException;
-import java.io.InputStream;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.EnumSet;
@@ -104,6 +103,8 @@ import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.activities.Activi
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.activities.ActivityDiagnosticConstant;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.activities.ActivityState;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.activities.AllocationState;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.conf.CSConfigurationProvider;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.conf.FileBasedCSConfigurationProvider;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.preemption.KillableContainer;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.preemption.PreemptionManager;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.AssignmentInformation;
@@ -162,6 +163,7 @@ public class CapacityScheduler extends
 
   private int offswitchPerHeartbeatLimit;
 
+  private CSConfigurationProvider csConfProvider;
 
   @Override
   public void setConf(Configuration conf) {
@@ -285,7 +287,18 @@ public class CapacityScheduler extends
   IOException {
 try {
   writeLock.lock();
-  this.conf = loadCapacitySchedulerConfiguration(configuration);
+  String confProviderStr = configuration.get(
+  CapacitySchedulerConfiguration.CS_CONF_PROVIDER,
+  CapacitySchedulerConfiguration.DEFAULT_CS_CONF_PROVIDER);
+  if (confProviderStr.equals(
+  CapacitySchedulerConfiguration.FILE_CS_CONF_PROVIDER)) {
+this.csConfProvider = new FileBasedCSConfigurationProvider(rmContext);
+  } else {
+throw new IOException("Invalid CS configuration provider: " +
+confProviderStr);
+  }
+  this.csConfProvider.init(configuration);
+  this.conf = this.csConfProvider.loadConfiguration(configuration);
   validateConf(this.conf);
   this.minimumAllocation = this.conf.getMinimumAllocation();
   initMaximumResourceCapability(this.conf.getMaximumAllocation());
@@ -392,7 +405,7 @@ public class CapacityScheduler extends
   writeLock.lock();
   Configuration configuration = new Configuration(newConf);
   CapacitySchedulerConfiguration oldConf = this.conf;
-  this.conf = loadCapacitySchedulerConfiguration(configuration);
+  this.conf = 
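
The message is cut off here. The selection logic added above reduces to a keyword dispatch that fails fast on unknown providers; a simplified restatement (the "file" literal is assumed here to be the value of FILE_CS_CONF_PROVIDER):

```java
import java.io.IOException;

import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.conf.CSConfigurationProvider;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.conf.FileBasedCSConfigurationProvider;

final class ProviderSelection {
  static CSConfigurationProvider select(String keyword, RMContext rmContext)
      throws IOException {
    if ("file".equals(keyword)) {
      return new FileBasedCSConfigurationProvider(rmContext);
    }
    // Unknown keywords are rejected instead of being silently ignored.
    throw new IOException("Invalid CS configuration provider: " + keyword);
  }
}
```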

[19/33] hadoop git commit: HADOOP-14170. FileSystemContractBaseTest is not cleaning up test directory clearly. Contributed by Mingliang Liu

2017-03-14 Thread jhung
HADOOP-14170. FileSystemContractBaseTest is not cleaning up test directory 
clearly. Contributed by Mingliang Liu


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b8c69557
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b8c69557
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b8c69557

Branch: refs/heads/YARN-5734
Commit: b8c69557b7a23ff9c4c0b2c9d595338a08b873f1
Parents: 5a40baf
Author: Mingliang Liu 
Authored: Fri Mar 10 18:44:27 2017 -0800
Committer: Mingliang Liu 
Committed: Mon Mar 13 14:15:02 2017 -0700

--
 .../hadoop/fs/FileSystemContractBaseTest.java   | 247 ---
 .../fs/TestRawLocalFileSystemContract.java  |  24 +-
 .../fs/s3a/ITestS3AFileSystemContract.java  |  39 +--
 3 files changed, 178 insertions(+), 132 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b8c69557/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java
index 6247959..78ba1f9 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java
@@ -24,8 +24,9 @@ import java.util.ArrayList;
 
 import junit.framework.TestCase;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.security.AccessControlException;
@@ -45,8 +46,8 @@ import org.apache.hadoop.util.StringUtils;
  * 
  */
 public abstract class FileSystemContractBaseTest extends TestCase {
-  private static final Log LOG =
-LogFactory.getLog(FileSystemContractBaseTest.class);
+  private static final Logger LOG =
+  LoggerFactory.getLogger(FileSystemContractBaseTest.class);
 
   protected final static String TEST_UMASK = "062";
   protected FileSystem fs;
@@ -54,15 +55,46 @@ public abstract class FileSystemContractBaseTest extends 
TestCase {
 
   @Override
   protected void tearDown() throws Exception {
-try {
-  if (fs != null) {
-fs.delete(path("/test"), true);
+if (fs != null) {
+  // some cases use this absolute path
+  if (rootDirTestEnabled()) {
+cleanupDir(path("/FileSystemContractBaseTest"));
   }
+  // others use this relative path against test base directory
+  cleanupDir(getTestBaseDir());
+}
+super.tearDown();
+  }
+
+  private void cleanupDir(Path p) {
+try {
+  LOG.info("Deleting " + p);
+  fs.delete(p, true);
 } catch (IOException e) {
-  LOG.error("Error deleting /test: " + e, e);
+  LOG.error("Error deleting test dir: " + p, e);
 }
   }
-  
+
+  /**
+   * Test base directory for resolving relative test paths.
+   *
+   * The default value is /user/$USER/FileSystemContractBaseTest. Subclass may
+   * set specific test base directory.
+   */
+  protected Path getTestBaseDir() {
+return new Path(fs.getWorkingDirectory(), "FileSystemContractBaseTest");
+  }
+
+  /**
+   * For absolute path return the fully qualified path while for relative path
+   * return the fully qualified path against {@link #getTestBaseDir()}.
+   */
+  protected final Path path(String pathString) {
+Path p = new Path(pathString).makeQualified(fs.getUri(), getTestBaseDir());
+LOG.info("Resolving {} -> {}", pathString, p);
+return p;
+  }
+
   protected int getBlockSize() {
 return 1024;
   }
@@ -81,6 +113,17 @@ public abstract class FileSystemContractBaseTest extends 
TestCase {
   }
 
   /**
+   * Override this if the filesystem does not enable testing root directories.
+   *
+   * If this returns true, the test will create and delete test directories and
+   * files under root directory, which may have side effects, e.g. fail tests
+   * with PermissionDenied exceptions.
+   */
+  protected boolean rootDirTestEnabled() {
+return true;
+  }
+
+  /**
* Override this if the filesystem is not case sensitive
* @return true if the case detection/preservation tests should run
*/
@@ -102,24 +145,24 @@ public abstract class FileSystemContractBaseTest extends 
TestCase {
 Path workDir = path(getDefaultWorkingDirectory());
 assertEquals(workDir, fs.getWorkingDirectory());
 
-fs.setWorkingDirectory(path("."));
+

[20/33] hadoop git commit: HDFS-11395. RequestHedgingProxyProvider#RequestHedgingInvocationHandler hides the Exception thrown from NameNode. Contributed by Nandakumar.

2017-03-14 Thread jhung
HDFS-11395. RequestHedgingProxyProvider#RequestHedgingInvocationHandler hides 
the Exception thrown from NameNode. Contributed by Nandakumar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/55796a09
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/55796a09
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/55796a09

Branch: refs/heads/YARN-5734
Commit: 55796a0946f80a35055701a34379e374399009c5
Parents: b8c6955
Author: Jing Zhao 
Authored: Mon Mar 13 14:14:09 2017 -0700
Committer: Jing Zhao 
Committed: Mon Mar 13 14:24:51 2017 -0700

--
 .../hadoop/io/retry/RetryInvocationHandler.java |  17 ++-
 .../ha/RequestHedgingProxyProvider.java |  41 +--
 .../ha/TestRequestHedgingProxyProvider.java | 108 ++-
 3 files changed, 151 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/55796a09/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryInvocationHandler.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryInvocationHandler.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryInvocationHandler.java
index 8487602..ffdd928 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryInvocationHandler.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryInvocationHandler.java
@@ -240,12 +240,15 @@ public class RetryInvocationHandler<T> implements 
RpcInvocationHandler {
 private final long delay;
 private final RetryAction action;
 private final long expectedFailoverCount;
+private final Exception failException;
 
-RetryInfo(long delay, RetryAction action, long expectedFailoverCount) {
+RetryInfo(long delay, RetryAction action, long expectedFailoverCount,
+Exception failException) {
   this.delay = delay;
   this.retryTime = Time.monotonicNow() + delay;
   this.action = action;
   this.expectedFailoverCount = expectedFailoverCount;
+  this.failException = failException;
 }
 
 boolean isFailover() {
@@ -258,11 +261,16 @@ public class RetryInvocationHandler<T> implements 
RpcInvocationHandler {
   && action.action ==  RetryAction.RetryDecision.FAIL;
 }
 
+Exception getFailException() {
+  return failException;
+}
+
 static RetryInfo newRetryInfo(RetryPolicy policy, Exception e,
 Counters counters, boolean idempotentOrAtMostOnce,
 long expectedFailoverCount) throws Exception {
   RetryAction max = null;
   long maxRetryDelay = 0;
+  Exception ex = null;
 
   final Iterable<Exception> exceptions = e instanceof MultiException ?
   ((MultiException) e).getExceptions().values()
@@ -279,10 +287,13 @@ public class RetryInvocationHandler<T> implements 
RpcInvocationHandler {
 
 if (max == null || max.action.compareTo(a.action) < 0) {
   max = a;
+  if (a.action == RetryAction.RetryDecision.FAIL) {
+ex = exception;
+  }
 }
   }
 
-  return new RetryInfo(maxRetryDelay, max, expectedFailoverCount);
+  return new RetryInfo(maxRetryDelay, max, expectedFailoverCount, ex);
 }
   }
 
@@ -359,7 +370,7 @@ public class RetryInvocationHandler<T> implements 
RpcInvocationHandler {
   + ". Not retrying because " + retryInfo.action.reason, e);
 }
   }
-  throw e;
+  throw retryInfo.getFailException();
 }
 
 log(method, retryInfo.isFailover(), counters.failovers, retryInfo.delay, 
e);
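
A standalone restatement of the idea (hypothetical stand-in types, not the patch itself): when several hedged proxies fail, remember the exception whose retry decision was FAIL and rethrow that one, instead of whatever exception happened to arrive last.

```java
import java.util.Arrays;
import java.util.List;

final class FailExceptionDemo {
  enum Decision { RETRY, FAIL }

  // Stand-in for RetryPolicy#shouldRetry.
  static Decision decide(Exception e) {
    return e instanceof IllegalStateException ? Decision.FAIL : Decision.RETRY;
  }

  static Exception pickFailException(List<Exception> exceptions) {
    Exception fail = null;
    for (Exception e : exceptions) {
      if (decide(e) == Decision.FAIL) {
        fail = e; // keep the exception behind the terminal decision
      }
    }
    return fail;
  }

  public static void main(String[] args) {
    Exception picked = pickFailException(Arrays.<Exception>asList(
        new RuntimeException("standby NameNode, retriable"),
        new IllegalStateException("real failure from active NameNode")));
    System.out.println(picked.getMessage());
  }
}
```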

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55796a09/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
index 945e92f..a765e95 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
@@ -17,7 +17,6 @@
  */
 package org.apache.hadoop.hdfs.server.namenode.ha;
 
-import java.io.IOException;
 import java.lang.reflect.InvocationHandler;
 import java.lang.reflect.Method;
 import java.lang.reflect.Proxy;
@@ -122,7 +121,7 @@ public class RequestHedgingProxyProvider extends

[06/33] hadoop git commit: HADOOP-14153. ADL module has messed doc structure. Contributed by Mingliang Liu

2017-03-14 Thread jhung
HADOOP-14153. ADL module has messed doc structure. Contributed by Mingliang Liu


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/881ec4d9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/881ec4d9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/881ec4d9

Branch: refs/heads/YARN-5734
Commit: 881ec4d97bd1db4582027aec3a4204156a4eda17
Parents: a96afae
Author: Mingliang Liu 
Authored: Tue Mar 7 16:29:19 2017 -0800
Committer: Mingliang Liu 
Committed: Fri Mar 10 00:16:09 2017 -0800

--
 .../src/site/markdown/index.md  | 55 
 1 file changed, 21 insertions(+), 34 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/881ec4d9/hadoop-tools/hadoop-azure-datalake/src/site/markdown/index.md
--
diff --git a/hadoop-tools/hadoop-azure-datalake/src/site/markdown/index.md 
b/hadoop-tools/hadoop-azure-datalake/src/site/markdown/index.md
index 9355241..3a16253 100644
--- a/hadoop-tools/hadoop-azure-datalake/src/site/markdown/index.md
+++ b/hadoop-tools/hadoop-azure-datalake/src/site/markdown/index.md
@@ -14,28 +14,15 @@
 
 # Hadoop Azure Data Lake Support
 
-* [Introduction](#Introduction)
-* [Features](#Features)
-* [Limitations](#Limitations)
-* [Usage](#Usage)
-* [Concepts](#Concepts)
-* [OAuth2 Support](#OAuth2_Support)
-* [Configuring Credentials and FileSystem](#Configuring_Credentials)
-* [Using Refresh Token](#Refresh_Token)
-* [Using Client Keys](#Client_Credential_Token)
-* [Protecting the Credentials with Credential 
Providers](#Credential_Provider)
-* [Enabling ADL Filesystem](#Enabling_ADL)
-* [Accessing `adl` URLs](#Accessing_adl_URLs)
-* [User/Group Representation](#OIDtoUPNConfiguration)
-* [Testing the `hadoop-azure` Module](#Testing_the_hadoop-azure_Module)
-
-## <a name="Introduction"></a>Introduction
+
+
+## Introduction
 
 The `hadoop-azure-datalake` module provides support for integration with the
 [Azure Data Lake 
Store](https://azure.microsoft.com/en-in/documentation/services/data-lake-store/).
 This support comes via the JAR file `azure-datalake-store.jar`.
 
-## <a name="Features"></a>Features
+## Features
 
 * Read and write data stored in an Azure Data Lake Storage account.
 * Reference file system paths using URLs using the `adl` scheme for Secure 
Webhdfs i.e. SSL
@@ -46,7 +33,7 @@ This support comes via the JAR file 
`azure-datalake-store.jar`.
 * API `setOwner()`, `setAcl`, `removeAclEntries()`, `modifyAclEntries()` 
accepts UPN or OID
   (Object ID) as user and group names.
 
-## <a name="Limitations"></a>Limitations
+## Limitations
 
 Partial or no support for the following operations :
 
@@ -62,9 +49,9 @@ Partial or no support for the following operations :
 * User and group information returned as `listStatus()` and `getFileStatus()` 
is
 in the form of the GUID associated in Azure Active Directory.
 
-## <a name="Usage"></a>Usage
+## Usage
 
-### <a name="Concepts"></a>Concepts
+### Concepts
 Azure Data Lake Storage access path syntax is:
 
 ```
@@ -74,7 +61,7 @@ adl://<Account Name>.azuredatalakestore.net/
 For details on using the store, see
 [**Get started with Azure Data Lake Store using the Azure 
Portal**](https://azure.microsoft.com/en-in/documentation/articles/data-lake-store-get-started-portal/)
 
-### <a name="OAuth2_Support"></a>OAuth2 Support
+#### OAuth2 Support
 
 Usage of Azure Data Lake Storage requires an OAuth2 bearer token to be present 
as
 part of the HTTPS header as per the OAuth2 specification.
@@ -86,11 +73,11 @@ and identity management service. See [*What is 
ActiveDirectory*](https://azure.m
 
 Following sections describes theOAuth2 configuration in `core-site.xml`.
 
-#### <a name="Configuring_Credentials"></a>Configuring Credentials & FileSystem
+### Configuring Credentials and FileSystem
 Credentials can be configured using either a refresh token (associated with a 
user),
 or a client credential (analogous to a service principal).
 
-#### <a name="Refresh_Token"></a>Using Refresh Tokens
+#### Using Refresh Tokens
 
 Add the following properties to the cluster's `core-site.xml`
 
@@ -119,9 +106,9 @@ service associated with the client id. See [*Active 
Directory Library For Java*]
 ```
 
 
-### <a name="Client_Credential_Token"></a>Using Client Keys
+#### Using Client Keys
 
-#### Generating the Service Principal
+##### Generating the Service Principal
 
 1.  Go to [the portal](https://portal.azure.com)
 2.  Under "Browse", look for Active Directory and click on it.
@@ -135,13 +122,13 @@ service associated with the client id. See [*Active 
Directory Library For Java*]
 -  The token endpoint (select "View endpoints" at the bottom of the page 
and copy/paste the OAuth2 .0 Token Endpoint value)
 -  Resource: Always https://management.core.windows.net/ , for all 
customers
 
-#### Adding the service principal to your ADL Account
+##### Adding the service principal to your ADL Account
 1.  
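The message is truncated here, but the refresh-token flow documented above can also be illustrated programmatically. The sketch below is illustrative only: it sets the OAuth2 properties on a `Configuration` instead of editing `core-site.xml` by hand. The `fs.adl.oauth2.*` key names are assumptions (older releases used `dfs.adls.oauth2.*`), and the bracketed values are placeholders, not real credentials.

```java
// Illustrative only: programmatic equivalent of the core-site.xml
// refresh-token configuration. The fs.adl.oauth2.* keys are assumed
// (older releases used dfs.adls.oauth2.*); bracketed values are placeholders.
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class AdlRefreshTokenSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set("fs.adl.oauth2.access.token.provider.type", "RefreshToken");
    conf.set("fs.adl.oauth2.client.id", "<client id>");
    conf.set("fs.adl.oauth2.refresh.token", "<refresh token>");
    // Bind an adl:// FileSystem with the configured credentials.
    FileSystem fs = FileSystem.get(
        URI.create("adl://<Account Name>.azuredatalakestore.net/"), conf);
    System.out.println(fs.getFileStatus(new Path("/")).isDirectory());
  }
}
```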

[23/33] hadoop git commit: HDFS-11526. Fix confusing block recovery message. Contributed by Yiqun Lin.

2017-03-14 Thread jhung
HDFS-11526. Fix confusing block recovery message. Contributed by Yiqun Lin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/023b941e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/023b941e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/023b941e

Branch: refs/heads/YARN-5734
Commit: 023b941e3b83f32bc785240dbb1bfce11a987941
Parents: e5f2eed
Author: Yiqun Lin 
Authored: Tue Mar 14 17:49:48 2017 +0800
Committer: Yiqun Lin 
Committed: Tue Mar 14 17:49:48 2017 +0800

--
 .../hadoop/hdfs/server/datanode/BlockRecoveryWorker.java  | 10 --
 1 file changed, 4 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/023b941e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockRecoveryWorker.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockRecoveryWorker.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockRecoveryWorker.java
index d39d050..792b6af 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockRecoveryWorker.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockRecoveryWorker.java
@@ -167,9 +167,8 @@ public class BlockRecoveryWorker {
   return;
 } catch (IOException e) {
   ++errorCount;
-  InterDatanodeProtocol.LOG.warn(
-  "Failed to obtain replica info for block (=" + block
-  + ") from datanode (=" + id + ")", e);
+  InterDatanodeProtocol.LOG.warn("Failed to recover block (block="
+  + block + ", datanode=" + id + ")", e);
 }
   }
 
@@ -429,9 +428,8 @@ public class BlockRecoveryWorker {
   + rBlock.getNewGenerationStamp() + " is aborted.", ripE);
   return;
 } catch (IOException e) {
-  InterDatanodeProtocol.LOG.warn(
-  "Failed to obtain replica info for block (=" + block
-  + ") from datanode (=" + id + ")", e);
+  InterDatanodeProtocol.LOG.warn("Failed to recover block (block="
+  + block + ", datanode=" + id + ")", e);
 }
   }
   checkLocations(syncBlocks.size());





[04/33] hadoop git commit: HADOOP-14123. Remove misplaced ADL service provider config file for FileSystem. Contributed by John Zhuge.

2017-03-14 Thread jhung
HADOOP-14123. Remove misplaced ADL service provider config file for FileSystem. 
Contributed by John Zhuge.

Change-Id: Ic956e2eb8189625916442eaffdc69163d32f730e


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c5ee7fde
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c5ee7fde
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c5ee7fde

Branch: refs/heads/YARN-5734
Commit: c5ee7fded46dcb1ac1ea4c1ada4949c50bc89afb
Parents: 846a0cd
Author: John Zhuge 
Authored: Sun Mar 5 22:34:22 2017 -0800
Committer: John Zhuge 
Committed: Thu Mar 9 18:30:17 2017 -0800

--
 .../META-INF/org.apache.hadoop.fs.FileSystem| 16 
 1 file changed, 16 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c5ee7fde/hadoop-tools/hadoop-azure-datalake/src/main/resources/META-INF/org.apache.hadoop.fs.FileSystem
--
diff --git 
a/hadoop-tools/hadoop-azure-datalake/src/main/resources/META-INF/org.apache.hadoop.fs.FileSystem
 
b/hadoop-tools/hadoop-azure-datalake/src/main/resources/META-INF/org.apache.hadoop.fs.FileSystem
deleted file mode 100644
index 7ec7812..0000000
--- 
a/hadoop-tools/hadoop-azure-datalake/src/main/resources/META-INF/org.apache.hadoop.fs.FileSystem
+++ /dev/null
@@ -1,16 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-org.apache.hadoop.fs.adl.AdlFileSystem
\ No newline at end of file





[07/33] hadoop git commit: HADOOP-13946. Document how HDFS updates timestamps in the FS spec; compare with object stores. Contributed by Steve Loughran

2017-03-14 Thread jhung
HADOOP-13946. Document how HDFS updates timestamps in the FS spec; compare with 
object stores. Contributed by Steve Loughran


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fd26783a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fd26783a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fd26783a

Branch: refs/heads/YARN-5734
Commit: fd26783aaf3deea7a4e197439bd1075a6689681f
Parents: 881ec4d
Author: Mingliang Liu 
Authored: Fri Mar 10 00:21:20 2017 -0800
Committer: Mingliang Liu 
Committed: Fri Mar 10 00:21:20 2017 -0800

--
 .../site/markdown/filesystem/introduction.md| 85 
 1 file changed, 85 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fd26783a/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/introduction.md
--
diff --git 
a/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/introduction.md
 
b/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/introduction.md
index f6db557..12a7967 100644
--- 
a/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/introduction.md
+++ 
b/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/introduction.md
@@ -392,3 +392,88 @@ Object stores with these characteristics, can not be used 
as a direct replacement
 for HDFS. In terms of this specification, their implementations of the
 specified operations do not match those required. They are considered supported
 by the Hadoop development community, but not to the same extent as HDFS.
+
+## Timestamps
+
+
+`FileStatus` entries have a modification time and an access time.
+
+1. The exact behavior as to when these timestamps are set and whether or not 
they are valid
+varies between filesystems, and potentially between individual installations 
of a filesystem.
+1. The granularity of the timestamps is, again, specific to both a filesystem
+and potentially individual installations.
+
+The HDFS filesystem does not update the modification time while it is being 
written to.
+
+Specifically
+
+* `FileSystem.create()` creation: a zero-byte file is listed; the modification 
time is
+  set to the current time as seen on the NameNode.
+* Writes to a file via the output stream returned in the `create()` call: the 
modification
+  time *does not change*.
+* When `OutputStream.close()` is called, all remaining data is written, the 
file closed and
+  the NameNode updated with the final size of the file. The modification time 
is set to
+  the time the file was closed.
+* Opening a file for appends via an `append()` operation does not change the 
modification
+  time of the file until the `close()` call is made on the output stream.
+* `FileSystem.setTimes()` can be used to explicitly set the time on a file.
+* When a file is renamed, its modification time is not changed, but the source
+  and destination directories have their modification times updated.
+* The rarely used operations:  `FileSystem.concat()`, `createSnapshot()`,
+ `createSymlink()` and `truncate()` all update the modification time.
+* The access time granularity is set in milliseconds by `dfs.namenode.accesstime.precision`;
+  the default granularity is 1 hour. If the precision is set to zero, access 
times
+  are not recorded.
+* If a modification or access time is not set, the value of that `FileStatus`
+field is 0.
+
+Other filesystems may have different behaviors. In particular,
+
+* Access times may or may not be supported; even if the underlying FS supports
+  access times, the option is often disabled for performance reasons.
+* The granularity of the timestamps is an implementation-specific detail.
+
+
+Object stores have an even vaguer view of time, which can be summarized as
+"it varies".
+
+ * The timestamp granularity is likely to be 1 second, that being the 
granularity
+   of timestamps returned in HTTP HEAD and GET requests.
+ * Access times are likely to be unset. That is, `FileStatus.getAccessTime() 
== 0`.
+ * The modification timestamp for a newly created file MAY be that of the
+  `create()` call, or the actual time at which the PUT request was initiated.
+   This may be in the `FileSystem.create()` call, the final
+   `OutputStream.close()` operation, or some period in between.
+ * The modification time may not be updated in the `close()` call.
+ * The timestamp is likely to be in UTC or the TZ of the object store. If the
+   client is in a different timezone, the timestamp of objects may be ahead or
+   behind that of the client.
+ * Object stores with cached metadata databases (for example: AWS S3 with
+   an in-memory or a DynamoDB metadata store) may have timestamps generated
+   from the local system 
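The message truncates here. As a companion to the HDFS behavior described above, the following is a minimal sketch, assuming an HDFS `FileSystem` bound to the default configuration, that observes the modification time at create, during writes, and after close; the path and output formatting are illustrative.

```java
// A minimal sketch, assuming an HDFS FileSystem from the default
// configuration: the modification time is set at create() and again at
// close(), not on each write.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ModTimeSketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    Path p = new Path("/tmp/modtime-sketch");
    FSDataOutputStream out = fs.create(p, true);
    long atCreate = fs.getFileStatus(p).getModificationTime(); // zero-byte file is listed
    out.write(1);
    out.hflush(); // data visible, but on HDFS the modification time does not change
    long whileWriting = fs.getFileStatus(p).getModificationTime();
    out.close(); // modification time set to the close time
    long afterClose = fs.getFileStatus(p).getModificationTime();
    System.out.printf("create=%d writing=%d close=%d%n",
        atCreate, whileWriting, afterClose);
    fs.delete(p, false);
  }
}
```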

[17/33] hadoop git commit: HDFS-11512. Increase timeout on TestShortCircuitLocalRead#testSkipWithVerifyChecksum. Contributed by Eric Badger.

2017-03-14 Thread jhung
HDFS-11512. Increase timeout on 
TestShortCircuitLocalRead#testSkipWithVerifyChecksum. Contributed by Eric 
Badger.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/79924266
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/79924266
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/79924266

Branch: refs/heads/YARN-5734
Commit: 79924266f8f68e5e7c873e6b12e3b3acfcd708da
Parents: 04a5f5a
Author: Yiqun Lin 
Authored: Mon Mar 13 18:22:30 2017 +0800
Committer: Yiqun Lin 
Committed: Mon Mar 13 18:22:30 2017 +0800

--
 .../apache/hadoop/hdfs/shortcircuit/TestShortCircuitLocalRead.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/79924266/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitLocalRead.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitLocalRead.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitLocalRead.java
index 55e9795..f2ee48c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitLocalRead.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitLocalRead.java
@@ -388,7 +388,7 @@ public class TestShortCircuitLocalRead {
 }
   }
 
-  @Test(timeout=10000)
+  @Test(timeout=60000)
   public void testSkipWithVerifyChecksum() throws IOException {
 int size = blockSize;
 Configuration conf = new Configuration();





[02/33] hadoop git commit: HDFS-11506. Move ErasureCodingPolicyManager#getSystemDefaultPolicy to test code. Contributed by Manoj Govindassamy.

2017-03-14 Thread jhung
HDFS-11506. Move ErasureCodingPolicyManager#getSystemDefaultPolicy to test 
code. Contributed by Manoj Govindassamy.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/819808a0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/819808a0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/819808a0

Branch: refs/heads/YARN-5734
Commit: 819808a016e16325502169e0091a16a6b2ae5387
Parents: e96a0b8
Author: Andrew Wang 
Authored: Thu Mar 9 17:29:11 2017 -0800
Committer: Andrew Wang 
Committed: Thu Mar 9 17:29:11 2017 -0800

--
 .../namenode/ErasureCodingPolicyManager.java| 10 --
 .../org/apache/hadoop/hdfs/DFSTestUtil.java |  2 +-
 .../hdfs/ErasureCodeBenchmarkThroughput.java|  5 +--
 .../apache/hadoop/hdfs/StripedFileTestUtil.java | 12 +++
 .../hadoop/hdfs/TestDFSStripedInputStream.java  |  3 +-
 .../hadoop/hdfs/TestDFSStripedOutputStream.java |  3 +-
 .../TestDFSStripedOutputStreamWithFailure.java  |  3 +-
 .../hdfs/TestDecommissionWithStriped.java   |  5 ++-
 .../hadoop/hdfs/TestErasureCodingPolicies.java  |  8 ++---
 .../TestErasureCodingPolicyWithSnapshot.java|  3 +-
 .../apache/hadoop/hdfs/TestFileChecksum.java|  5 ++-
 .../hadoop/hdfs/TestFileStatusWithECPolicy.java |  3 +-
 .../hadoop/hdfs/TestLeaseRecoveryStriped.java   |  3 +-
 .../hdfs/TestReadStripedFileWithDecoding.java   |  5 ++-
 .../TestReadStripedFileWithMissingBlocks.java   |  3 +-
 .../hadoop/hdfs/TestReconstructStripedFile.java |  7 ++---
 .../hdfs/TestSafeModeWithStripedFile.java   |  5 ++-
 .../TestUnsetAndChangeDirectoryEcPolicy.java|  3 +-
 .../hadoop/hdfs/TestWriteReadStripedFile.java   |  5 ++-
 .../hdfs/TestWriteStripedFileWithFailure.java   |  5 ++-
 .../hadoop/hdfs/protocolPB/TestPBHelper.java| 12 +++
 .../hdfs/server/balancer/TestBalancer.java  |  5 ++-
 .../blockmanagement/TestBlockInfoStriped.java   |  4 +--
 .../TestBlockTokenWithDFSStriped.java   |  6 ++--
 .../TestLowRedundancyBlockQueues.java   |  4 +--
 ...constructStripedBlocksWithRackAwareness.java | 10 +++---
 .../TestSequentialBlockGroupId.java |  6 ++--
 .../TestSortLocatedStripedBlock.java|  4 +--
 .../hdfs/server/datanode/TestBlockRecovery.java |  3 +-
 .../TestDataNodeErasureCodingMetrics.java   |  5 ++-
 .../hadoop/hdfs/server/mover/TestMover.java |  5 ++-
 .../TestAddOverReplicatedStripedBlocks.java |  6 ++--
 .../namenode/TestAddStripedBlockInFBR.java  |  5 +--
 .../server/namenode/TestAddStripedBlocks.java   |  7 +++--
 .../server/namenode/TestEnabledECPolicies.java  | 12 +++
 .../server/namenode/TestFSEditLogLoader.java|  3 +-
 .../hadoop/hdfs/server/namenode/TestFsck.java   | 33 +---
 .../server/namenode/TestNameNodeMXBean.java | 12 +++
 .../namenode/TestQuotaWithStripedBlocks.java|  3 +-
 .../namenode/TestReconstructStripedBlocks.java  |  6 ++--
 .../server/namenode/TestStripedINodeFile.java   |  5 +--
 ...TestOfflineImageViewerWithStripedBlocks.java |  8 ++---
 42 files changed, 121 insertions(+), 141 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/819808a0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
index 02cbbdf..29af207 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
@@ -130,16 +130,6 @@ public final class ErasureCodingPolicyManager {
   }
 
   /**
-   * Get system-wide default policy, which can be used by default
-   * when no policy is specified for a path.
-   * @return ecPolicy
-   */
-  public static ErasureCodingPolicy getSystemDefaultPolicy() {
-// make this configurable?
-return SYS_POLICY1;
-  }
-
-  /**
* Get a policy by policy ID.
* @return ecPolicy, or null if not found
*/

http://git-wip-us.apache.org/repos/asf/hadoop/blob/819808a0/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
index 7bf5cdc..1329195 100644
--- 
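The diff truncates here. For context, the sketch below shows how test code might obtain a default erasure coding policy after this move; `StripedFileTestUtil.getDefaultECPolicy()` is an assumed test-scope replacement for the removed `getSystemDefaultPolicy()`, not confirmed by the visible portion of the patch.

```java
// A hedged sketch of the test-side lookup after this move. The method name
// StripedFileTestUtil.getDefaultECPolicy() is an assumption about where the
// removed getSystemDefaultPolicy() logic now lives; verify against the full
// patch before relying on it.
import org.apache.hadoop.hdfs.StripedFileTestUtil;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

public class EcPolicyLookupSketch {
  public static void main(String[] args) {
    ErasureCodingPolicy policy = StripedFileTestUtil.getDefaultECPolicy();
    // e.g. a Reed-Solomon policy with 6 data units and 3 parity units
    System.out.println(policy.getName() + " data=" + policy.getNumDataUnits()
        + " parity=" + policy.getNumParityUnits());
  }
}
```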

[08/33] hadoop git commit: YARN-6196. Improve Resource Donut chart with better label in Node page of new YARN UI. Contributed by Akhil PB.

2017-03-14 Thread jhung
YARN-6196. Improve Resource Donut chart with better label in Node page of new 
YARN UI. Contributed by Akhil PB.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e06ff18a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e06ff18a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e06ff18a

Branch: refs/heads/YARN-5734
Commit: e06ff18ab68d23a0f236df8a0603a42367927f3c
Parents: fd26783
Author: Sunil G 
Authored: Fri Mar 10 16:17:48 2017 +0530
Committer: Sunil G 
Committed: Fri Mar 10 16:17:48 2017 +0530

--
 .../main/webapp/app/helpers/log-files-comma.js  | 10 -
 .../app/serializers/yarn-node-container.js  |  2 +-
 .../main/webapp/app/serializers/yarn-node.js|  4 ++--
 .../main/webapp/app/serializers/yarn-rm-node.js |  4 ++--
 .../src/main/webapp/app/templates/yarn-node.hbs | 23 +---
 .../main/webapp/app/templates/yarn-nodes.hbs|  2 +-
 .../webapp/app/templates/yarn-nodes/table.hbs   | 13 ++-
 7 files changed, 37 insertions(+), 21 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e06ff18a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/helpers/log-files-comma.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/helpers/log-files-comma.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/helpers/log-files-comma.js
index 78dcf25..026cd7f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/helpers/log-files-comma.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/helpers/log-files-comma.js
@@ -35,8 +35,16 @@ export default Ember.Helper.helper(function(params,hash) {
   var containerId = hash.containerId;
   var html = '';
   for (var i = 0; i < logFilesLen; i++) {
+var logFileName = "";
+if (logFiles[i]) {
+  if (typeof logFiles[i] === "object" && logFiles[i].containerLogFiles) {
+logFileName = logFiles[i].containerLogFiles;
+  } else if (typeof logFiles[i] === "string") {
+logFileName = logFiles[i];
+  }
+}
 html = html + '' + logFiles[i] +
+nodeAddr + '/' + containerId + '/' + logFileName + '">' + logFileName +
 '';
 if (i !== logFilesLen - 1) {
   html = html + ",";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e06ff18a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-node-container.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-node-container.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-node-container.js
index 7e78987..7bcb655 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-node-container.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-node-container.js
@@ -30,7 +30,7 @@ export default DS.JSONAPISerializer.extend({
 containerId: payload.id,
 state: payload.state,
 user: payload.user,
-diagnostics: payload.diagnostics,
+diagnostics: payload.diagnostics || 'N/A',
 exitCode: payload.exitCode,
 totalMemoryNeeded: payload.totalMemoryNeededMB,
 totalVCoresNeeded: payload.totalVCoresNeeded,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e06ff18a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-node.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-node.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-node.js
index 0d9faec..10521e6 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-node.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-node.js
@@ -36,8 +36,8 @@ export default DS.JSONAPISerializer.extend({
 pmemCheckEnabled: payload.pmemCheckEnabled,
 nodeHealthy: payload.nodeHealthy,
 lastNodeUpdateTime: 
Converter.timeStampToDate(payload.lastNodeUpdateTime),
-healthReport: payload.healthReport,
-nmStartupTime: Converter.timeStampToDate(payload.nmStartupTime),
+healthReport: payload.healthReport || 'N/A',
+nmStartupTime: payload.nmStartupTime? 
Converter.timeStampToDate(payload.nmStartupTime) : '',
 nodeManagerBuildVersion: payload.nodeManagerBuildVersion,
 hadoopBuildVersion: payload.hadoopBuildVersion
   }


[11/33] hadoop git commit: HADOOP-14111 cut some obsolete, ignored s3 tests in TestS3Credentials. Contributed by Yuanbo Liu

2017-03-14 Thread jhung
HADOOP-14111 cut some obsolete, ignored s3 tests in TestS3Credentials.
Contributed by Yuanbo Liu


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/092ec39f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/092ec39f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/092ec39f

Branch: refs/heads/YARN-5734
Commit: 092ec39fb9d9930d234ed1f0ec507b2f8c6ff4bc
Parents: 4478273
Author: Steve Loughran 
Authored: Fri Mar 10 17:43:22 2017 +
Committer: Steve Loughran 
Committed: Fri Mar 10 17:43:22 2017 +

--
 .../hadoop/fs/s3native/TestS3Credentials.java | 18 --
 1 file changed, 18 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/092ec39f/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3native/TestS3Credentials.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3native/TestS3Credentials.java
 
b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3native/TestS3Credentials.java
index 33d0320..17b78c7 100644
--- 
a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3native/TestS3Credentials.java
+++ 
b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3native/TestS3Credentials.java
@@ -28,7 +28,6 @@ import java.io.File;
 import java.net.URI;
 
 import org.junit.Before;
-import org.junit.Ignore;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.TemporaryFolder;
@@ -127,21 +126,4 @@ public class TestS3Credentials {
 s3Credentials.getSecretAccessKey());
   }
 
-  @Test(expected=IllegalArgumentException.class)
-  @Ignore
-  public void noSecretShouldThrow() throws Exception {
-S3Credentials s3Credentials = new S3Credentials();
-Configuration conf = new Configuration();
-conf.set(S3_NATIVE_AWS_ACCESS_KEY_ID, EXAMPLE_ID);
-s3Credentials.initialize(new URI("s3n://foobar"), conf);
-  }
-
-  @Test(expected=IllegalArgumentException.class)
-  @Ignore
-  public void noAccessIdShouldThrow() throws Exception {
-S3Credentials s3Credentials = new S3Credentials();
-Configuration conf = new Configuration();
-conf.set(S3_NATIVE_AWS_SECRET_ACCESS_KEY, EXAMPLE_KEY);
-s3Credentials.initialize(new URI("s3n://foobar"), conf);
-  }
 }





[05/33] hadoop git commit: YARN-6264. AM not launched when a single vcore is available on the cluster. (Yufei Gu via kasha)

2017-03-14 Thread jhung
YARN-6264. AM not launched when a single vcore is available on the cluster. 
(Yufei Gu via kasha)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a96afae1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a96afae1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a96afae1

Branch: refs/heads/YARN-5734
Commit: a96afae125ba02fb4480542d3fb0891623ee4c37
Parents: c5ee7fde
Author: Karthik Kambatla 
Authored: Thu Mar 9 23:11:54 2017 -0800
Committer: Karthik Kambatla 
Committed: Thu Mar 9 23:11:54 2017 -0800

--
 .../hadoop/yarn/util/resource/Resources.java|  7 +
 .../yarn/util/resource/TestResources.java   | 24 -
 .../scheduler/fair/FSLeafQueue.java |  3 ++-
 .../scheduler/fair/TestFairScheduler.java   | 28 ++--
 4 files changed, 46 insertions(+), 16 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a96afae1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java
index 57b3a46..7020300 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java
@@ -242,6 +242,13 @@ public class Resources {
 out.setVirtualCores((int)(lhs.getVirtualCores() * by));
 return out;
   }
+
+  public static Resource multiplyAndRoundUp(Resource lhs, double by) {
+Resource out = clone(lhs);
+out.setMemorySize((long)Math.ceil(lhs.getMemorySize() * by));
+out.setVirtualCores((int)Math.ceil(lhs.getVirtualCores() * by));
+return out;
+  }
   
   public static Resource normalize(
   ResourceCalculator calculator, Resource lhs, Resource min,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a96afae1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/resource/TestResources.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/resource/TestResources.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/resource/TestResources.java
index 057214b..f8570a8 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/resource/TestResources.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/resource/TestResources.java
@@ -20,6 +20,8 @@ package org.apache.hadoop.yarn.util.resource;
 
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.junit.Test;
+
+import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 
 public class TestResources {
@@ -46,5 +48,25 @@ public class TestResources {
 assertTrue(Resources.none().compareTo(
 createResource(0, 1)) < 0);
   }
-  
+
+  @Test
+  public void testMultipleRoundUp() {
+final double by = 0.5;
+final String memoryErrorMsg = "Invalid memory size.";
+final String vcoreErrorMsg = "Invalid virtual core number.";
+Resource resource = Resources.createResource(1, 1);
+Resource result = Resources.multiplyAndRoundUp(resource, by);
+assertEquals(memoryErrorMsg, result.getMemorySize(), 1);
+assertEquals(vcoreErrorMsg, result.getVirtualCores(), 1);
+
+resource = Resources.createResource(2, 2);
+result = Resources.multiplyAndRoundUp(resource, by);
+assertEquals(memoryErrorMsg, result.getMemorySize(), 1);
+assertEquals(vcoreErrorMsg, result.getVirtualCores(), 1);
+
+resource = Resources.createResource(0, 0);
+result = Resources.multiplyAndRoundUp(resource, by);
+assertEquals(memoryErrorMsg, result.getMemorySize(), 0);
+assertEquals(vcoreErrorMsg, result.getVirtualCores(), 0);
+  }
 }
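A standalone sketch of why rounding up matters for this fix: with a single-vcore cluster and a 0.5 max-AM share, truncating multiplication yields zero vcores, so the AM can never launch, while a ceiling (as in `multiplyAndRoundUp` above) yields one. Plain Java with no YARN types; the numbers are illustrative.

```java
// Plain-Java sketch of the bug this fixes: computing the vcore share of a
// single-vcore cluster with a 0.5 max-AM share. Truncation yields 0 vcores,
// so the AM can never launch; ceiling (as in multiplyAndRoundUp) yields 1.
public class RoundUpSketch {
  public static void main(String[] args) {
    int clusterVcores = 1;
    double maxAMShare = 0.5;
    int roundedDown = (int) (clusterVcores * maxAMShare);        // 0
    int roundedUp = (int) Math.ceil(clusterVcores * maxAMShare); // 1
    System.out.println("down=" + roundedDown + " up=" + roundedUp);
  }
}
```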

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a96afae1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
--
diff --git 

[18/33] hadoop git commit: HADOOP-14173. Remove unused AdlConfKeys#ADL_EVENTS_TRACKING_SOURCE. Contributed by John Zhuge.

2017-03-14 Thread jhung
HADOOP-14173. Remove unused AdlConfKeys#ADL_EVENTS_TRACKING_SOURCE. Contributed 
by John Zhuge.

Change-Id: I5dc6f885816b8834f718874542dfa373458b0333


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5a40bafd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5a40bafd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5a40bafd

Branch: refs/heads/YARN-5734
Commit: 5a40bafdaeec693e613aa02e79dbaaccfdab6f60
Parents: 7992426
Author: John Zhuge 
Authored: Fri Mar 10 17:42:30 2017 -0800
Committer: John Zhuge 
Committed: Mon Mar 13 08:11:25 2017 -0700

--
 .../src/main/java/org/apache/hadoop/fs/adl/AdlConfKeys.java | 1 -
 1 file changed, 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5a40bafd/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlConfKeys.java
--
diff --git 
a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlConfKeys.java
 
b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlConfKeys.java
index 7d31103..8fc8e00 100644
--- 
a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlConfKeys.java
+++ 
b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlConfKeys.java
@@ -66,7 +66,6 @@ public final class AdlConfKeys {
   static final String ADL_HADOOP_CLIENT_NAME = "hadoop-azure-datalake-";
   static final String ADL_HADOOP_CLIENT_VERSION =
   "2.0.0-SNAPSHOT";
-  static final String ADL_EVENTS_TRACKING_SOURCE = 
"adl.events.tracking.source";
   static final String ADL_EVENTS_TRACKING_CLUSTERNAME =
   "adl.events.tracking.clustername";
 





hadoop git commit: YARN-6331. Fix flakiness in TestFairScheduler#testDumpState. (Yufei Gu via rchiang)

2017-03-14 Thread rchiang
Repository: hadoop
Updated Branches:
  refs/heads/trunk fa67a96d7 -> 4c66a8d19


YARN-6331. Fix flakiness in TestFairScheduler#testDumpState. (Yufei Gu via 
rchiang)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4c66a8d1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4c66a8d1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4c66a8d1

Branch: refs/heads/trunk
Commit: 4c66a8d19b7d503095ad27aeed39d62238b9cb47
Parents: fa67a96
Author: Ray Chiang 
Authored: Tue Mar 14 14:37:18 2017 -0700
Committer: Ray Chiang 
Committed: Tue Mar 14 15:09:47 2017 -0700

--
 .../server/resourcemanager/scheduler/fair/TestFairScheduler.java  | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c66a8d1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
index 537d3d0..baf7434 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
@@ -5201,6 +5201,7 @@ public class TestFairScheduler extends 
FairSchedulerTestBase {
 FSParentQueue parent =
 scheduler.getQueueManager().getParentQueue("parent", false);
 parent.setMaxShare(resource);
+parent.updateDemand();
 
 String parentQueueString = "{Name: root.parent,"
 + " Weight: ,"
@@ -5210,7 +5211,7 @@ public class TestFairScheduler extends 
FairSchedulerTestBase {
 + " MaxShare: ,"
 + " MinShare: ,"
 + " ResourceUsage: ,"
-+ " Demand: ,"
++ " Demand: ,"
 + " MaxAMShare: 0.5,"
 + " Runnable: 0}";
 





hadoop git commit: HADOOP-14170. FileSystemContractBaseTest is not cleaning up test directory clearly. Contributed by Mingliang Liu

2017-03-14 Thread liuml07
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 fa59f4e49 -> ed0d426a8


HADOOP-14170. FileSystemContractBaseTest is not cleaning up test directory 
clearly. Contributed by Mingliang Liu


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ed0d426a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ed0d426a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ed0d426a

Branch: refs/heads/branch-2
Commit: ed0d426a88b23965e4188188258a909aa866f012
Parents: fa59f4e
Author: Mingliang Liu 
Authored: Mon Mar 13 15:19:06 2017 -0700
Committer: Mingliang Liu 
Committed: Tue Mar 14 15:00:59 2017 -0700

--
 .../hadoop/fs/FileSystemContractBaseTest.java   | 209 ---
 .../fs/s3a/ITestS3AFileSystemContract.java  |  39 +---
 2 files changed, 141 insertions(+), 107 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed0d426a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java
index 757370e5..4159582 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java
@@ -24,8 +24,9 @@ import java.util.ArrayList;
 
 import junit.framework.TestCase;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.permission.FsPermission;
 
@@ -43,8 +44,8 @@ import org.apache.hadoop.fs.permission.FsPermission;
  * 
  */
 public abstract class FileSystemContractBaseTest extends TestCase {
-  private static final Log LOG =
-LogFactory.getLog(FileSystemContractBaseTest.class);
+  private static final Logger LOG =
+  LoggerFactory.getLogger(FileSystemContractBaseTest.class);
 
   protected final static String TEST_UMASK = "062";
   protected FileSystem fs;
@@ -52,9 +53,46 @@ public abstract class FileSystemContractBaseTest extends 
TestCase {
 
   @Override
   protected void tearDown() throws Exception {
-fs.delete(path("/test"), true);
+if (fs != null) {
+  // some cases use this absolute path
+  if (rootDirTestEnabled()) {
+cleanupDir(path("/FileSystemContractBaseTest"));
+  }
+  // others use this relative path against test base directory
+  cleanupDir(getTestBaseDir());
+}
+super.tearDown();
   }
-  
+
+  private void cleanupDir(Path p) {
+try {
+  LOG.info("Deleting " + p);
+  fs.delete(p, true);
+} catch (IOException e) {
+  LOG.error("Error deleting test dir: " + p, e);
+}
+  }
+
+  /**
+   * Test base directory for resolving relative test paths.
+   *
+   * The default value is /user/$USER/FileSystemContractBaseTest. Subclass may
+   * set specific test base directory.
+   */
+  protected Path getTestBaseDir() {
+return new Path(fs.getWorkingDirectory(), "FileSystemContractBaseTest");
+  }
+
+  /**
+   * For absolute path return the fully qualified path while for relative path
+   * return the fully qualified path against {@link #getTestBaseDir()}.
+   */
+  protected final Path path(String pathString) {
+Path p = new Path(pathString).makeQualified(fs.getUri(), getTestBaseDir());
+LOG.info("Resolving {} -> {}", pathString, p);
+return p;
+  }
+
   protected int getBlockSize() {
 return 1024;
   }
@@ -67,6 +105,17 @@ public abstract class FileSystemContractBaseTest extends 
TestCase {
 return true;
   }
 
+  /**
+   * Override this if the filesystem does not enable testing root directories.
+   *
+   * If this returns true, the test will create and delete test directories and
+   * files under root directory, which may have side effects, e.g. fail tests
+   * with PermissionDenied exceptions.
+   */
+  protected boolean rootDirTestEnabled() {
+return true;
+  }
+
   public void testFsStatus() throws Exception {
 FsStatus fsStatus = fs.getStatus();
 assertNotNull(fsStatus);
@@ -81,24 +130,24 @@ public abstract class FileSystemContractBaseTest extends 
TestCase {
 Path workDir = path(getDefaultWorkingDirectory());
 assertEquals(workDir, fs.getWorkingDirectory());
 
-fs.setWorkingDirectory(path("."));
+fs.setWorkingDirectory(fs.makeQualified(new Path(".")));
 assertEquals(workDir, fs.getWorkingDirectory());
 
-
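The message truncates here. A hedged sketch of how a connector's contract test might use the new `rootDirTestEnabled()` and `getTestBaseDir()` hooks shown above; the class name and scratch-directory choice are illustrative, not part of this commit.

```java
// A hedged sketch of a connector contract test using the new hooks above;
// the class name and scratch-directory choice are illustrative only.
import org.apache.hadoop.fs.FileSystemContractBaseTest;
import org.apache.hadoop.fs.Path;

public abstract class ExampleFSContractTest extends FileSystemContractBaseTest {
  @Override
  protected boolean rootDirTestEnabled() {
    // Shared stores may reject writes under "/", so skip root-dir cases.
    return false;
  }

  @Override
  protected Path getTestBaseDir() {
    // Keep all test artifacts under a per-user scratch directory instead.
    return new Path(fs.getWorkingDirectory(), "contract-test-scratch");
  }
}
```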

hadoop git commit: HADOOP-14170. FileSystemContractBaseTest is not cleaning up test directory clearly. Contributed by Mingliang Liu

2017-03-14 Thread liuml07
Repository: hadoop
Updated Branches:
  refs/heads/trunk e6cda5819 -> fa67a96d7


HADOOP-14170. FileSystemContractBaseTest is not cleaning up test directory 
clearly. Contributed by Mingliang Liu


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fa67a96d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fa67a96d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fa67a96d

Branch: refs/heads/trunk
Commit: fa67a96d7b0812a6557e40a6ef1eb16f19823e73
Parents: e6cda58
Author: Mingliang Liu 
Authored: Fri Mar 10 18:44:27 2017 -0800
Committer: Mingliang Liu 
Committed: Tue Mar 14 14:38:21 2017 -0700

--
 .../hadoop/fs/FileSystemContractBaseTest.java   | 246 +++
 .../fs/TestRawLocalFileSystemContract.java  |  24 +-
 .../fs/s3a/ITestS3AFileSystemContract.java  |  39 +--
 3 files changed, 176 insertions(+), 133 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fa67a96d/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java
index 6247959..040e9c8 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java
@@ -24,8 +24,9 @@ import java.util.ArrayList;
 
 import junit.framework.TestCase;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.security.AccessControlException;
@@ -45,8 +46,8 @@ import org.apache.hadoop.util.StringUtils;
  * 
  */
 public abstract class FileSystemContractBaseTest extends TestCase {
-  private static final Log LOG =
-LogFactory.getLog(FileSystemContractBaseTest.class);
+  private static final Logger LOG =
+  LoggerFactory.getLogger(FileSystemContractBaseTest.class);
 
   protected final static String TEST_UMASK = "062";
   protected FileSystem fs;
@@ -54,15 +55,46 @@ public abstract class FileSystemContractBaseTest extends 
TestCase {
 
   @Override
   protected void tearDown() throws Exception {
-try {
-  if (fs != null) {
-fs.delete(path("/test"), true);
+if (fs != null) {
+  // some cases use this absolute path
+  if (rootDirTestEnabled()) {
+cleanupDir(path("/FileSystemContractBaseTest"));
   }
+  // others use this relative path against test base directory
+  cleanupDir(getTestBaseDir());
+}
+super.tearDown();
+  }
+
+  private void cleanupDir(Path p) {
+try {
+  LOG.info("Deleting " + p);
+  fs.delete(p, true);
 } catch (IOException e) {
-  LOG.error("Error deleting /test: " + e, e);
+  LOG.error("Error deleting test dir: " + p, e);
 }
   }
-  
+
+  /**
+   * Test base directory for resolving relative test paths.
+   *
+   * The default value is /user/$USER/FileSystemContractBaseTest. Subclass may
+   * set specific test base directory.
+   */
+  protected Path getTestBaseDir() {
+return new Path(fs.getWorkingDirectory(), "FileSystemContractBaseTest");
+  }
+
+  /**
+   * For absolute path return the fully qualified path while for relative path
+   * return the fully qualified path against {@link #getTestBaseDir()}.
+   */
+  protected final Path path(String pathString) {
+Path p = new Path(pathString).makeQualified(fs.getUri(), getTestBaseDir());
+LOG.info("Resolving {} -> {}", pathString, p);
+return p;
+  }
+
   protected int getBlockSize() {
 return 1024;
   }
@@ -81,6 +113,17 @@ public abstract class FileSystemContractBaseTest extends 
TestCase {
   }
 
   /**
+   * Override this if the filesystem does not enable testing root directories.
+   *
+   * If this returns true, the test will create and delete test directories and
+   * files under root directory, which may have side effects, e.g. fail tests
+   * with PermissionDenied exceptions.
+   */
+  protected boolean rootDirTestEnabled() {
+return true;
+  }
+
+  /**
* Override this if the filesystem is not case sensitive
* @return true if the case detection/preservation tests should run
*/
@@ -102,24 +145,24 @@ public abstract class FileSystemContractBaseTest extends 
TestCase {
 Path workDir = path(getDefaultWorkingDirectory());
 assertEquals(workDir, fs.getWorkingDirectory());
 

hadoop git commit: YARN-6042. Dump scheduler and queue state information into FairScheduler DEBUG log. (Yufei Gu via rchiang)

2017-03-14 Thread rchiang
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 0e7879052 -> fa59f4e49


YARN-6042. Dump scheduler and queue state information into FairScheduler DEBUG 
log. (Yufei Gu via rchiang)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fa59f4e4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fa59f4e4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fa59f4e4

Branch: refs/heads/branch-2
Commit: fa59f4e4907d2c37841d59656d79b3162e774310
Parents: 0e78790
Author: Ray Chiang 
Authored: Tue Mar 14 14:45:13 2017 -0700
Committer: Ray Chiang 
Committed: Tue Mar 14 14:45:13 2017 -0700

--
 .../src/main/conf/log4j.properties  |  9 +++
 .../scheduler/fair/FSAppAttempt.java| 51 +++---
 .../scheduler/fair/FSLeafQueue.java | 21 ++
 .../scheduler/fair/FSParentQueue.java   | 21 ++
 .../resourcemanager/scheduler/fair/FSQueue.java | 41 ++-
 .../scheduler/fair/FairScheduler.java   | 28 +---
 .../scheduler/fair/TestFairScheduler.java   | 72 
 7 files changed, 209 insertions(+), 34 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fa59f4e4/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
--
diff --git a/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties 
b/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
index 95afc61..7c02b20 100644
--- a/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
+++ b/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
@@ -321,3 +321,12 @@ 
log4j.appender.EWMA=org.apache.hadoop.yarn.util.Log4jWarningErrorMetricsAppender
 log4j.appender.EWMA.cleanupInterval=${yarn.ewma.cleanupInterval}
 log4j.appender.EWMA.messageAgeLimitSeconds=${yarn.ewma.messageAgeLimitSeconds}
 log4j.appender.EWMA.maxUniqueMessages=${yarn.ewma.maxUniqueMessages}
+
+# Fair scheduler requests log on state dump
+log4j.logger.org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler.statedump=DEBUG,FSLOGGER
+log4j.appender.FSLOGGER=org.apache.log4j.RollingFileAppender
+log4j.appender.FSLOGGER.File=${hadoop.log.dir}/fairscheduler-statedump.log
+log4j.appender.FSLOGGER.layout=org.apache.log4j.PatternLayout
+log4j.appender.FSLOGGER.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+log4j.appender.FSLOGGER.MaxFileSize=${hadoop.log.maxfilesize}
+log4j.appender.FSLOGGER.MaxBackupIndex=${hadoop.log.maxbackupindex}
\ No newline at end of file
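A minimal sketch of logging against the dedicated state-dump logger that the properties above route to `fairscheduler-statedump.log`; the logger name mirrors the `...FairScheduler.statedump` key, the `isDebugEnabled()` guard mirrors the pattern used in the scheduler code below, and the dump string is invented for illustration.

```java
// A minimal sketch of logging to the dedicated state-dump logger that the
// properties above route to fairscheduler-statedump.log. The logger name
// mirrors the "...FairScheduler.statedump" key; the dump string is invented.
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class StateDumpSketch {
  private static final Logger STATE_DUMP_LOG = LoggerFactory.getLogger(
      "org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair"
          + ".FairScheduler.statedump");

  public static void main(String[] args) {
    // Build the potentially expensive dump only when DEBUG is enabled.
    if (STATE_DUMP_LOG.isDebugEnabled()) {
      STATE_DUMP_LOG.debug("{Name: root, Runnable: 0}");
    }
  }
}
```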

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fa59f4e4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
index 60902a2..2e08343 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
@@ -834,24 +834,26 @@ public class FSAppAttempt extends 
SchedulerApplicationAttempt
   return capability;
 }
 
+if (LOG.isDebugEnabled()) {
+  LOG.debug("Resource request: " + capability + " exceeds the available"
+  + " resources of the node.");
+}
+
 // The desired container won't fit here, so reserve
 if (isReservable(capability) &&
 reserve(request, node, reservedContainer, type, schedulerKey)) {
-  if (isWaitingForAMContainer()) {
-updateAMDiagnosticMsg(capability,
-" exceed the available resources of the node and the request is"
-+ " reserved");
+  updateAMDiagnosticMsg(capability, " exceeds the available resources of "
+  + "the node and the request is reserved)");
+  if (LOG.isDebugEnabled()) {
+LOG.debug(getName() + "'s resource request is reserved.");
   }
   return FairScheduler.CONTAINER_RESERVED;
 } else {
-  if (isWaitingForAMContainer()) {
-updateAMDiagnosticMsg(capability,
-" exceed the available resources of the node and the 

hadoop git commit: Revert "HADOOP-14170. FileSystemContractBaseTest is not cleaning up test directory clearly. Contributed by Mingliang Liu"

2017-03-14 Thread liuml07
Repository: hadoop
Updated Branches:
  refs/heads/trunk 871dc420f -> e6cda5819


Revert "HADOOP-14170. FileSystemContractBaseTest is not cleaning up test 
directory clearly. Contributed by Mingliang Liu"

This reverts commit b8c69557b7a23ff9c4c0b2c9d595338a08b873f1.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e6cda581
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e6cda581
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e6cda581

Branch: refs/heads/trunk
Commit: e6cda5819b1f4bbdcb12487260b1e3b787e11879
Parents: 871dc42
Author: Mingliang Liu 
Authored: Tue Mar 14 12:54:46 2017 -0700
Committer: Mingliang Liu 
Committed: Tue Mar 14 13:03:42 2017 -0700

--
 .../hadoop/fs/FileSystemContractBaseTest.java   | 247 +++
 .../fs/TestRawLocalFileSystemContract.java  |  24 +-
 .../fs/s3a/ITestS3AFileSystemContract.java  |  39 ++-
 3 files changed, 132 insertions(+), 178 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e6cda581/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java
index 78ba1f9..6247959 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java
@@ -24,9 +24,8 @@ import java.util.ArrayList;
 
 import junit.framework.TestCase;
 
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.security.AccessControlException;
@@ -46,8 +45,8 @@ import org.apache.hadoop.util.StringUtils;
  * 
  */
 public abstract class FileSystemContractBaseTest extends TestCase {
-  private static final Logger LOG =
-  LoggerFactory.getLogger(FileSystemContractBaseTest.class);
+  private static final Log LOG =
+LogFactory.getLog(FileSystemContractBaseTest.class);
 
   protected final static String TEST_UMASK = "062";
   protected FileSystem fs;
@@ -55,46 +54,15 @@ public abstract class FileSystemContractBaseTest extends 
TestCase {
 
   @Override
   protected void tearDown() throws Exception {
-if (fs != null) {
-  // some cases use this absolute path
-  if (rootDirTestEnabled()) {
-cleanupDir(path("/FileSystemContractBaseTest"));
-  }
-  // others use this relative path against test base directory
-  cleanupDir(getTestBaseDir());
-}
-super.tearDown();
-  }
-
-  private void cleanupDir(Path p) {
 try {
-  LOG.info("Deleting " + p);
-  fs.delete(p, true);
+  if (fs != null) {
+fs.delete(path("/test"), true);
+  }
 } catch (IOException e) {
-  LOG.error("Error deleting test dir: " + p, e);
+  LOG.error("Error deleting /test: " + e, e);
 }
   }
-
-  /**
-   * Test base directory for resolving relative test paths.
-   *
-   * The default value is /user/$USER/FileSystemContractBaseTest. Subclass may
-   * set specific test base directory.
-   */
-  protected Path getTestBaseDir() {
-return new Path(fs.getWorkingDirectory(), "FileSystemContractBaseTest");
-  }
-
-  /**
-   * For absolute path return the fully qualified path while for relative path
-   * return the fully qualified path against {@link #getTestBaseDir()}.
-   */
-  protected final Path path(String pathString) {
-Path p = new Path(pathString).makeQualified(fs.getUri(), getTestBaseDir());
-LOG.info("Resolving {} -> {}", pathString, p);
-return p;
-  }
-
+  
   protected int getBlockSize() {
 return 1024;
   }
@@ -113,17 +81,6 @@ public abstract class FileSystemContractBaseTest extends 
TestCase {
   }
 
   /**
-   * Override this if the filesystem does not enable testing root directories.
-   *
-   * If this returns true, the test will create and delete test directories and
-   * files under root directory, which may have side effects, e.g. fail tests
-   * with PermissionDenied exceptions.
-   */
-  protected boolean rootDirTestEnabled() {
-return true;
-  }
-
-  /**
* Override this if the filesystem is not case sensitive
* @return true if the case detection/preservation tests should run
*/
@@ -145,24 +102,24 @@ public abstract class FileSystemContractBaseTest extends 
TestCase {
 Path workDir = 

hadoop git commit: YARN-6313. YARN logs cli should provide logs for a completed container even when application is still running. Contributed by Xuan Gong.

2017-03-14 Thread junping_du
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 f254002f1 -> 0e7879052


YARN-6313. YARN logs cli should provide logs for a completed container even 
when application is still running. Contributed by Xuan Gong.

(cherry picked from commit b88f5e0f7858d1d89b79dfd325b767c34416052d)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0e787905
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0e787905
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0e787905

Branch: refs/heads/branch-2
Commit: 0e7879052ac76d5efd9e716a521b3bba6319010b
Parents: f254002
Author: Junping Du 
Authored: Tue Mar 14 12:56:54 2017 -0700
Committer: Junping Du 
Committed: Tue Mar 14 12:58:41 2017 -0700

--
 .../apache/hadoop/yarn/client/cli/LogsCLI.java  | 172 +--
 .../hadoop/yarn/client/cli/TestLogsCLI.java |  31 
 .../yarn/logaggregation/LogCLIHelpers.java  |  11 +-
 3 files changed, 160 insertions(+), 54 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0e787905/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/LogsCLI.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/LogsCLI.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/LogsCLI.java
index e04143f..de0b64b 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/LogsCLI.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/LogsCLI.java
@@ -44,6 +44,7 @@ import org.apache.commons.cli.Options;
 import org.apache.commons.cli.ParseException;
 import org.apache.commons.io.IOUtils;
 import org.apache.commons.lang.StringUtils;
+import org.apache.commons.math3.util.Pair;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceAudience.Public;
 import org.apache.hadoop.classification.InterfaceStability.Evolving;
@@ -65,6 +66,7 @@ import 
org.apache.hadoop.yarn.logaggregation.ContainerLogsRequest;
 import org.apache.hadoop.yarn.logaggregation.LogCLIHelpers;
 import org.apache.hadoop.yarn.logaggregation.PerContainerLogFileInfo;
 import org.apache.hadoop.yarn.webapp.util.WebAppUtils;
+import org.apache.hadoop.yarn.webapp.util.YarnWebServiceUtils;
 import org.codehaus.jettison.json.JSONArray;
 import org.codehaus.jettison.json.JSONException;
 import org.codehaus.jettison.json.JSONObject;
@@ -409,10 +411,11 @@ public class LogsCLI extends Configured implements Tool {
 return false;
   }
 
-  private List<PerContainerLogFileInfo> getContainerLogFiles(
+  private List<Pair<PerContainerLogFileInfo, String>> getContainerLogFiles(
   Configuration conf, String containerIdStr, String nodeHttpAddress)
   throws IOException {
-List<PerContainerLogFileInfo> logFileInfos = new ArrayList<>();
+List<Pair<PerContainerLogFileInfo, String>> logFileInfos
+= new ArrayList<>();
 Client webServiceClient = Client.create();
 try {
   WebResource webResource = webServiceClient
@@ -439,16 +442,20 @@ public class LogsCLI extends Configured implements Tool {
   }
   for (int i = 0; i < array.length(); i++) {
 JSONObject log = array.getJSONObject(i);
+String aggregateType = log.has("logAggregationType") ?
+log.getString("logAggregationType") : "N/A";
 Object ob = log.get("containerLogInfo");
 if (ob instanceof JSONArray) {
   JSONArray obArray = (JSONArray)ob;
   for (int j = 0; j < obArray.length(); j++) {
-logFileInfos.add(generatePerContainerLogFileInfoFromJSON(
-obArray.getJSONObject(j)));
+logFileInfos.add(new Pair<PerContainerLogFileInfo, String>(
+generatePerContainerLogFileInfoFromJSON(
+obArray.getJSONObject(j)), aggregateType));
   }
 } else if (ob instanceof JSONObject) {
-  logFileInfos.add(generatePerContainerLogFileInfoFromJSON(
-  (JSONObject)ob));
+  logFileInfos.add(new Pair<PerContainerLogFileInfo, String>(
+  generatePerContainerLogFileInfoFromJSON(
+  (JSONObject)ob), aggregateType));
 }
   }
 } catch (Exception e) {
@@ -543,10 +550,8 @@ public class LogsCLI extends Configured implements Tool {
   IOUtils.closeQuietly(is);
 }
   }
-  // for the case, we have already uploaded partial logs in HDFS
-  int result = 
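The message truncates here. As a companion to the parsing logic above, a small runnable sketch (Jettison JSON, as imported by `LogsCLI`) of the two payload shapes handled: `containerLogInfo` as an array or as a single object, optionally tagged with `logAggregationType`. The sample JSON is invented for illustration.

```java
// A runnable sketch (Jettison JSON, as imported by LogsCLI) of the two
// payload shapes the parser above handles: "containerLogInfo" as an array
// or a single object, optionally tagged with "logAggregationType".
import org.codehaus.jettison.json.JSONArray;
import org.codehaus.jettison.json.JSONObject;

public class ContainerLogInfoSketch {
  public static void main(String[] args) throws Exception {
    JSONObject log = new JSONObject(
        "{\"logAggregationType\":\"AGGREGATED\","
            + "\"containerLogInfo\":[{\"fileName\":\"syslog\"}]}");
    String aggregateType = log.has("logAggregationType")
        ? log.getString("logAggregationType") : "N/A";
    Object ob = log.get("containerLogInfo");
    if (ob instanceof JSONArray) {             // aggregated: a list of files
      JSONArray arr = (JSONArray) ob;
      for (int j = 0; j < arr.length(); j++) {
        System.out.println(aggregateType + ": " + arr.getJSONObject(j));
      }
    } else if (ob instanceof JSONObject) {     // a single log file entry
      System.out.println(aggregateType + ": " + ob);
    }
  }
}
```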

hadoop git commit: YARN-6313. YARN logs cli should provide logs for a completed container even when application is still running. Contributed by Xuan Gong.

2017-03-14 Thread junping_du
Repository: hadoop
Updated Branches:
  refs/heads/trunk 0a3aa40fe -> 871dc420f


YARN-6313. YARN logs cli should provide logs for a completed container even 
when application is still running. Contributed by Xuan Gong.

(cherry picked from commit b88f5e0f7858d1d89b79dfd325b767c34416052d)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/871dc420
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/871dc420
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/871dc420

Branch: refs/heads/trunk
Commit: 871dc420f8a4f151189c0925e062c64859a8f275
Parents: 0a3aa40
Author: Junping Du 
Authored: Tue Mar 14 12:56:54 2017 -0700
Committer: Junping Du 
Committed: Tue Mar 14 12:58:12 2017 -0700

--
 .../apache/hadoop/yarn/client/cli/LogsCLI.java  | 172 +--
 .../hadoop/yarn/client/cli/TestLogsCLI.java |  31 
 .../yarn/logaggregation/LogCLIHelpers.java  |  11 +-
 3 files changed, 160 insertions(+), 54 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/871dc420/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/LogsCLI.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/LogsCLI.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/LogsCLI.java
index 3cb1c7d..8407b19 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/LogsCLI.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/LogsCLI.java
@@ -44,6 +44,7 @@ import org.apache.commons.cli.Options;
 import org.apache.commons.cli.ParseException;
 import org.apache.commons.io.IOUtils;
 import org.apache.commons.lang.StringUtils;
+import org.apache.commons.math3.util.Pair;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceAudience.Public;
 import org.apache.hadoop.classification.InterfaceStability.Evolving;
@@ -65,6 +66,7 @@ import org.apache.hadoop.yarn.logaggregation.ContainerLogsRequest;
 import org.apache.hadoop.yarn.logaggregation.LogCLIHelpers;
 import org.apache.hadoop.yarn.logaggregation.PerContainerLogFileInfo;
 import org.apache.hadoop.yarn.webapp.util.WebAppUtils;
+import org.apache.hadoop.yarn.webapp.util.YarnWebServiceUtils;
 import org.codehaus.jettison.json.JSONArray;
 import org.codehaus.jettison.json.JSONException;
 import org.codehaus.jettison.json.JSONObject;
@@ -409,10 +411,11 @@ public class LogsCLI extends Configured implements Tool {
 return false;
   }
 
-  private List<PerContainerLogFileInfo> getContainerLogFiles(
+  private List<Pair<PerContainerLogFileInfo, String>> getContainerLogFiles(
   Configuration conf, String containerIdStr, String nodeHttpAddress)
   throws IOException {
-List<PerContainerLogFileInfo> logFileInfos = new ArrayList<>();
+List<Pair<PerContainerLogFileInfo, String>> logFileInfos
+= new ArrayList<>();
 Client webServiceClient = Client.create();
 try {
   WebResource webResource = webServiceClient
@@ -438,16 +441,20 @@ public class LogsCLI extends Configured implements Tool {
   }
   for (int i = 0; i < array.length(); i++) {
 JSONObject log = array.getJSONObject(i);
+String aggregateType = log.has("logAggregationType") ?
+log.getString("logAggregationType") : "N/A";
 Object ob = log.get("containerLogInfo");
 if (ob instanceof JSONArray) {
   JSONArray obArray = (JSONArray)ob;
   for (int j = 0; j < obArray.length(); j++) {
-logFileInfos.add(generatePerContainerLogFileInfoFromJSON(
-obArray.getJSONObject(j)));
+logFileInfos.add(new Pair<PerContainerLogFileInfo, String>(
+generatePerContainerLogFileInfoFromJSON(
+obArray.getJSONObject(j)), aggregateType));
   }
 } else if (ob instanceof JSONObject) {
-  logFileInfos.add(generatePerContainerLogFileInfoFromJSON(
-  (JSONObject)ob));
+  logFileInfos.add(new Pair<PerContainerLogFileInfo, String>(
+  generatePerContainerLogFileInfoFromJSON(
+  (JSONObject)ob), aggregateType));
 }
   }
 } catch (Exception e) {
@@ -542,10 +549,8 @@ public class LogsCLI extends Configured implements Tool {
   IOUtils.closeQuietly(is);
 }
   }
-  // for the case, we have already uploaded partial logs in HDFS
-  int result = 
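A minimal sketch (a hypothetical caller, not part of the patch) of consuming the new return type, which pairs each PerContainerLogFileInfo with the "logAggregationType" string parsed above; it assumes the YARN client and commons-math3 jars on the classpath, and commons-math3's Pair exposing getFirst()/getSecond():

import java.util.List;

import org.apache.commons.math3.util.Pair;
import org.apache.hadoop.yarn.logaggregation.PerContainerLogFileInfo;

public class LogFileListingDemo {
  // Prints one line per container log file together with its
  // aggregation type ("N/A" when the web service omits the field).
  static void printLogFiles(
      List<Pair<PerContainerLogFileInfo, String>> logFileInfos) {
    for (Pair<PerContainerLogFileInfo, String> entry : logFileInfos) {
      PerContainerLogFileInfo info = entry.getFirst(); // file metadata
      String aggregationType = entry.getSecond();      // e.g. "N/A"
      System.out.println(info.getFileName() + " (" + info.getFileSize()
          + " bytes, type=" + aggregationType + ")");
    }
  }
}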

hadoop git commit: YARN-6327. Removing queues from CapacitySchedulerQueueManager and ParentQueue should be done with iterator. Contributed by Jonathan Hung.

2017-03-14 Thread naganarasimha_gr
Repository: hadoop
Updated Branches:
  refs/heads/trunk 7515e7510 -> 0a3aa40fe


YARN-6327. Removing queues from CapacitySchedulerQueueManager and ParentQueue 
should be done with iterator. Contributed by Jonathan Hung.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0a3aa40f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0a3aa40f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0a3aa40f

Branch: refs/heads/trunk
Commit: 0a3aa40fe7878c939dbf4e6b43466595159ff930
Parents: 7515e75
Author: Naganarasimha 
Authored: Wed Mar 15 01:22:25 2017 +0530
Committer: Naganarasimha 
Committed: Wed Mar 15 01:22:25 2017 +0530

--
 .../scheduler/capacity/CapacitySchedulerQueueManager.java | 7 +--
 .../resourcemanager/scheduler/capacity/ParentQueue.java   | 6 --
 2 files changed, 9 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0a3aa40f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerQueueManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerQueueManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerQueueManager.java
index 8cae6c3..76cb5d6 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerQueueManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerQueueManager.java
@@ -22,6 +22,7 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Comparator;
 import java.util.HashMap;
+import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
@@ -312,10 +313,12 @@ public class CapacitySchedulerQueueManager implements SchedulerQueueManager<
 existingQueues.put(queueName, queue);
   }
 }
-for (Map.Entry<String, CSQueue> e : existingQueues.entrySet()) {
+for (Iterator<Map.Entry<String, CSQueue>> itr = existingQueues.entrySet()
+.iterator(); itr.hasNext();) {
+  Map.Entry<String, CSQueue> e = itr.next();
   String queueName = e.getKey();
   if (!newQueues.containsKey(queueName)) {
-existingQueues.remove(queueName);
+itr.remove();
   }
 }
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0a3aa40f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
index 6f82fcc..f84b7a4 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
@@ -333,10 +333,12 @@ public class ParentQueue extends AbstractCSQueue {
   }
 
   // remove the deleted queue in the refreshed xml.
-  for (Map.Entry<String, CSQueue> e : currentChildQueues.entrySet()) {
+  for (Iterator<Map.Entry<String, CSQueue>> itr = currentChildQueues
+  .entrySet().iterator(); itr.hasNext();) {
+Map.Entry<String, CSQueue> e = itr.next();
 String queueName = e.getKey();
 if (!newChildQueues.containsKey(queueName)) {
-  currentChildQueues.remove(queueName);
+  itr.remove();
 }
   }
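For readers wondering why the iterator matters here: removing a key from the map while iterating its entrySet() with a for-each loop fails fast with ConcurrentModificationException, whereas Iterator.remove() mutates safely. A minimal self-contained sketch of the idiom (plain JDK, not the scheduler code):

import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;

public class IteratorRemoveDemo {
  public static void main(String[] args) {
    Map<String, Integer> queues = new HashMap<>();
    queues.put("root.a", 1);
    queues.put("root.deleted", 2);

    // queues.remove(key) inside a for-each over entrySet() would throw
    // ConcurrentModificationException on the next iteration; removing
    // through the Iterator itself is the safe pattern used in the patch.
    for (Iterator<Map.Entry<String, Integer>> itr =
        queues.entrySet().iterator(); itr.hasNext();) {
      Map.Entry<String, Integer> e = itr.next();
      if (!e.getKey().equals("root.a")) {
        itr.remove();
      }
    }
    System.out.println(queues); // {root.a=1}
  }
}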
 



[1/2] hadoop git commit: HDFS-11469. Ozone: SCM: Container allocation based on node report. Contributed by Xiaoyu Yao.

2017-03-14 Thread aengineer
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7240 8eca9824c -> 39058dd60


http://git-wip-us.apache.org/repos/asf/hadoop/blob/39058dd6/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
index df9e632..f5f1de4 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
@@ -53,178 +53,201 @@ public class TestOzoneContainer {
 path += conf.getTrimmed(OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT,
 OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT_DEFAULT);
 conf.set(OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT, path);
-
-MiniOzoneCluster cluster = new MiniOzoneCluster.Builder(conf)
-.setHandlerType("distributed").build();
-
-// We don't start Ozone Container via data node, we will do it
-// independently in our test path.
-Pipeline pipeline = ContainerTestHelper.createSingleNodePipeline(
-containerName);
-conf.setInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT,
-pipeline.getLeader().getContainerPort());
-OzoneContainer container = new OzoneContainer(conf);
-container.start();
-
-XceiverClient client = new XceiverClient(pipeline, conf);
-client.connect();
-ContainerProtos.ContainerCommandRequestProto request =
-ContainerTestHelper.getCreateContainerRequest(containerName);
-ContainerProtos.ContainerCommandResponseProto response =
-client.sendCommand(request);
-Assert.assertNotNull(response);
-Assert.assertTrue(request.getTraceID().equals(response.getTraceID()));
-container.stop();
-cluster.shutdown();
-
+OzoneContainer container = null;
+MiniOzoneCluster cluster = null;
+try {
+  cluster =  new MiniOzoneCluster.Builder(conf)
+  .setHandlerType("distributed").build();
+  // We don't start Ozone Container via data node, we will do it
+  // independently in our test path.
+  Pipeline pipeline = ContainerTestHelper.createSingleNodePipeline(
+  containerName);
+  conf.setInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT,
+  pipeline.getLeader().getContainerPort());
+  container = new OzoneContainer(conf);
+  container.start();
+
+  XceiverClient client = new XceiverClient(pipeline, conf);
+  client.connect();
+  ContainerProtos.ContainerCommandRequestProto request =
+  ContainerTestHelper.getCreateContainerRequest(containerName);
+  ContainerProtos.ContainerCommandResponseProto response =
+  client.sendCommand(request);
+  Assert.assertNotNull(response);
+  Assert.assertTrue(request.getTraceID().equals(response.getTraceID()));
+} finally {
+  if (container != null) {
+container.stop();
+  }
+  if(cluster != null) {
+cluster.shutdown();
+  }
+}
   }
 
   @Test
   public void testOzoneContainerViaDataNode() throws Exception {
-String keyName = OzoneUtils.getRequestID();
-String containerName = OzoneUtils.getRequestID();
-OzoneConfiguration conf = new OzoneConfiguration();
-URL p = conf.getClass().getResource("");
-String path = p.getPath().concat(
-TestOzoneContainer.class.getSimpleName());
-path += conf.getTrimmed(OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT,
-OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT_DEFAULT);
-conf.set(OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT, path);
-
-// Start ozone container Via Datanode create.
-
-Pipeline pipeline =
-ContainerTestHelper.createSingleNodePipeline(containerName);
-conf.setInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT,
-pipeline.getLeader().getContainerPort());
-
-MiniOzoneCluster cluster = new MiniOzoneCluster.Builder(conf)
-.setHandlerType("distributed").build();
-
-// This client talks to ozone container via datanode.
-XceiverClient client = new XceiverClient(pipeline, conf);
-client.connect();
-
-// Create container
-ContainerProtos.ContainerCommandRequestProto request =
-ContainerTestHelper.getCreateContainerRequest(containerName);
-ContainerProtos.ContainerCommandResponseProto response =
-client.sendCommand(request);
-Assert.assertNotNull(response);
-Assert.assertTrue(request.getTraceID().equals(response.getTraceID()));
-
-// Write Chunk
-ContainerProtos.ContainerCommandRequestProto writeChunkRequest =
-ContainerTestHelper.getWriteChunkRequest(pipeline, containerName,
-keyName, 1024);
-
-response = 
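The refactoring above applies the usual try/finally cleanup idiom so the container and cluster are shut down even when an assertion throws mid-test. A generic sketch of the shape, with a hypothetical Server standing in for OzoneContainer/MiniOzoneCluster (JUnit 4 assumed):

import org.junit.Assert;
import org.junit.Test;

public class CleanupPatternTest {
  // Hypothetical stand-in for a test resource that must be stopped.
  static class Server {
    void start() { }
    void stop() { }
  }

  @Test
  public void testWithGuaranteedCleanup() {
    Server server = null;
    try {
      server = new Server();
      server.start();
      Assert.assertNotNull(server); // test body; may throw
    } finally {
      if (server != null) {
        server.stop(); // runs even if the assertion above fails
      }
    }
  }
}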

[2/2] hadoop git commit: HDFS-11469. Ozone: SCM: Container allocation based on node report. Contributed by Xiaoyu Yao.

2017-03-14 Thread aengineer
HDFS-11469. Ozone: SCM: Container allocation based on node report. Contributed 
by Xiaoyu Yao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/39058dd6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/39058dd6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/39058dd6

Branch: refs/heads/HDFS-7240
Commit: 39058dd6010a8dd925d0673840546ef730ad0bdf
Parents: 8eca982
Author: Anu Engineer 
Authored: Tue Mar 14 11:54:26 2017 -0700
Committer: Anu Engineer 
Committed: Tue Mar 14 11:54:26 2017 -0700

--
 .../hadoop/hdfs/protocolPB/PBHelperClient.java  |  15 +
 .../org/apache/hadoop/scm/ScmConfigKeys.java|  16 +-
 .../scm/client/ContainerOperationClient.java|  32 ++
 .../org/apache/hadoop/scm/client/ScmClient.java |  37 ++
 .../StorageContainerLocationProtocol.java   |  13 +
 ...rLocationProtocolClientSideTranslatorPB.java |  23 +-
 .../StorageContainerLocationProtocol.proto  |   6 +
 .../hadoop/cblock/storage/StorageManager.java   |   3 +-
 .../apache/hadoop/ozone/OzoneClientUtils.java   |   6 +-
 .../org/apache/hadoop/ozone/OzoneConsts.java|   4 +
 .../container/common/helpers/ContainerData.java |   6 +-
 .../ozone/container/common/impl/Dispatcher.java |  13 +-
 .../statemachine/DatanodeStateMachine.java  |   1 +
 .../statemachine/EndpointStateMachine.java  |   6 +-
 .../transport/server/XceiverServerSpi.java  |   3 +-
 .../org/apache/hadoop/ozone/scm/SCMMXBean.java  |  10 +-
 .../ozone/scm/StorageContainerManager.java  |  24 +-
 .../ozone/scm/container/ContainerMapping.java   | 116 --
 .../scm/container/ContainerPlacementPolicy.java |  41 +++
 .../hadoop/ozone/scm/container/Mapping.java |  12 +
 .../SCMContainerPlacementCapacity.java  | 207 +++
 .../container/SCMContainerPlacementRandom.java  | 146 
 .../hadoop/ozone/scm/node/NodeManager.java  |  21 +-
 .../ozone/scm/node/NodeManagerMXBean.java   |  13 +-
 .../hadoop/ozone/scm/node/SCMNodeManager.java   |  24 +-
 .../web/localstorage/OzoneMetadataManager.java  |   2 +-
 .../hadoop/cblock/util/MockStorageClient.java   |  11 +-
 .../apache/hadoop/ozone/MiniOzoneCluster.java   |  20 +-
 .../hadoop/ozone/TestContainerOperations.java   |  11 +-
 .../ozone/container/ContainerTestHelper.java|   1 +
 .../common/TestDatanodeStateMachine.java| 211 +--
 .../ozone/container/common/TestEndPoint.java|  29 +-
 .../container/ozoneimpl/TestOzoneContainer.java | 351 ++-
 .../hadoop/ozone/scm/TestAllocateContainer.java |   6 +-
 .../ozone/scm/TestContainerSmallFile.java   |  16 +-
 .../ozone/scm/container/MockNodeManager.java|  24 +-
 .../ozone/scm/node/TestContainerPlacement.java  | 191 ++
 .../hadoop/ozone/scm/node/TestNodeManager.java  |  92 ++---
 .../hadoop/ozone/web/TestOzoneVolumes.java  |   2 +-
 .../hadoop/ozone/web/client/TestVolume.java |  14 +-
 40 files changed, 1362 insertions(+), 417 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/39058dd6/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
index b4fa926..946a5cb 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
@@ -174,6 +174,8 @@ import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.SlotId;
 import org.apache.hadoop.io.EnumSetWritable;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.erasurecode.ECSchema;
+import org.apache.hadoop.ozone.protocol.proto.StorageContainerLocationProtocolProtos.ContainerRequestProto;
+import org.apache.hadoop.scm.client.ScmClient;
 import org.apache.hadoop.security.proto.SecurityProtos.TokenProto;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.DataChecksum;
@@ -2375,6 +2377,19 @@ public class PBHelperClient {
 return result;
   }
 
+  public static ContainerRequestProto.ReplicationFactor
+  convertReplicationFactor(ScmClient.ReplicationFactor replicationFactor) {
+switch (replicationFactor) {
+case ONE:
+  return ContainerRequestProto.ReplicationFactor.ONE;
+case THREE:
+  return ContainerRequestProto.ReplicationFactor.THREE;
+default:
+  throw new IllegalArgumentException("Ozone only supports replication" +
+  " factor 1 or 3");
+
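A hedged usage sketch for the new converter; the class, method, and enum names are taken from the diff above, and the Ozone/HDFS client jars are assumed on the classpath:

import org.apache.hadoop.hdfs.protocolPB.PBHelperClient;
import org.apache.hadoop.ozone.protocol.proto.StorageContainerLocationProtocolProtos.ContainerRequestProto;
import org.apache.hadoop.scm.client.ScmClient;

public class ReplicationFactorDemo {
  public static void main(String[] args) {
    // Maps the client-side enum onto the protobuf enum; anything other
    // than ONE or THREE throws IllegalArgumentException.
    ContainerRequestProto.ReplicationFactor proto =
        PBHelperClient.convertReplicationFactor(
            ScmClient.ReplicationFactor.THREE);
    System.out.println(proto); // THREE
  }
}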

hadoop git commit: HDFS-11505. Do not enable any erasure coding policies by default. Contributed by Manoj Govindassamy.

2017-03-14 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/trunk 34424e98a -> 7515e7510


HDFS-11505. Do not enable any erasure coding policies by default. Contributed 
by Manoj Govindassamy.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7515e751
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7515e751
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7515e751

Branch: refs/heads/trunk
Commit: 7515e75103c06ce7139b305dd04d4fb2e94b12ad
Parents: 34424e9
Author: Andrew Wang 
Authored: Tue Mar 14 11:47:25 2017 -0700
Committer: Andrew Wang 
Committed: Tue Mar 14 11:47:25 2017 -0700

--
 .../java/org/apache/hadoop/hdfs/DFSConfigKeys.java   |  2 +-
 .../server/namenode/ErasureCodingPolicyManager.java  |  5 -
 .../hadoop-hdfs/src/main/resources/hdfs-default.xml  |  5 +++--
 .../src/site/markdown/HDFSErasureCoding.md   | 13 +
 .../hadoop/hdfs/TestDecommissionWithStriped.java |  2 ++
 .../hdfs/TestErasureCodeBenchmarkThroughput.java |  2 ++
 .../hdfs/TestErasureCodingPolicyWithSnapshot.java|  2 ++
 .../org/apache/hadoop/hdfs/TestFileChecksum.java |  2 ++
 .../hadoop/hdfs/TestFileStatusWithECPolicy.java  |  5 -
 .../apache/hadoop/hdfs/TestLeaseRecoveryStriped.java |  2 ++
 .../hadoop/hdfs/TestReadStripedFileWithDecoding.java |  2 ++
 .../hdfs/TestReadStripedFileWithMissingBlocks.java   |  2 ++
 .../hadoop/hdfs/TestReconstructStripedFile.java  |  2 ++
 .../hadoop/hdfs/TestSafeModeWithStripedFile.java |  2 ++
 .../apache/hadoop/hdfs/TestWriteReadStripedFile.java |  2 ++
 .../hadoop/hdfs/server/balancer/TestBalancer.java|  2 ++
 .../TestBlockTokenWithDFSStriped.java|  4 
 ...estReconstructStripedBlocksWithRackAwareness.java |  6 ++
 .../blockmanagement/TestSequentialBlockGroupId.java  |  2 ++
 .../datanode/TestDataNodeErasureCodingMetrics.java   |  2 ++
 .../apache/hadoop/hdfs/server/mover/TestMover.java   |  2 ++
 .../namenode/TestAddOverReplicatedStripedBlocks.java |  4 +++-
 .../server/namenode/TestAddStripedBlockInFBR.java|  3 +++
 .../hdfs/server/namenode/TestAddStripedBlocks.java   | 10 ++
 .../hdfs/server/namenode/TestEnabledECPolicies.java  | 15 +--
 .../hdfs/server/namenode/TestFSEditLogLoader.java|  4 
 .../apache/hadoop/hdfs/server/namenode/TestFsck.java |  8 
 .../hdfs/server/namenode/TestNameNodeMXBean.java |  2 ++
 .../server/namenode/TestQuotaWithStripedBlocks.java  |  2 ++
 .../namenode/TestReconstructStripedBlocks.java   |  6 ++
 .../hdfs/server/namenode/TestStripedINodeFile.java   |  4 
 .../TestOfflineImageViewerWithStripedBlocks.java |  2 ++
 32 files changed, 112 insertions(+), 16 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7515e751/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 3fc4980..06b33f9 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -563,7 +563,7 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   "10m";
 
   public static final String  DFS_NAMENODE_EC_POLICIES_ENABLED_KEY = "dfs.namenode.ec.policies.enabled";
-  public static final String  DFS_NAMENODE_EC_POLICIES_ENABLED_DEFAULT = "RS-6-3-64k";
+  public static final String  DFS_NAMENODE_EC_POLICIES_ENABLED_DEFAULT = "";
   public static final String  DFS_DN_EC_RECONSTRUCTION_STRIPED_READ_THREADS_KEY = "dfs.datanode.ec.reconstruction.stripedread.threads";
   public static final int DFS_DN_EC_RECONSTRUCTION_STRIPED_READ_THREADS_DEFAULT = 20;
   public static final String  DFS_DN_EC_RECONSTRUCTION_STRIPED_READ_BUFFER_SIZE_KEY = "dfs.datanode.ec.reconstruction.stripedread.buffer.size";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7515e751/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
index 29af207..c23b034 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
+++ 
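The practical consequence of the DFSConfigKeys change above is that erasure coding policies are now opt-in. A minimal sketch (assuming the hadoop-hdfs jars on the classpath) of re-enabling a policy programmatically; the same effect is achieved by setting dfs.namenode.ec.policies.enabled in hdfs-site.xml before starting the NameNode:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;

public class EnableEcPoliciesDemo {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // After HDFS-11505 the default is the empty string, i.e. no EC
    // policy is enabled; list the wanted policies explicitly.
    conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY,
        "RS-6-3-64k");
    System.out.println(
        conf.get(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY));
  }
}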

hadoop git commit: YARN-6314. Potential infinite redirection on YARN log redirection web service. Contributed by Xuan Gong.

2017-03-14 Thread junping_du
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 f731013c8 -> f254002f1


YARN-6314. Potential infinite redirection on YARN log redirection web service. 
Contributed by Xuan Gong.

(cherry picked from commit 5a9dda796f0e73060ada794ad5752cc6a237ab2e)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f254002f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f254002f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f254002f

Branch: refs/heads/branch-2
Commit: f254002f1d6145642e3ecc32c789e9e2533524d3
Parents: f731013
Author: Junping Du 
Authored: Tue Mar 14 02:56:18 2017 -0700
Committer: Junping Du 
Committed: Tue Mar 14 02:58:44 2017 -0700

--
 .../webapp/AHSWebServices.java  | 32 +++-
 .../webapp/TestAHSWebServices.java  | 17 +++
 .../server/webapp/YarnWebServiceParams.java |  1 +
 .../nodemanager/webapp/NMWebServices.java   |  6 +++-
 .../nodemanager/webapp/TestNMWebServices.java   |  6 
 5 files changed, 54 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f254002f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebServices.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebServices.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebServices.java
index 13c1d5e..5c486ed 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebServices.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebServices.java
@@ -28,6 +28,7 @@ import java.util.Set;
 
 import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletResponse;
+import javax.ws.rs.DefaultValue;
 import javax.ws.rs.GET;
 import javax.ws.rs.Path;
 import javax.ws.rs.PathParam;
@@ -226,6 +227,8 @@ public class AHSWebServices extends WebServices {
*The container ID
* @param nmId
*The Node Manager NodeId
+   * @param redirected_from_node
+   *Whether this is a redirected request from NM
* @return
*The log file's name and current file size
*/
@@ -236,7 +239,9 @@ public class AHSWebServices extends WebServices {
   @Context HttpServletRequest req,
   @Context HttpServletResponse res,
   @PathParam(YarnWebServiceParams.CONTAINER_ID) String containerIdStr,
-  @QueryParam(YarnWebServiceParams.NM_ID) String nmId) {
+  @QueryParam(YarnWebServiceParams.NM_ID) String nmId,
+  @QueryParam(YarnWebServiceParams.REDIRECTED_FROM_NODE)
+  @DefaultValue("false") boolean redirected_from_node) {
 ContainerId containerId = null;
 init(res);
 try {
@@ -244,6 +249,7 @@ public class AHSWebServices extends WebServices {
 } catch (IllegalArgumentException e) {
   throw new BadRequestException("invalid container id, " + containerIdStr);
 }
+
 ApplicationId appId = containerId.getApplicationAttemptId()
 .getApplicationId();
 AppInfo appInfo;
@@ -288,9 +294,12 @@ public class AHSWebServices extends WebServices {
 // make sure nodeHttpAddress is not null and not empty. Otherwise,
 // we would only get log meta for aggregated logs instead of
 // re-directing the request
-if (nodeHttpAddress == null || nodeHttpAddress.isEmpty()) {
+if (nodeHttpAddress == null || nodeHttpAddress.isEmpty()
+|| redirected_from_node) {
   // return log meta for the aggregated logs if exists.
   // It will also return empty log meta for the local logs.
+  // If this is the redirect request from NM, we should not
+  // re-direct the request back. Simply output the aggregated log meta.
   return getContainerLogMeta(appId, appOwner, null,
   containerIdStr, true);
 }
@@ -329,6 +338,8 @@ public class AHSWebServices extends WebServices {
*the size of the log file
* @param nmId
*The Node Manager NodeId
+   * @param redirected_from_node
+   *Whether this is the redirect request from NM
* @return
*The contents of the container's 

hadoop git commit: YARN-6314. Potential infinite redirection on YARN log redirection web service. Contributed by Xuan Gong.

2017-03-14 Thread junping_du
Repository: hadoop
Updated Branches:
  refs/heads/trunk 023b941e3 -> 34424e98a


YARN-6314. Potential infinite redirection on YARN log redirection web service. 
Contributed by Xuan Gong.

(cherry picked from commit 5a9dda796f0e73060ada794ad5752cc6a237ab2e)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/34424e98
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/34424e98
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/34424e98

Branch: refs/heads/trunk
Commit: 34424e98a618a9fefce800746168be2b72e17de9
Parents: 023b941
Author: Junping Du 
Authored: Tue Mar 14 02:56:18 2017 -0700
Committer: Junping Du 
Committed: Tue Mar 14 02:58:07 2017 -0700

--
 .../webapp/AHSWebServices.java  | 32 +++-
 .../webapp/TestAHSWebServices.java  | 17 +++
 .../server/webapp/YarnWebServiceParams.java |  1 +
 .../nodemanager/webapp/NMWebServices.java   |  6 +++-
 .../nodemanager/webapp/TestNMWebServices.java   |  6 
 5 files changed, 54 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/34424e98/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebServices.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebServices.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebServices.java
index c296aaa..6195199 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebServices.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebServices.java
@@ -28,6 +28,7 @@ import java.util.Set;
 
 import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletResponse;
+import javax.ws.rs.DefaultValue;
 import javax.ws.rs.GET;
 import javax.ws.rs.Path;
 import javax.ws.rs.PathParam;
@@ -235,6 +236,8 @@ public class AHSWebServices extends WebServices {
*The container ID
* @param nmId
*The Node Manager NodeId
+   * @param redirected_from_node
+   *Whether this is a redirected request from NM
* @return
*The log file's name and current file size
*/
@@ -245,7 +248,9 @@ public class AHSWebServices extends WebServices {
   @Context HttpServletRequest req,
   @Context HttpServletResponse res,
   @PathParam(YarnWebServiceParams.CONTAINER_ID) String containerIdStr,
-  @QueryParam(YarnWebServiceParams.NM_ID) String nmId) {
+  @QueryParam(YarnWebServiceParams.NM_ID) String nmId,
+  @QueryParam(YarnWebServiceParams.REDIRECTED_FROM_NODE)
+  @DefaultValue("false") boolean redirected_from_node) {
 ContainerId containerId = null;
 init(res);
 try {
@@ -253,6 +258,7 @@ public class AHSWebServices extends WebServices {
 } catch (IllegalArgumentException e) {
   throw new BadRequestException("invalid container id, " + containerIdStr);
 }
+
 ApplicationId appId = containerId.getApplicationAttemptId()
 .getApplicationId();
 AppInfo appInfo;
@@ -297,9 +303,12 @@ public class AHSWebServices extends WebServices {
 // make sure nodeHttpAddress is not null and not empty. Otherwise,
 // we would only get log meta for aggregated logs instead of
 // re-directing the request
-if (nodeHttpAddress == null || nodeHttpAddress.isEmpty()) {
+if (nodeHttpAddress == null || nodeHttpAddress.isEmpty()
+|| redirected_from_node) {
   // return log meta for the aggregated logs if exists.
   // It will also return empty log meta for the local logs.
+  // If this is the redirect request from NM, we should not
+  // re-direct the request back. Simply output the aggregated log meta.
   return getContainerLogMeta(appId, appOwner, null,
   containerIdStr, true);
 }
@@ -338,6 +347,8 @@ public class AHSWebServices extends WebServices {
*the size of the log file
* @param nmId
*The Node Manager NodeId
+   * @param redirected_from_node
+   *Whether this is the redirect request from NM
* @return
*The contents of the container's log 
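The core idea of the fix: tag the redirect with a query parameter so the receiving end can detect a request that has already bounced once and serve a local answer instead of redirecting back. A minimal JAX-RS sketch of the guard (a hypothetical resource and peer URL, not the AHS code itself):

import java.net.URI;

import javax.ws.rs.DefaultValue;
import javax.ws.rs.GET;
import javax.ws.rs.Path;
import javax.ws.rs.QueryParam;
import javax.ws.rs.core.Response;

@Path("/logs")
public class RedirectGuardResource {
  @GET
  public Response getLogMeta(
      @QueryParam("redirected_from_node")
      @DefaultValue("false") boolean redirectedFromNode) {
    if (redirectedFromNode) {
      // Already bounced once: answer locally, never redirect again.
      return Response.ok("aggregated log meta").build();
    }
    // First hop: redirect and mark the request so the peer will not
    // send it back, which is what caused the infinite loop.
    URI peer = URI.create(
        "http://peer:8188/logs?redirected_from_node=true");
    return Response.temporaryRedirect(peer).build();
  }
}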

hadoop git commit: HDFS-11526. Fix confusing block recovery message. Contributed by Yiqun Lin.

2017-03-14 Thread yqlin
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 e03d8ff48 -> f731013c8


HDFS-11526. Fix confusing block recovery message. Contributed by Yiqun Lin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f731013c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f731013c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f731013c

Branch: refs/heads/branch-2
Commit: f731013c8836c81198d72dd11ed5a100301f6019
Parents: e03d8ff
Author: Yiqun Lin 
Authored: Tue Mar 14 17:52:31 2017 +0800
Committer: Yiqun Lin 
Committed: Tue Mar 14 17:52:31 2017 +0800

--
 .../apache/hadoop/hdfs/server/datanode/BlockRecoveryWorker.java | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f731013c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockRecoveryWorker.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockRecoveryWorker.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockRecoveryWorker.java
index e91d8b1..d04aff6 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockRecoveryWorker.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockRecoveryWorker.java
@@ -152,9 +152,8 @@ public class BlockRecoveryWorker {
   return;
 } catch (IOException e) {
   ++errorCount;
-  InterDatanodeProtocol.LOG.warn(
-  "Failed to obtain replica info for block (=" + block
-  + ") from datanode (=" + id + ")", e);
+  InterDatanodeProtocol.LOG.warn("Failed to recover block (block="
+  + block + ", datanode=" + id + ")", e);
 }
   }
 





hadoop git commit: HDFS-11526. Fix confusing block recovery message. Contributed by Yiqun Lin.

2017-03-14 Thread yqlin
Repository: hadoop
Updated Branches:
  refs/heads/trunk e5f2eedcb -> 023b941e3


HDFS-11526. Fix confusing block recovery message. Contributed by Yiqun Lin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/023b941e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/023b941e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/023b941e

Branch: refs/heads/trunk
Commit: 023b941e3b83f32bc785240dbb1bfce11a987941
Parents: e5f2eed
Author: Yiqun Lin 
Authored: Tue Mar 14 17:49:48 2017 +0800
Committer: Yiqun Lin 
Committed: Tue Mar 14 17:49:48 2017 +0800

--
 .../hadoop/hdfs/server/datanode/BlockRecoveryWorker.java  | 10 --
 1 file changed, 4 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/023b941e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockRecoveryWorker.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockRecoveryWorker.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockRecoveryWorker.java
index d39d050..792b6af 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockRecoveryWorker.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockRecoveryWorker.java
@@ -167,9 +167,8 @@ public class BlockRecoveryWorker {
   return;
 } catch (IOException e) {
   ++errorCount;
-  InterDatanodeProtocol.LOG.warn(
-  "Failed to obtain replica info for block (=" + block
-  + ") from datanode (=" + id + ")", e);
+  InterDatanodeProtocol.LOG.warn("Failed to recover block (block="
+  + block + ", datanode=" + id + ")", e);
 }
   }
 
@@ -429,9 +428,8 @@ public class BlockRecoveryWorker {
   + rBlock.getNewGenerationStamp() + " is aborted.", ripE);
   return;
 } catch (IOException e) {
-  InterDatanodeProtocol.LOG.warn(
-  "Failed to obtain replica info for block (=" + block
-  + ") from datanode (=" + id + ")", e);
+  InterDatanodeProtocol.LOG.warn("Failed to recover block (block="
+  + block + ", datanode=" + id + ")", e);
 }
   }
   checkLocations(syncBlocks.size());





hadoop git commit: HDFS-11336: [SPS]: Remove xAttrs when movements done or SPS disabled. Contributed by Yuanbo Liu.

2017-03-14 Thread umamahesh
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-10285 58240d86a -> ff9ccfe4f


HDFS-11336: [SPS]: Remove xAttrs when movements done or SPS disabled. 
Contributed by Yuanbo Liu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ff9ccfe4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ff9ccfe4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ff9ccfe4

Branch: refs/heads/HDFS-10285
Commit: ff9ccfe4f38f705011775fb45293c6c40cac48c0
Parents: 58240d8
Author: Uma Maheswara Rao G 
Authored: Tue Mar 14 00:52:24 2017 -0700
Committer: Uma Maheswara Rao G 
Committed: Tue Mar 14 00:52:24 2017 -0700

--
 .../BlockStorageMovementAttemptedItems.java |  14 ++-
 .../hdfs/server/namenode/FSDirAttrOp.java   |   8 ++
 .../hdfs/server/namenode/FSDirectory.java   |  16 +++
 .../server/namenode/StoragePolicySatisfier.java |  45 ++--
 .../org/apache/hadoop/hdfs/DFSTestUtil.java |   2 +-
 .../TestBlockStorageMovementAttemptedItems.java |   6 +-
 .../TestPersistentStoragePolicySatisfier.java   | 112 ++-
 7 files changed, 186 insertions(+), 17 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ff9ccfe4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java
index 042aca3..f15db73 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs.server.namenode;
 
 import static org.apache.hadoop.util.Time.monotonicNow;
 
+import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.HashMap;
@@ -54,6 +55,7 @@ public class BlockStorageMovementAttemptedItems {
  private final List<BlocksStorageMovementResult> storageMovementAttemptedResults;
   private volatile boolean monitorRunning = true;
   private Daemon timerThread = null;
+  private final StoragePolicySatisfier sps;
   //
   // It might take anywhere between 30 to 60 minutes before
   // a request is timed out.
@@ -69,7 +71,8 @@ public class BlockStorageMovementAttemptedItems {
 
   public BlockStorageMovementAttemptedItems(long recheckTimeout,
   long selfRetryTimeout,
-  BlockStorageMovementNeeded unsatisfiedStorageMovementFiles) {
+  BlockStorageMovementNeeded unsatisfiedStorageMovementFiles,
+  StoragePolicySatisfier sps) {
 if (recheckTimeout > 0) {
   this.minCheckTimeout = Math.min(minCheckTimeout, recheckTimeout);
 }
@@ -78,6 +81,7 @@ public class BlockStorageMovementAttemptedItems {
 this.blockStorageMovementNeeded = unsatisfiedStorageMovementFiles;
 storageMovementAttemptedItems = new HashMap<>();
 storageMovementAttemptedResults = new ArrayList<>();
+this.sps = sps;
   }
 
   /**
@@ -200,6 +204,9 @@ public class BlockStorageMovementAttemptedItems {
 } catch (InterruptedException ie) {
   LOG.info("BlocksStorageMovementAttemptResultMonitor thread "
   + "is interrupted.", ie);
+} catch (IOException ie) {
+  LOG.warn("BlocksStorageMovementAttemptResultMonitor thread "
+  + "received exception and exiting.", ie);
 }
   }
 }
@@ -248,7 +255,7 @@ public class BlockStorageMovementAttemptedItems {
   }
 
   @VisibleForTesting
-  void blockStorageMovementResultCheck() {
+  void blockStorageMovementResultCheck() throws IOException {
 synchronized (storageMovementAttemptedResults) {
  Iterator<BlocksStorageMovementResult> resultsIter =
  storageMovementAttemptedResults.iterator();
@@ -296,6 +303,9 @@ public class BlockStorageMovementAttemptedItems {
   + " reported from co-ordinating datanode. But the trackID "
   + "doesn't exists in storageMovementAttemptedItems list",
   storageMovementAttemptedResult.getTrackId());
+  // Remove xattr for the track id.
+  this.sps.notifyBlkStorageMovementFinished(
+  storageMovementAttemptedResult.getTrackId());
 }
   }
   // Remove trackID from the attempted list, if any.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ff9ccfe4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java

[Hadoop Wiki] Update of "Books" by Packt Publishing

2017-03-14 Thread Apache Wiki
Dear Wiki user,

You have subscribed to a wiki page or wiki category on "Hadoop Wiki" for change 
notification.

The "Books" page has been changed by Packt Publishing:
https://wiki.apache.org/hadoop/Books?action=diff=40=41

Comment:
Added new book

  }}}
  
  
+ === Deep Learning with Hadoop ===
+ 
+ '''Name:'''  
[[https://www.packtpub.com/big-data-and-business-intelligence/deep-learning-hadoop|Deep
 Learning with Hadoop]]
+ 
+ '''Author:''' Dipayan Dev
+ 
+ '''Publisher:''' Packt
+ 
+ '''Date of Publishing:''' February 2017
+ 
+ Build, implement and scale distributed deep learning models for large-scale 
datasets.
+ 
  === Hadoop Blueprints ===
  
  '''Name:'''  
[[https://www.packtpub.com/big-data-and-business-intelligence/hadoop-blueprints|Hadoop
 Blueprints]]




hadoop git commit: YARN-5496. Make Node Heatmap Chart categories clickable in new YARN UI. Contributed by Gergely Novák.

2017-03-14 Thread sunilg
Repository: hadoop
Updated Branches:
  refs/heads/trunk 9832ae0ed -> e5f2eedcb


YARN-5496. Make Node Heatmap Chart categories clickable in new YARN UI. 
Contributed by Gergely Novák.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e5f2eedc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e5f2eedc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e5f2eedc

Branch: refs/heads/trunk
Commit: e5f2eedcbfcbfe8fa6fdb6a57b1250f80b12c32f
Parents: 9832ae0
Author: Sunil G 
Authored: Tue Mar 14 11:47:11 2017 +0530
Committer: Sunil G 
Committed: Tue Mar 14 11:47:11 2017 +0530

--
 .../app/components/base-chart-component.js  |   4 +
 .../main/webapp/app/components/nodes-heatmap.js | 106 ++-
 .../src/main/webapp/app/styles/app.css  |  12 +++
 3 files changed, 93 insertions(+), 29 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e5f2eedc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/base-chart-component.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/base-chart-component.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/base-chart-component.js
index d11a532..aa41893 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/base-chart-component.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/base-chart-component.js
@@ -141,4 +141,8 @@ export default Ember.Component.extend({
 };
 return layout;
   },
+
+  willDestroy: function() {
+this.tooltip.remove();
+  }
 });

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e5f2eedc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/nodes-heatmap.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/nodes-heatmap.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/nodes-heatmap.js
index 5652834..ef6e46e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/nodes-heatmap.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/nodes-heatmap.js
@@ -26,17 +26,18 @@ export default BaseChartComponent.extend({
   CELL_MARGIN: 2,
   RACK_MARGIN: 20,
   filter: "",
+  selectedCategory: 0,
 
-  bindTP: function(element) {
+  bindTP: function(element, cell) {
 element.on("mouseover", function() {
   this.tooltip
 .style("left", (d3.event.pageX) + "px")
 .style("top", (d3.event.pageY - 28) + "px");
-  element.style("opacity", 1.0);
+  cell.style("opacity", 1.0);
 }.bind(this))
   .on("mousemove", function() {
 // Handle pie chart case
-var text = element.attr("tooltiptext");
+var text = cell.attr("tooltiptext");
 
 this.tooltip.style("opacity", 0.9);
 this.tooltip.html(text)
@@ -45,10 +46,45 @@ export default BaseChartComponent.extend({
   }.bind(this))
   .on("mouseout", function() {
 this.tooltip.style("opacity", 0);
-element.style("opacity", 0.8);
+cell.style("opacity", 0.8);
   }.bind(this));
   },
 
+  bindSelectCategory: function(element, i) {
+element.on("click", function() {
+  if (this.selectedCategory == i) {
+// Remove selection for second click
+this.selectedCategory = 0;
+  } else {
+this.selectedCategory = i;
+  }
+  this.didInsertElement();
+}.bind(this));
+  },
+
+  isNodeSelected: function(node) {
+if (this.filter) {
+  var rack = node.get("rack");
+  var host = node.get("nodeHostName");
+  if (!rack.includes(this.filter) && !host.includes(this.filter)) {
+return false;
+  }
+}
+
+if (this.selectedCategory === 0) {
+  return true;
+}
+
+var usage = node.get("usedMemoryMB") /
+  (node.get("usedMemoryMB") + node.get("availMemoryMB"));
+var lowerLimit = (this.selectedCategory - 1) * 0.2;
+var upperLimit = this.selectedCategory * 0.2;
+if (lowerLimit <= usage && usage <= upperLimit) {
+  return true;
+}
+return false;
+  },
+
   // data:
   //[{label=label1, value=value1}, ...]
   //...
@@ -84,20 +120,32 @@ export default BaseChartComponent.extend({
 for (i = 1; i <= 5; i++) {
   var ratio = i * 0.2 - 0.1;
 
-  g.append("rect")
+  var rect = g.append("rect")
 .attr("x", sampleXOffset)
 .attr("y", sampleYOffset)
-.attr("fill", colorFunc(ratio))
+.attr("fill", this.selectedCategory