hadoop git commit: HADOOP-13419. Fix javadoc warnings by JDK8 in hadoop-common package. Contributed by Kai Sasaki.

2016-08-15 Thread iwasakims
Repository: hadoop
Updated Branches:
  refs/heads/trunk 4d4d95fdd -> b8a446ba5


HADOOP-13419. Fix javadoc warnings by JDK8 in hadoop-common package. 
Contributed by Kai Sasaki.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b8a446ba
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b8a446ba
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b8a446ba

Branch: refs/heads/trunk
Commit: b8a446ba57d89c0896ec2d56dd919b0101e69f44
Parents: 4d4d95f
Author: Masatake Iwasaki 
Authored: Tue Aug 16 13:30:40 2016 +0900
Committer: Masatake Iwasaki 
Committed: Tue Aug 16 13:30:40 2016 +0900

--
 .../java/org/apache/hadoop/fs/FileContext.java  |  4 +-
 .../apache/hadoop/io/retry/package-info.java| 22 +
 .../org/apache/hadoop/io/retry/package.html | 48 
 .../org/apache/hadoop/ipc/package-info.java |  4 ++
 .../java/org/apache/hadoop/ipc/package.html | 23 --
 5 files changed, 28 insertions(+), 73 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b8a446ba/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
index e6a4cf4..f235773 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
@@ -303,7 +303,7 @@ public class FileContext {
* 
* @throws UnsupportedFileSystemException If the file system for
*   absOrFqPath is not supported.
-   * @throws IOExcepton If the file system for absOrFqPath could
+   * @throws IOException If the file system for absOrFqPath could
* not be instantiated.
*/
   protected AbstractFileSystem getFSofPath(final Path absOrFqPath)
@@ -2713,7 +2713,7 @@ public class FileContext {
   /**
* Query the effective storage policy ID for the given file or directory.
*
-   * @param src file or directory path.
+   * @param path file or directory path.
* @return storage policy for give file.
* @throws IOException
*/

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b8a446ba/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/package-info.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/package-info.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/package-info.java
index 693065f..089cf6f 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/package-info.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/package-info.java
@@ -15,6 +15,28 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
+/**
+ * A mechanism for selectively retrying methods that throw exceptions under
+ * certain circumstances.
+ * Typical usage is
+ *  UnreliableImplementation unreliableImpl = new UnreliableImplementation();
+ *  UnreliableInterface unreliable = (UnreliableInterface)
+ *  RetryProxy.create(UnreliableInterface.class, unreliableImpl,
+ *  RetryPolicies.retryUpToMaximumCountWithFixedSleep(4, 10,
+ *  TimeUnit.SECONDS));
+ *  unreliable.call();
+ *
+ * This will retry any method called on unreliable four times -
+ * in this case the call() method - sleeping 10 seconds between
+ * each retry. There are a number of
+ * {@link org.apache.hadoop.io.retry.RetryPolicies retry policies}
+ * available, or you can implement a custom one by implementing
+ * {@link org.apache.hadoop.io.retry.RetryPolicy}.
+ * It is also possible to specify retry policies on a
+ * {@link org.apache.hadoop.io.retry.RetryProxy#create(Class, Object, Map)
+ * per-method basis}.
+ */
 @InterfaceAudience.LimitedPrivate({"HBase", "HDFS", "MapReduce"})
 @InterfaceStability.Evolving
 package org.apache.hadoop.io.retry;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b8a446ba/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/package.html
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/package.html
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/package.html
deleted file mode 100644
index ae553fc..000
--- 

hadoop git commit: HDFS-10747. o.a.h.hdfs.tools.DebugAdmin usage message is misleading. (Contributed by Mingliang Liu)

2016-08-15 Thread liuml07
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 71d0e4fca -> de96efec2


HDFS-10747. o.a.h.hdfs.tools.DebugAdmin usage message is misleading. 
(Contributed by Mingliang Liu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/de96efec
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/de96efec
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/de96efec

Branch: refs/heads/branch-2.7
Commit: de96efec254e3c5d9b412512372c9f50ad3aa082
Parents: 71d0e4f
Author: Mingliang Liu 
Authored: Mon Aug 15 20:23:47 2016 -0700
Committer: Mingliang Liu 
Committed: Mon Aug 15 21:10:32 2016 -0700

--
 .../src/main/java/org/apache/hadoop/hdfs/tools/DebugAdmin.java   | 4 ++--
 .../hadoop-hdfs/src/site/markdown/HDFSCommands.md| 4 ++--
 2 files changed, 4 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/de96efec/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DebugAdmin.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DebugAdmin.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DebugAdmin.java
index 41f1ca4..7db179d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DebugAdmin.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DebugAdmin.java
@@ -85,7 +85,7 @@ public class DebugAdmin extends Configured implements Tool {
   private class VerifyBlockChecksumCommand extends DebugCommand {
 VerifyBlockChecksumCommand() {
   super("verify",
-"verify [-meta ] [-block ]",
+"verify -meta  [-block ]",
 "  Verify HDFS metadata and block files.  If a block file is specified, we\n" +
 "  will verify that the checksums in the metadata file match the block\n" +
 "  file.");
@@ -199,7 +199,7 @@ public class DebugAdmin extends Configured implements Tool {
   private class RecoverLeaseCommand extends DebugCommand {
 RecoverLeaseCommand() {
   super("recoverLease",
-"recoverLease [-path ] [-retries ]",
+"recoverLease -path  [-retries ]",
 "  Recover the lease on the specified path.  The path must reside on an\n" +
 "  HDFS filesystem.  The default number of retries is 1.");
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/de96efec/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
index 5f77694..39af6dd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
@@ -495,7 +495,7 @@ Useful commands to help administrators debug HDFS issues, 
like validating block
 
 ### `verify`
 
-Usage: `hdfs debug verify [-meta ] [-block ]`
+Usage: `hdfs debug verify -meta  [-block ]`
 
 | COMMAND\_OPTION | Description |
 |: |: |
@@ -506,7 +506,7 @@ Verify HDFS metadata and block files. If a block file is 
specified, we will veri
 
 ### `recoverLease`
 
-Usage: `hdfs debug recoverLease [-path ] [-retries ]`
+Usage: `hdfs debug recoverLease -path  [-retries ]`
 
 | COMMAND\_OPTION | Description |
 |: |: |


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[4/5] hadoop git commit: HADOOP-13470. GenericTestUtils$LogCapturer is flaky. (Contributed by Mingliang Liu)

2016-08-15 Thread liuml07
HADOOP-13470. GenericTestUtils$LogCapturer is flaky. (Contributed by Mingliang 
Liu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/657064e5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/657064e5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/657064e5

Branch: refs/heads/branch-2.8
Commit: 657064e59397feb92cec95094ac34c85e3040c50
Parents: e9eaad0
Author: Mingliang Liu 
Authored: Mon Aug 15 20:24:54 2016 -0700
Committer: Mingliang Liu 
Committed: Mon Aug 15 20:46:40 2016 -0700

--
 .../apache/hadoop/test/GenericTestUtils.java| 25 ++-
 .../hadoop/test/TestGenericTestUtils.java   | 44 
 2 files changed, 56 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/657064e5/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
index 1907094..015ccea 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
@@ -43,10 +43,10 @@ import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.util.NativeCodeLoader;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Time;
-import org.apache.log4j.Layout;
 import org.apache.log4j.Level;
 import org.apache.log4j.LogManager;
 import org.apache.log4j.Logger;
+import org.apache.log4j.PatternLayout;
 import org.apache.log4j.WriterAppender;
 import org.junit.Assert;
 import org.junit.Assume;
@@ -197,36 +197,35 @@ public abstract class GenericTestUtils {
 private StringWriter sw = new StringWriter();
 private WriterAppender appender;
 private Logger logger;
-
+
 public static LogCapturer captureLogs(Log l) {
   Logger logger = ((Log4JLogger)l).getLogger();
-  LogCapturer c = new LogCapturer(logger);
-  return c;
+  return new LogCapturer(logger);
+}
+
+public static LogCapturer captureLogs(org.slf4j.Logger logger) {
+  return new LogCapturer(toLog4j(logger));
 }
-
 
 private LogCapturer(Logger logger) {
   this.logger = logger;
-  Layout layout = Logger.getRootLogger().getAppender("stdout").getLayout();
-  WriterAppender wa = new WriterAppender(layout, sw);
-  logger.addAppender(wa);
+  this.appender = new WriterAppender(new PatternLayout(), sw);
+  logger.addAppender(appender);
 }
-
+
 public String getOutput() {
   return sw.toString();
 }
-
+
 public void stopCapturing() {
   logger.removeAppender(appender);
-
 }
 
 public void clearOutput() {
   sw.getBuffer().setLength(0);
 }
   }
-  
-  
+
   /**
* Mockito answer helper that triggers one latch as soon as the
* method is called, then waits on another before continuing.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/657064e5/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/TestGenericTestUtils.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/TestGenericTestUtils.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/TestGenericTestUtils.java
index 8a7b5f6..86df5d5 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/TestGenericTestUtils.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/TestGenericTestUtils.java
@@ -18,8 +18,16 @@
 
 package org.apache.hadoop.test;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+
 import org.junit.Test;
 
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import static org.junit.Assert.assertTrue;
+
 public class TestGenericTestUtils extends GenericTestUtils {
 
   @Test
@@ -75,4 +83,40 @@ public class TestGenericTestUtils extends GenericTestUtils {
 }
   }
 
+  @Test(timeout = 1)
+  public void testLogCapturer() {
+final Log log = LogFactory.getLog(TestGenericTestUtils.class);
+LogCapturer logCapturer = LogCapturer.captureLogs(log);
+final String infoMessage = "info message";
+// test get output message
+log.info(infoMessage);
+assertTrue(logCapturer.getOutput().endsWith(
+String.format(infoMessage + "%n")));
+// test clear output
+logCapturer.clearOutput();
+

[1/5] hadoop git commit: HDFS-10724. Document caller context config keys. (Contributed by Mingliang Liu)

2016-08-15 Thread liuml07
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 1a8280edd -> 54c974326


HDFS-10724. Document caller context config keys. (Contributed by Mingliang Liu)

(cherry picked from commit 4bcbef39f7ca07601092919a7f2bea531a2dfa07)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6471ec31
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6471ec31
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6471ec31

Branch: refs/heads/branch-2.8
Commit: 6471ec31bc931ba701733f07694d843011490c49
Parents: 1a8280e
Author: Mingliang Liu 
Authored: Mon Aug 15 20:20:33 2016 -0700
Committer: Mingliang Liu 
Committed: Mon Aug 15 20:46:03 2016 -0700

--
 .../src/main/resources/core-default.xml | 29 
 1 file changed, 29 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6471ec31/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml 
b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index 22fe21d..1bfb545 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -2181,4 +2181,33 @@
 The class names of the Span Receivers to use for Hadoop.
 
   
+
+  
+hadoop.caller.context.enabled
+false
+When the feature is enabled, additional fields are written 
into
+  name-node audit log records for auditing coarse granularity operations.
+
+  
+  
+hadoop.caller.context.max.size
+128
+The maximum bytes a caller context string can have. If the
+  passed caller context is longer than this maximum bytes, client will
+  truncate it before sending to server. Note that the server may have a
+  different maximum size, and will truncate the caller context to the
+  maximum size it allows.
+
+  
+  
+hadoop.caller.context.signature.max.size
+40
+
+  The caller's signature (optional) is for offline validation. If the
+  signature exceeds the maximum allowed bytes in server, the caller context
+  will be abandoned, in which case the caller context will not be recorded
+  in audit logs.
+
+  
+
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[3/5] hadoop git commit: HDFS-10747. o.a.h.hdfs.tools.DebugAdmin usage message is misleading. (Contributed by Mingliang Liu)

2016-08-15 Thread liuml07
HDFS-10747. o.a.h.hdfs.tools.DebugAdmin usage message is misleading. 
(Contributed by Mingliang Liu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e9eaad0e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e9eaad0e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e9eaad0e

Branch: refs/heads/branch-2.8
Commit: e9eaad0e677f38018572db4a3e9adc9f4e8519a5
Parents: 0b934c3
Author: Mingliang Liu 
Authored: Mon Aug 15 20:23:47 2016 -0700
Committer: Mingliang Liu 
Committed: Mon Aug 15 20:46:34 2016 -0700

--
 .../src/main/java/org/apache/hadoop/hdfs/tools/DebugAdmin.java   | 4 ++--
 .../hadoop-hdfs/src/site/markdown/HDFSCommands.md| 4 ++--
 2 files changed, 4 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e9eaad0e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DebugAdmin.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DebugAdmin.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DebugAdmin.java
index d179a5c..a2b91ab 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DebugAdmin.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DebugAdmin.java
@@ -86,7 +86,7 @@ public class DebugAdmin extends Configured implements Tool {
   private class VerifyBlockChecksumCommand extends DebugCommand {
 VerifyBlockChecksumCommand() {
   super("verify",
-"verify [-meta ] [-block ]",
+"verify -meta  [-block ]",
 "  Verify HDFS metadata and block files.  If a block file is specified, we\n" +
 "  will verify that the checksums in the metadata file match the block\n" +
 "  file.");
@@ -200,7 +200,7 @@ public class DebugAdmin extends Configured implements Tool {
   private class RecoverLeaseCommand extends DebugCommand {
 RecoverLeaseCommand() {
   super("recoverLease",
-"recoverLease [-path ] [-retries ]",
+"recoverLease -path  [-retries ]",
 "  Recover the lease on the specified path.  The path must reside on an\n" +
 "  HDFS filesystem.  The default number of retries is 1.");
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e9eaad0e/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
index 5c3b337..fa2dae4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
@@ -515,7 +515,7 @@ Useful commands to help administrators debug HDFS issues, 
like validating block
 
 ### `verify`
 
-Usage: `hdfs debug verify [-meta ] [-block ]`
+Usage: `hdfs debug verify -meta  [-block ]`
 
 | COMMAND\_OPTION | Description |
 |: |: |
@@ -526,7 +526,7 @@ Verify HDFS metadata and block files. If a block file is 
specified, we will veri
 
 ### `recoverLease`
 
-Usage: `hdfs debug recoverLease [-path ] [-retries ]`
+Usage: `hdfs debug recoverLease -path  [-retries ]`
 
 | COMMAND\_OPTION | Description |
 |: |: |


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[2/5] hadoop git commit: HDFS-10678. Documenting NNThroughputBenchmark tool. (Contributed by Mingliang Liu)

2016-08-15 Thread liuml07
HDFS-10678. Documenting NNThroughputBenchmark tool. (Contributed by Mingliang 
Liu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f9a7e590
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f9a7e590
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f9a7e590

Branch: refs/heads/branch-2
Commit: f9a7e59066384c19b482231a1c1ed40a5324d829
Parents: e36a913
Author: Mingliang Liu 
Authored: Mon Aug 15 20:22:14 2016 -0700
Committer: Mingliang Liu 
Committed: Mon Aug 15 20:37:55 2016 -0700

--
 .../src/site/markdown/Benchmarking.md   | 106 +++
 .../server/namenode/NNThroughputBenchmark.java  |  32 +-
 hadoop-project/src/site/site.xml|   1 +
 3 files changed, 110 insertions(+), 29 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f9a7e590/hadoop-common-project/hadoop-common/src/site/markdown/Benchmarking.md
--
diff --git 
a/hadoop-common-project/hadoop-common/src/site/markdown/Benchmarking.md 
b/hadoop-common-project/hadoop-common/src/site/markdown/Benchmarking.md
new file mode 100644
index 000..678dcee
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/Benchmarking.md
@@ -0,0 +1,106 @@
+
+
+# Hadoop Benchmarking
+
+
+
+This page is to discuss benchmarking Hadoop using tools it provides.
+
+## NNThroughputBenchmark
+
+### Overview
+
+**NNThroughputBenchmark**, as its name indicates, is a name-node throughput 
benchmark, which runs a series of client threads on a single node against a 
name-node. If no name-node is configured, it will first start a name-node in 
the same process (_standalone mode_), in which case each client repetitively 
performs the same operation by directly calling the respective name-node 
methods. Otherwise, the benchmark will perform the operations against a remote 
name-node via client protocol RPCs (_remote mode_). Either way, all clients are 
running locally in a single process rather than remotely across different 
nodes. The reason is to avoid communication overhead caused by RPC connections 
and serialization, and thus reveal the upper bound of pure name-node 
performance.
+
+The benchmark first generates inputs for each thread so that the input 
generation overhead does not affect the resulting statistics. The number of 
operations performed by threads is practically the same. Precisely, the 
difference between the number of operations performed by any two threads does 
not exceed 1. Then the benchmark executes the specified number of operations 
using the specified number of threads and outputs the resulting stats by 
measuring the number of operations performed by the name-node per second.
+
+### Commands
+
+The general command line syntax is:
+
+`hadoop org.apache.hadoop.hdfs.server.namenode.NNThroughputBenchmark 
[genericOptions] [commandOptions]`
+
+ Generic Options
+
+This benchmark honors the [Hadoop command-line Generic 
Options](CommandsManual.html#Generic_Options) to alter its behavior. The 
benchmark, as other tools, will rely on the `fs.defaultFS` config, which is 
overridable by `-fs` command option, to run standalone mode or remote mode. If 
the `fs.defaultFS` scheme is not specified or is `file` (local), the benchmark 
will run in _standalone mode_. Specially, the _remote_ name-node config 
`dfs.namenode.fs-limits.min-block-size` should be set as 16 while in 
_standalone mode_ the benchmark turns off minimum block size verification for 
its internal name-node.
+
+ Command Options
+
+The following are all supported command options:
+
+| COMMAND\_OPTION| Description |
+|: |: |
+|`-op` | Specify the operation. This option must be provided and should be the 
first option. |
+|`-logLevel` | Specify the logging level when the benchmark runs. The default 
logging level is ERROR. |
+|`-UGCacheRefreshCount` | After every specified number of operations, the 
benchmark purges the name-node's user group cache. By default the refresh is 
never called. |
+|`-keepResults` | If specified, do not clean up the name-space after 
execution. By default the name-space will be removed after test. |
+
+# Operations Supported
+
+Following are all the operations supported along with their respective 
operation-specific parameters (all optional) and default values.
+
+| OPERATION\_OPTION| Operation-specific parameters |
+|: |: |
+|`all` | _options for other operations_ |
+|`create` | [`-threads 3`] [`-files 10`] [`-filesPerDir 4`] [`-close`] |
+|`mkdirs` | [`-threads 3`] [`-dirs 10`] [`-dirsPerDir 2`] |
+|`open` | [`-threads 3`] [`-files 10`] [`-filesPerDir 4`] [`-useExisting`] |
+|`delete` | [`-threads 3`] [`-files 10`] [`-filesPerDir 4`] 

[4/5] hadoop git commit: HADOOP-13470. GenericTestUtils$LogCapturer is flaky. (Contributed by Mingliang Liu)

2016-08-15 Thread liuml07
HADOOP-13470. GenericTestUtils$LogCapturer is flaky. (Contributed by Mingliang 
Liu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/23161c67
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/23161c67
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/23161c67

Branch: refs/heads/branch-2
Commit: 23161c67cf1420de417461614d4af86fefe24f50
Parents: 82623ea
Author: Mingliang Liu 
Authored: Mon Aug 15 20:24:54 2016 -0700
Committer: Mingliang Liu 
Committed: Mon Aug 15 20:38:40 2016 -0700

--
 .../apache/hadoop/test/GenericTestUtils.java| 25 ++-
 .../hadoop/test/TestGenericTestUtils.java   | 44 
 2 files changed, 56 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/23161c67/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
index 116a111..6b5135c 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
@@ -42,10 +42,10 @@ import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Time;
-import org.apache.log4j.Layout;
 import org.apache.log4j.Level;
 import org.apache.log4j.LogManager;
 import org.apache.log4j.Logger;
+import org.apache.log4j.PatternLayout;
 import org.apache.log4j.WriterAppender;
 import org.junit.Assert;
 import org.junit.Assume;
@@ -275,36 +275,35 @@ public abstract class GenericTestUtils {
 private StringWriter sw = new StringWriter();
 private WriterAppender appender;
 private Logger logger;
-
+
 public static LogCapturer captureLogs(Log l) {
   Logger logger = ((Log4JLogger)l).getLogger();
-  LogCapturer c = new LogCapturer(logger);
-  return c;
+  return new LogCapturer(logger);
+}
+
+public static LogCapturer captureLogs(org.slf4j.Logger logger) {
+  return new LogCapturer(toLog4j(logger));
 }
-
 
 private LogCapturer(Logger logger) {
   this.logger = logger;
-  Layout layout = Logger.getRootLogger().getAppender("stdout").getLayout();
-  WriterAppender wa = new WriterAppender(layout, sw);
-  logger.addAppender(wa);
+  this.appender = new WriterAppender(new PatternLayout(), sw);
+  logger.addAppender(appender);
 }
-
+
 public String getOutput() {
   return sw.toString();
 }
-
+
 public void stopCapturing() {
   logger.removeAppender(appender);
-
 }
 
 public void clearOutput() {
   sw.getBuffer().setLength(0);
 }
   }
-  
-  
+
   /**
* Mockito answer helper that triggers one latch as soon as the
* method is called, then waits on another before continuing.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/23161c67/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/TestGenericTestUtils.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/TestGenericTestUtils.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/TestGenericTestUtils.java
index 8a7b5f6..86df5d5 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/TestGenericTestUtils.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/TestGenericTestUtils.java
@@ -18,8 +18,16 @@
 
 package org.apache.hadoop.test;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+
 import org.junit.Test;
 
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import static org.junit.Assert.assertTrue;
+
 public class TestGenericTestUtils extends GenericTestUtils {
 
   @Test
@@ -75,4 +83,40 @@ public class TestGenericTestUtils extends GenericTestUtils {
 }
   }
 
+  @Test(timeout = 1)
+  public void testLogCapturer() {
+final Log log = LogFactory.getLog(TestGenericTestUtils.class);
+LogCapturer logCapturer = LogCapturer.captureLogs(log);
+final String infoMessage = "info message";
+// test get output message
+log.info(infoMessage);
+assertTrue(logCapturer.getOutput().endsWith(
+String.format(infoMessage + "%n")));
+// test clear output
+logCapturer.clearOutput();
+

[1/5] hadoop git commit: HDFS-10724. Document caller context config keys. (Contributed by Mingliang Liu)

2016-08-15 Thread liuml07
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 74156ee20 -> 2f4660ce9


HDFS-10724. Document caller context config keys. (Contributed by Mingliang Liu)

(cherry picked from commit 4bcbef39f7ca07601092919a7f2bea531a2dfa07)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e36a9136
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e36a9136
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e36a9136

Branch: refs/heads/branch-2
Commit: e36a913663ddb97469e76ddeb072d9530b6a1a29
Parents: 74156ee
Author: Mingliang Liu 
Authored: Mon Aug 15 20:20:33 2016 -0700
Committer: Mingliang Liu 
Committed: Mon Aug 15 20:36:11 2016 -0700

--
 .../src/main/resources/core-default.xml | 29 
 1 file changed, 29 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e36a9136/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml 
b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index 7851632..77171ad 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -2311,4 +2311,33 @@
   needs to be specified in net.topology.script.file.name.
 
   
+
+  
+hadoop.caller.context.enabled
+false
+When the feature is enabled, additional fields are written 
into
+  name-node audit log records for auditing coarse granularity operations.
+
+  
+  
+hadoop.caller.context.max.size
+128
+The maximum bytes a caller context string can have. If the
+  passed caller context is longer than this maximum bytes, client will
+  truncate it before sending to server. Note that the server may have a
+  different maximum size, and will truncate the caller context to the
+  maximum size it allows.
+
+  
+  
+hadoop.caller.context.signature.max.size
+40
+
+  The caller's signature (optional) is for offline validation. If the
+  signature exceeds the maximum allowed bytes in server, the caller context
+  will be abandoned, in which case the caller context will not be recorded
+  in audit logs.
+
+  
+
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[5/5] hadoop git commit: HDFS-10641. TestBlockManager#testBlockReportQueueing fails intermittently. (Contributed by Daryn Sharp)

2016-08-15 Thread liuml07
HDFS-10641. TestBlockManager#testBlockReportQueueing fails intermittently. 
(Contributed by Daryn Sharp)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2f4660ce
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2f4660ce
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2f4660ce

Branch: refs/heads/branch-2
Commit: 2f4660ce9892e822d241c87954219460e4351779
Parents: 23161c6
Author: Mingliang Liu 
Authored: Mon Aug 15 20:28:40 2016 -0700
Committer: Mingliang Liu 
Committed: Mon Aug 15 20:39:12 2016 -0700

--
 .../hadoop/hdfs/server/blockmanagement/TestBlockManager.java | 8 ++--
 1 file changed, 6 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2f4660ce/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
index 5d0ca96..cc0fafd 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
@@ -945,6 +945,7 @@ public class TestBlockManager {
 
   final CyclicBarrier startBarrier = new CyclicBarrier(2);
   final CountDownLatch endLatch = new CountDownLatch(3);
+  final CountDownLatch doneLatch = new CountDownLatch(1);
 
   // create a task intended to block while processing, thus causing
   // the queue to backup.  simulates how a full BR is processed.
@@ -952,7 +953,7 @@ public class TestBlockManager {
   new Callable(){
 @Override
 public Void call() throws IOException {
-  return bm.runBlockOp(new Callable() {
+  bm.runBlockOp(new Callable() {
 @Override
 public Void call()
 throws InterruptedException, BrokenBarrierException {
@@ -962,6 +963,9 @@ public class TestBlockManager {
   return null;
 }
   });
+  // signal that runBlockOp returned
+  doneLatch.countDown();
+  return null;
 }
   });
 
@@ -1006,7 +1010,7 @@ public class TestBlockManager {
   startBarrier.await(1, TimeUnit.SECONDS);
   assertTrue(endLatch.await(1, TimeUnit.SECONDS));
   assertEquals(0, bm.getBlockOpQueueLength());
-  assertTrue(blockingOp.isDone());
+  assertTrue(doneLatch.await(1, TimeUnit.SECONDS));
 } finally {
   cluster.shutdown();
 }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[2/5] hadoop git commit: HDFS-10678. Documenting NNThroughputBenchmark tool. (Contributed by Mingliang Liu)

2016-08-15 Thread liuml07
HDFS-10678. Documenting NNThroughputBenchmark tool. (Contributed by Mingliang 
Liu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0b934c37
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0b934c37
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0b934c37

Branch: refs/heads/branch-2.8
Commit: 0b934c375ef957ae635a893a6c49fa89068c0227
Parents: 6471ec3
Author: Mingliang Liu 
Authored: Mon Aug 15 20:22:14 2016 -0700
Committer: Mingliang Liu 
Committed: Mon Aug 15 20:46:28 2016 -0700

--
 .../src/site/markdown/Benchmarking.md   | 106 +++
 .../server/namenode/NNThroughputBenchmark.java  |  32 +-
 hadoop-project/src/site/site.xml|   1 +
 3 files changed, 110 insertions(+), 29 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0b934c37/hadoop-common-project/hadoop-common/src/site/markdown/Benchmarking.md
--
diff --git 
a/hadoop-common-project/hadoop-common/src/site/markdown/Benchmarking.md 
b/hadoop-common-project/hadoop-common/src/site/markdown/Benchmarking.md
new file mode 100644
index 000..678dcee
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/Benchmarking.md
@@ -0,0 +1,106 @@
+
+
+# Hadoop Benchmarking
+
+
+
+This page is to discuss benchmarking Hadoop using tools it provides.
+
+## NNThroughputBenchmark
+
+### Overview
+
+**NNThroughputBenchmark**, as its name indicates, is a name-node throughput 
benchmark, which runs a series of client threads on a single node against a 
name-node. If no name-node is configured, it will firstly start a name-node in 
the same process (_standalone mode_), in which case each client repetitively 
performs the same operation by directly calling the respective name-node 
methods. Otherwise, the benchmark will perform the operations against a remote 
name-node via client protocol RPCs (_remote mode_). Either way, all clients are 
running locally in a single process rather than remotely across different 
nodes. The reason is to avoid communication overhead caused by RPC connections 
and serialization, and thus reveal the upper bound of pure name-node 
performance.
+
+The benchmark first generates inputs for each thread so that the input 
generation overhead does not affect the resulting statistics. The number of 
operations performed by threads is practically the same. Precisely, the 
difference between the number of operations performed by any two threads does 
not exceed 1. Then the benchmark executes the specified number of operations 
using the specified number of threads and outputs the resulting stats by 
measuring the number of operations performed by the name-node per second.
+
+### Commands
+
+The general command line syntax is:
+
+`hadoop org.apache.hadoop.hdfs.server.namenode.NNThroughputBenchmark 
[genericOptions] [commandOptions]`
+
+ Generic Options
+
+This benchmark honors the [Hadoop command-line Generic 
Options](CommandsManual.html#Generic_Options) to alter its behavior. The 
benchmark, as other tools, will rely on the `fs.defaultFS` config, which is 
overridable by `-fs` command option, to run standalone mode or remote mode. If 
the `fs.defaultFS` scheme is not specified or is `file` (local), the benchmark 
will run in _standalone mode_. Specifically, the _remote_ name-node config 
`dfs.namenode.fs-limits.min-block-size` should be set as 16 while in 
_standalone mode_ the benchmark turns off minimum block size verification for 
its internal name-node.
+
+ Command Options
+
+The following are all supported command options:
+
+| COMMAND\_OPTION| Description |
+|: |: |
+|`-op` | Specify the operation. This option must be provided and should be the 
first option. |
+|`-logLevel` | Specify the logging level when the benchmark runs. The default 
logging level is ERROR. |
+|`-UGCacheRefreshCount` | After every specified number of operations, the 
benchmark purges the name-node's user group cache. By default the refresh is 
never called. |
+|`-keepResults` | If specified, do not clean up the name-space after 
execution. By default the name-space will be removed after test. |
+
+# Operations Supported
+
+Following are all the operations supported along with their respective 
operation-specific parameters (all optional) and default values.
+
+| OPERATION\_OPTION| Operation-specific parameters |
+|: |: |
+|`all` | _options for other operations_ |
+|`create` | [`-threads 3`] [`-files 10`] [`-filesPerDir 4`] [`-close`] |
+|`mkdirs` | [`-threads 3`] [`-dirs 10`] [`-dirsPerDir 2`] |
+|`open` | [`-threads 3`] [`-files 10`] [`-filesPerDir 4`] [`-useExisting`] |
+|`delete` | [`-threads 3`] [`-files 10`] [`-filesPerDir 4`] 

[3/5] hadoop git commit: HDFS-10747. o.a.h.hdfs.tools.DebugAdmin usage message is misleading. (Contributed by Mingliang Liu)

2016-08-15 Thread liuml07
HDFS-10747. o.a.h.hdfs.tools.DebugAdmin usage message is misleading. 
(Contributed by Mingliang Liu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/82623ea9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/82623ea9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/82623ea9

Branch: refs/heads/branch-2
Commit: 82623ea9c9a95af64a23aca2bda61f4da65fde31
Parents: f9a7e59
Author: Mingliang Liu 
Authored: Mon Aug 15 20:23:47 2016 -0700
Committer: Mingliang Liu 
Committed: Mon Aug 15 20:38:17 2016 -0700

--
 .../src/main/java/org/apache/hadoop/hdfs/tools/DebugAdmin.java   | 4 ++--
 .../hadoop-hdfs/src/site/markdown/HDFSCommands.md| 4 ++--
 2 files changed, 4 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/82623ea9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DebugAdmin.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DebugAdmin.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DebugAdmin.java
index d179a5c..a2b91ab 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DebugAdmin.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DebugAdmin.java
@@ -86,7 +86,7 @@ public class DebugAdmin extends Configured implements Tool {
   private class VerifyBlockChecksumCommand extends DebugCommand {
 VerifyBlockChecksumCommand() {
   super("verify",
-"verify [-meta ] [-block ]",
+"verify -meta  [-block ]",
 "  Verify HDFS metadata and block files.  If a block file is specified, we\n" +
 "  will verify that the checksums in the metadata file match the block\n" +
 "  file.");
@@ -200,7 +200,7 @@ public class DebugAdmin extends Configured implements Tool {
   private class RecoverLeaseCommand extends DebugCommand {
 RecoverLeaseCommand() {
   super("recoverLease",
-"recoverLease [-path ] [-retries ]",
+"recoverLease -path  [-retries ]",
 "  Recover the lease on the specified path.  The path must reside on an\n" +
 "  HDFS filesystem.  The default number of retries is 1.");
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/82623ea9/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
index c224ba1..e54c113 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
@@ -556,7 +556,7 @@ Useful commands to help administrators debug HDFS issues, 
like validating block
 
 ### `verify`
 
-Usage: `hdfs debug verify [-meta ] [-block ]`
+Usage: `hdfs debug verify -meta  [-block ]`
 
 | COMMAND\_OPTION | Description |
 |: |: |
@@ -567,7 +567,7 @@ Verify HDFS metadata and block files. If a block file is 
specified, we will veri
 
 ### `recoverLease`
 
-Usage: `hdfs debug recoverLease [-path ] [-retries ]`
+Usage: `hdfs debug recoverLease -path  [-retries ]`
 
 | COMMAND\_OPTION | Description |
 |: |: |


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[5/5] hadoop git commit: HDFS-10641. TestBlockManager#testBlockReportQueueing fails intermittently. (Contributed by Daryn Sharp)

2016-08-15 Thread liuml07
HDFS-10641. TestBlockManager#testBlockReportQueueing fails intermittently. 
(Contributed by Daryn Sharp)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/54c97432
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/54c97432
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/54c97432

Branch: refs/heads/branch-2.8
Commit: 54c974326329b206a2544544a685a306facf99cf
Parents: 657064e
Author: Mingliang Liu 
Authored: Mon Aug 15 20:28:40 2016 -0700
Committer: Mingliang Liu 
Committed: Mon Aug 15 20:46:48 2016 -0700

--
 .../hadoop/hdfs/server/blockmanagement/TestBlockManager.java | 8 ++--
 1 file changed, 6 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/54c97432/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
index f57324d..e9d4018 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
@@ -945,6 +945,7 @@ public class TestBlockManager {
 
   final CyclicBarrier startBarrier = new CyclicBarrier(2);
   final CountDownLatch endLatch = new CountDownLatch(3);
+  final CountDownLatch doneLatch = new CountDownLatch(1);
 
   // create a task intended to block while processing, thus causing
   // the queue to backup.  simulates how a full BR is processed.
@@ -952,7 +953,7 @@ public class TestBlockManager {
   new Callable(){
 @Override
 public Void call() throws IOException {
-  return bm.runBlockOp(new Callable() {
+  bm.runBlockOp(new Callable() {
 @Override
 public Void call()
 throws InterruptedException, BrokenBarrierException {
@@ -962,6 +963,9 @@ public class TestBlockManager {
   return null;
 }
   });
+  // signal that runBlockOp returned
+  doneLatch.countDown();
+  return null;
 }
   });
 
@@ -1006,7 +1010,7 @@ public class TestBlockManager {
   startBarrier.await(1, TimeUnit.SECONDS);
   assertTrue(endLatch.await(1, TimeUnit.SECONDS));
   assertEquals(0, bm.getBlockOpQueueLength());
-  assertTrue(blockingOp.isDone());
+  assertTrue(doneLatch.await(1, TimeUnit.SECONDS));
 } finally {
   cluster.shutdown();
 }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[5/6] hadoop git commit: HADOOP-13470. GenericTestUtils$LogCapturer is flaky. (Contributed by Mingliang Liu)

2016-08-15 Thread liuml07
HADOOP-13470. GenericTestUtils$LogCapturer is flaky. (Contributed by Mingliang 
Liu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9336a049
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9336a049
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9336a049

Branch: refs/heads/trunk
Commit: 9336a0495f99cd3fbc7ecef452eb37cfbaf57440
Parents: ef55fe1
Author: Mingliang Liu 
Authored: Mon Aug 15 20:24:54 2016 -0700
Committer: Mingliang Liu 
Committed: Mon Aug 15 20:24:54 2016 -0700

--
 .../apache/hadoop/test/GenericTestUtils.java| 25 ++-
 .../hadoop/test/TestGenericTestUtils.java   | 44 
 2 files changed, 56 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9336a049/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
index 116a111..6b5135c 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
@@ -42,10 +42,10 @@ import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Time;
-import org.apache.log4j.Layout;
 import org.apache.log4j.Level;
 import org.apache.log4j.LogManager;
 import org.apache.log4j.Logger;
+import org.apache.log4j.PatternLayout;
 import org.apache.log4j.WriterAppender;
 import org.junit.Assert;
 import org.junit.Assume;
@@ -275,36 +275,35 @@ public abstract class GenericTestUtils {
 private StringWriter sw = new StringWriter();
 private WriterAppender appender;
 private Logger logger;
-
+
 public static LogCapturer captureLogs(Log l) {
   Logger logger = ((Log4JLogger)l).getLogger();
-  LogCapturer c = new LogCapturer(logger);
-  return c;
+  return new LogCapturer(logger);
+}
+
+public static LogCapturer captureLogs(org.slf4j.Logger logger) {
+  return new LogCapturer(toLog4j(logger));
 }
-
 
 private LogCapturer(Logger logger) {
   this.logger = logger;
-  Layout layout = Logger.getRootLogger().getAppender("stdout").getLayout();
-  WriterAppender wa = new WriterAppender(layout, sw);
-  logger.addAppender(wa);
+  this.appender = new WriterAppender(new PatternLayout(), sw);
+  logger.addAppender(appender);
 }
-
+
 public String getOutput() {
   return sw.toString();
 }
-
+
 public void stopCapturing() {
   logger.removeAppender(appender);
-
 }
 
 public void clearOutput() {
   sw.getBuffer().setLength(0);
 }
   }
-  
-  
+
   /**
* Mockito answer helper that triggers one latch as soon as the
* method is called, then waits on another before continuing.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9336a049/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/TestGenericTestUtils.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/TestGenericTestUtils.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/TestGenericTestUtils.java
index 8a7b5f6..86df5d5 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/TestGenericTestUtils.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/TestGenericTestUtils.java
@@ -18,8 +18,16 @@
 
 package org.apache.hadoop.test;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+
 import org.junit.Test;
 
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import static org.junit.Assert.assertTrue;
+
 public class TestGenericTestUtils extends GenericTestUtils {
 
   @Test
@@ -75,4 +83,40 @@ public class TestGenericTestUtils extends GenericTestUtils {
 }
   }
 
+  @Test(timeout = 1)
+  public void testLogCapturer() {
+final Log log = LogFactory.getLog(TestGenericTestUtils.class);
+LogCapturer logCapturer = LogCapturer.captureLogs(log);
+final String infoMessage = "info message";
+// test get output message
+log.info(infoMessage);
+assertTrue(logCapturer.getOutput().endsWith(
+String.format(infoMessage + "%n")));
+// test clear output
+logCapturer.clearOutput();
+

[3/6] hadoop git commit: HDFS-10678. Documenting NNThroughputBenchmark tool. (Contributed by Mingliang Liu)

2016-08-15 Thread liuml07
HDFS-10678. Documenting NNThroughputBenchmark tool. (Contributed by Mingliang 
Liu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/382d6152
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/382d6152
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/382d6152

Branch: refs/heads/trunk
Commit: 382d6152602339fe58169b2918ec74e7a7cd5581
Parents: 4bcbef3
Author: Mingliang Liu 
Authored: Mon Aug 15 20:22:14 2016 -0700
Committer: Mingliang Liu 
Committed: Mon Aug 15 20:22:14 2016 -0700

--
 .../src/site/markdown/Benchmarking.md   | 106 +++
 .../server/namenode/NNThroughputBenchmark.java  |  32 +-
 hadoop-project/src/site/site.xml|   1 +
 3 files changed, 110 insertions(+), 29 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/382d6152/hadoop-common-project/hadoop-common/src/site/markdown/Benchmarking.md
--
diff --git 
a/hadoop-common-project/hadoop-common/src/site/markdown/Benchmarking.md 
b/hadoop-common-project/hadoop-common/src/site/markdown/Benchmarking.md
new file mode 100644
index 000..678dcee
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/Benchmarking.md
@@ -0,0 +1,106 @@
+
+
+# Hadoop Benchmarking
+
+
+
+This page is to discuss benchmarking Hadoop using tools it provides.
+
+## NNThroughputBenchmark
+
+### Overview
+
+**NNThroughputBenchmark**, as its name indicates, is a name-node throughput 
benchmark, which runs a series of client threads on a single node against a 
name-node. If no name-node is configured, it will firstly start a name-node in 
the same process (_standalone mode_), in which case each client repetitively 
performs the same operation by directly calling the respective name-node 
methods. Otherwise, the benchmark will perform the operations against a remote 
name-node via client protocol RPCs (_remote mode_). Either way, all clients are 
running locally in a single process rather than remotely across different 
nodes. The reason is to avoid communication overhead caused by RPC connections 
and serialization, and thus reveal the upper bound of pure name-node 
performance.
+
+The benchmark first generates inputs for each thread so that the input 
generation overhead does not affect the resulting statistics. The number of 
operations performed by threads is practically the same. Precisely, the 
difference between the number of operations performed by any two threads does 
not exceed 1. Then the benchmark executes the specified number of operations 
using the specified number of threads and outputs the resulting stats by 
measuring the number of operations performed by the name-node per second.
+
+### Commands
+
+The general command line syntax is:
+
+`hadoop org.apache.hadoop.hdfs.server.namenode.NNThroughputBenchmark 
[genericOptions] [commandOptions]`
+
+ Generic Options
+
+This benchmark honors the [Hadoop command-line Generic 
Options](CommandsManual.html#Generic_Options) to alter its behavior. The 
benchmark, as other tools, will rely on the `fs.defaultFS` config, which is 
overridable by `-fs` command option, to run standalone mode or remote mode. If 
the `fs.defaultFS` scheme is not specified or is `file` (local), the benchmark 
will run in _standalone mode_. Specifically, the _remote_ name-node config 
`dfs.namenode.fs-limits.min-block-size` should be set as 16 while in 
_standalone mode_ the benchmark turns off minimum block size verification for 
its internal name-node.
+
+ Command Options
+
+The following are all supported command options:
+
+| COMMAND\_OPTION| Description |
+|: |: |
+|`-op` | Specify the operation. This option must be provided and should be the 
first option. |
+|`-logLevel` | Specify the logging level when the benchmark runs. The default 
logging level is ERROR. |
+|`-UGCacheRefreshCount` | After every specified number of operations, the 
benchmark purges the name-node's user group cache. By default the refresh is 
never called. |
+|`-keepResults` | If specified, do not clean up the name-space after 
execution. By default the name-space will be removed after test. |
+
+# Operations Supported
+
+Following are all the operations supported along with their respective 
operation-specific parameters (all optional) and default values.
+
+| OPERATION\_OPTION| Operation-specific parameters |
+|: |: |
+|`all` | _options for other operations_ |
+|`create` | [`-threads 3`] [`-files 10`] [`-filesPerDir 4`] [`-close`] |
+|`mkdirs` | [`-threads 3`] [`-dirs 10`] [`-dirsPerDir 2`] |
+|`open` | [`-threads 3`] [`-files 10`] [`-filesPerDir 4`] [`-useExisting`] |
+|`delete` | [`-threads 3`] [`-files 10`] [`-filesPerDir 4`] 

[1/6] hadoop git commit: HDFS-10725. Caller context should always be constructed by a builder. (Contributed by Mingliang Liu)

2016-08-15 Thread liuml07
Repository: hadoop
Updated Branches:
  refs/heads/trunk 5628b36c0 -> 4d4d95fdd


HDFS-10725. Caller context should always be constructed by a builder. 
(Contributed by Mingliang Liu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/12ad63d7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/12ad63d7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/12ad63d7

Branch: refs/heads/trunk
Commit: 12ad63d7232ca72be9eff5680d974fc16999aac3
Parents: 5628b36
Author: Mingliang Liu 
Authored: Mon Aug 15 20:13:20 2016 -0700
Committer: Mingliang Liu 
Committed: Mon Aug 15 20:14:05 2016 -0700

--
 .../src/main/java/org/apache/hadoop/ipc/CallerContext.java   | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/12ad63d7/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/CallerContext.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/CallerContext.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/CallerContext.java
index b197575..3d21bfe 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/CallerContext.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/CallerContext.java
@@ -35,7 +35,7 @@ import java.util.Arrays;
 @InterfaceAudience.LimitedPrivate({"HBase", "HDFS", "Hive", "MapReduce",
 "Pig", "YARN"})
 @InterfaceStability.Evolving
-public class CallerContext {
+public final class CallerContext {
   public static final Charset SIGNATURE_ENCODING = StandardCharsets.UTF_8;
   /** The caller context.
*
@@ -54,7 +54,7 @@ public class CallerContext {
*/
   private final byte[] signature;
 
-  public CallerContext(Builder builder) {
+  private CallerContext(Builder builder) {
 this.context = builder.context;
 this.signature = builder.signature;
   }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[6/6] hadoop git commit: HDFS-10641. TestBlockManager#testBlockReportQueueing fails intermittently. (Contributed by Daryn Sharp)

2016-08-15 Thread liuml07
HDFS-10641. TestBlockManager#testBlockReportQueueing fails intermittently. 
(Contributed by Daryn Sharp)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4d4d95fd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4d4d95fd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4d4d95fd

Branch: refs/heads/trunk
Commit: 4d4d95fdd5e1e985c16005adc45517cc8b549ae8
Parents: 9336a04
Author: Mingliang Liu 
Authored: Mon Aug 15 20:28:40 2016 -0700
Committer: Mingliang Liu 
Committed: Mon Aug 15 20:28:40 2016 -0700

--
 .../hadoop/hdfs/server/blockmanagement/TestBlockManager.java | 8 ++--
 1 file changed, 6 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4d4d95fd/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
index 535acd7..1f58f99 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
@@ -1013,6 +1013,7 @@ public class TestBlockManager {
 
   final CyclicBarrier startBarrier = new CyclicBarrier(2);
   final CountDownLatch endLatch = new CountDownLatch(3);
+  final CountDownLatch doneLatch = new CountDownLatch(1);
 
   // create a task intended to block while processing, thus causing
   // the queue to backup.  simulates how a full BR is processed.
@@ -1020,7 +1021,7 @@ public class TestBlockManager {
   new Callable(){
 @Override
 public Void call() throws IOException {
-  return bm.runBlockOp(new Callable() {
+  bm.runBlockOp(new Callable() {
 @Override
 public Void call()
 throws InterruptedException, BrokenBarrierException {
@@ -1030,6 +1031,9 @@ public class TestBlockManager {
   return null;
 }
   });
+  // signal that runBlockOp returned
+  doneLatch.countDown();
+  return null;
 }
   });
 
@@ -1074,7 +1078,7 @@ public class TestBlockManager {
   startBarrier.await(1, TimeUnit.SECONDS);
   assertTrue(endLatch.await(1, TimeUnit.SECONDS));
   assertEquals(0, bm.getBlockOpQueueLength());
-  assertTrue(blockingOp.isDone());
+  assertTrue(doneLatch.await(1, TimeUnit.SECONDS));
 } finally {
   cluster.shutdown();
 }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[2/6] hadoop git commit: HDFS-10724. Document caller context config keys. (Contributed by Mingliang Liu)

2016-08-15 Thread liuml07
HDFS-10724. Document caller context config keys. (Contributed by Mingliang Liu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4bcbef39
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4bcbef39
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4bcbef39

Branch: refs/heads/trunk
Commit: 4bcbef39f7ca07601092919a7f2bea531a2dfa07
Parents: 12ad63d
Author: Mingliang Liu 
Authored: Mon Aug 15 20:20:33 2016 -0700
Committer: Mingliang Liu 
Committed: Mon Aug 15 20:20:33 2016 -0700

--
 .../src/main/resources/core-default.xml | 28 
 1 file changed, 28 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4bcbef39/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml 
b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index 992b464..f9c3f72 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -2325,4 +2325,32 @@
 org.apache.hadoop.fs.adl.Adl
   
 
+  
+hadoop.caller.context.enabled
+false
+When the feature is enabled, additional fields are written 
into
+  name-node audit log records for auditing coarse granularity operations.
+
+  
+  
+hadoop.caller.context.max.size
+128
+The maximum bytes a caller context string can have. If the
+  passed caller context is longer than this maximum bytes, client will
+  truncate it before sending to server. Note that the server may have a
+  different maximum size, and will truncate the caller context to the
+  maximum size it allows.
+
+  
+  
+hadoop.caller.context.signature.max.size
+40
+
+  The caller's signature (optional) is for offline validation. If the
+  signature exceeds the maximum allowed bytes in server, the caller context
+  will be abandoned, in which case the caller context will not be recorded
+  in audit logs.
+
+  
+
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[4/6] hadoop git commit: HDFS-10747. o.a.h.hdfs.tools.DebugAdmin usage message is misleading. (Contributed by Mingliang Liu)

2016-08-15 Thread liuml07
HDFS-10747. o.a.h.hdfs.tools.DebugAdmin usage message is misleading. 
(Contributed by Mingliang Liu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ef55fe17
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ef55fe17
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ef55fe17

Branch: refs/heads/trunk
Commit: ef55fe171691446d38e6a14e92c1fd4d3d0c64c5
Parents: 382d615
Author: Mingliang Liu 
Authored: Mon Aug 15 20:23:47 2016 -0700
Committer: Mingliang Liu 
Committed: Mon Aug 15 20:23:47 2016 -0700

--
 .../src/main/java/org/apache/hadoop/hdfs/tools/DebugAdmin.java   | 4 ++--
 .../hadoop-hdfs/src/site/markdown/HDFSCommands.md| 4 ++--
 2 files changed, 4 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ef55fe17/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DebugAdmin.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DebugAdmin.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DebugAdmin.java
index d179a5c..a2b91ab 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DebugAdmin.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DebugAdmin.java
@@ -86,7 +86,7 @@ public class DebugAdmin extends Configured implements Tool {
   private class VerifyBlockChecksumCommand extends DebugCommand {
 VerifyBlockChecksumCommand() {
   super("verify",
-"verify [-meta ] [-block ]",
+"verify -meta  [-block ]",
 "  Verify HDFS metadata and block files.  If a block file is specified, we\n" +
 "  will verify that the checksums in the metadata file match the block\n" +
 "  file.");
@@ -200,7 +200,7 @@ public class DebugAdmin extends Configured implements Tool {
   private class RecoverLeaseCommand extends DebugCommand {
 RecoverLeaseCommand() {
   super("recoverLease",
-"recoverLease [-path ] [-retries ]",
+"recoverLease -path  [-retries ]",
 "  Recover the lease on the specified path.  The path must reside on an\n" +
 "  HDFS filesystem.  The default number of retries is 1.");
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ef55fe17/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
index cbc293f..22886d3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
@@ -604,7 +604,7 @@ Useful commands to help administrators debug HDFS issues, 
like validating block
 
 ### `verify`
 
-Usage: `hdfs debug verify [-meta <metadata-file>] [-block <block-file>]`
+Usage: `hdfs debug verify -meta <metadata-file> [-block <block-file>]`
 
 | COMMAND\_OPTION | Description |
 |: |: |
@@ -615,7 +615,7 @@ Verify HDFS metadata and block files. If a block file is 
specified, we will veri
 
 ### `recoverLease`
 
-Usage: `hdfs debug recoverLease [-path <path>] [-retries <num-retries>]`
+Usage: `hdfs debug recoverLease -path <path> [-retries <num-retries>]`
 
 | COMMAND\_OPTION | Description |
 |: |: |


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDFS-10559. DiskBalancer: Use SHA1 for Plan ID. Contributed by Xiaobing Zhou.

2016-08-15 Thread aengineer
Repository: hadoop
Updated Branches:
  refs/heads/trunk 02abd131b -> 5628b36c0


HDFS-10559. DiskBalancer: Use SHA1 for Plan ID. Contributed by Xiaobing Zhou.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5628b36c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5628b36c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5628b36c

Branch: refs/heads/trunk
Commit: 5628b36c0872d58c9b25f23da3dab4eafad9bca3
Parents: 02abd13
Author: Anu Engineer 
Authored: Mon Aug 15 20:10:21 2016 -0700
Committer: Anu Engineer 
Committed: Mon Aug 15 20:10:21 2016 -0700

--
 .../hadoop/hdfs/protocol/ClientDatanodeProtocol.java  |  2 +-
 .../ClientDatanodeProtocolTranslatorPB.java   |  2 +-
 .../src/main/proto/ClientDatanodeProtocol.proto   |  2 +-
 .../hadoop/hdfs/server/datanode/DiskBalancer.java | 14 +++---
 .../server/diskbalancer/command/CancelCommand.java|  2 +-
 .../server/diskbalancer/command/ExecuteCommand.java   |  2 +-
 .../hdfs/server/diskbalancer/TestDiskBalancer.java|  4 ++--
 .../hdfs/server/diskbalancer/TestDiskBalancerRPC.java |  2 +-
 .../diskbalancer/TestDiskBalancerWithMockMover.java   |  8 
 9 files changed, 19 insertions(+), 19 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5628b36c/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java
index 477d308..10041f5 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java
@@ -175,7 +175,7 @@ public interface ClientDatanodeProtocol {
   /**
* Cancel an executing plan.
*
-   * @param planID - A SHA512 hash of the plan string.
+   * @param planID - A SHA-1 hash of the plan string.
*/
   void cancelDiskBalancePlan(String planID) throws IOException;
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5628b36c/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java
index 045ccd5..0cf006c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java
@@ -369,7 +369,7 @@ public class ClientDatanodeProtocolTranslatorPB implements
   /**
* Cancels an executing disk balancer plan.
*
-   * @param planID - A SHA512 hash of the plan string.
+   * @param planID - A SHA-1 hash of the plan string.
* @throws IOException on error
*/
   @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5628b36c/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientDatanodeProtocol.proto
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientDatanodeProtocol.proto
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientDatanodeProtocol.proto
index 11d04af..e4333cd 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientDatanodeProtocol.proto
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientDatanodeProtocol.proto
@@ -154,7 +154,7 @@ message GetBalancerBandwidthResponseProto {
  * balancer plan to a data node.
  */
 message SubmitDiskBalancerPlanRequestProto {
-  required string planID = 1; // A hash of the plan like SHA512
+  required string planID = 1; // A hash of the plan like SHA-1
   required string plan = 2;   // Plan file data in Json format
   optional uint64 planVersion = 3;// Plan version number
   optional bool ignoreDateCheck = 4;  // Ignore date checks on this plan.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5628b36c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancer.java

hadoop git commit: HDFS-10567. Improve plan command help message. Contributed by Xiaobing Zhou.

2016-08-15 Thread aengineer
Repository: hadoop
Updated Branches:
  refs/heads/trunk 9daa9979a -> 02abd131b


HDFS-10567. Improve plan command help message. Contributed by Xiaobing Zhou.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/02abd131
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/02abd131
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/02abd131

Branch: refs/heads/trunk
Commit: 02abd131b857a89d9fc21507296603120bb50810
Parents: 9daa997
Author: Anu Engineer 
Authored: Mon Aug 15 19:54:06 2016 -0700
Committer: Anu Engineer 
Committed: Mon Aug 15 19:58:57 2016 -0700

--
 .../apache/hadoop/hdfs/tools/DiskBalancer.java  | 29 
 1 file changed, 18 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/02abd131/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DiskBalancer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DiskBalancer.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DiskBalancer.java
index 70912d0..1ed2fdc 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DiskBalancer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DiskBalancer.java
@@ -266,33 +266,40 @@ public class DiskBalancer extends Configured implements 
Tool {
   private void addPlanCommands(Options opt) {
 
 Option plan = OptionBuilder.withLongOpt(PLAN)
-.withDescription("creates a plan for datanode.")
+.withDescription("Hostname, IP address or UUID of datanode " +
+"for which a plan is created.")
 .hasArg()
 .create();
 getPlanOptions().addOption(plan);
 opt.addOption(plan);
 
 
-Option outFile = OptionBuilder.withLongOpt(OUTFILE)
-.hasArg()
-.withDescription("File to write output to, if not specified " +
-"defaults will be used.")
+Option outFile = OptionBuilder.withLongOpt(OUTFILE).hasArg()
+.withDescription(
+"Local path of file to write output to, if not specified "
++ "defaults will be used.")
 .create();
 getPlanOptions().addOption(outFile);
 opt.addOption(outFile);
 
-Option bandwidth = OptionBuilder.withLongOpt(BANDWIDTH)
-.hasArg()
-.withDescription("Maximum disk bandwidth to be consumed by " +
-"diskBalancer. e.g. 10")
+Option bandwidth = OptionBuilder.withLongOpt(BANDWIDTH).hasArg()
+.withDescription(
+"Maximum disk bandwidth (MB/s) in integer to be consumed by "
++ "diskBalancer. e.g. 10 MB/s.")
 .create();
 getPlanOptions().addOption(bandwidth);
 opt.addOption(bandwidth);
 
 Option threshold = OptionBuilder.withLongOpt(THRESHOLD)
 .hasArg()
-.withDescription("Percentage skew that we" +
-"tolerate before diskbalancer starts working e.g. 10")
+.withDescription("Percentage of data skew that is tolerated before"
++ " disk balancer starts working. For example, if"
++ " total data on a 2 disk node is 100 GB then disk"
++ " balancer calculates the expected value on each disk,"
++ " which is 50 GB. If the tolerance is 10% then data"
++ " on a single disk needs to be more than 60 GB"
++ " (50 GB + 10% tolerance value) for Disk balancer to"
++ " balance the disks.")
 .create();
 getPlanOptions().addOption(threshold);
 opt.addOption(threshold);


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



svn commit: r1756465 - /hadoop/common/site/main/author/src/documentation/content/xdocs/who.xml

2016-08-15 Thread liuml07
Author: liuml07
Date: Tue Aug 16 02:56:13 2016
New Revision: 1756465

URL: http://svn.apache.org/viewvc?rev=1756465&view=rev
Log:
Add Mingliang Liu (liuml07) to committer list (not PMC list)

Modified:
hadoop/common/site/main/author/src/documentation/content/xdocs/who.xml

Modified: hadoop/common/site/main/author/src/documentation/content/xdocs/who.xml
URL: 
http://svn.apache.org/viewvc/hadoop/common/site/main/author/src/documentation/content/xdocs/who.xml?rev=1756465&r1=1756464&r2=1756465&view=diff
==
--- hadoop/common/site/main/author/src/documentation/content/xdocs/who.xml 
(original)
+++ hadoop/common/site/main/author/src/documentation/content/xdocs/who.xml Tue 
Aug 16 02:56:13 2016
@@ -303,14 +303,6 @@
  -8
 
 
-  
-liuml07
-http://people.apache.org/~liuml07;>Mingliang Liu
-Hortonworks
-HDFS
--8
-  
-
 
   llu
   http://people.apache.org/~llu;>Luke Lu
@@ -1035,6 +1027,14 @@
 -8

 
+  
+liuml07
+http://people.apache.org/~liuml07;>Mingliang Liu
+Hortonworks
+HDFS
+-8
+  
+

  lohit
  http://people.apache.org/~lohit;>Lohit 
Vijayarenu



-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



svn commit: r1756464 - /hadoop/common/site/main/author/src/documentation/content/xdocs/who.xml

2016-08-15 Thread liuml07
Author: liuml07
Date: Tue Aug 16 02:47:11 2016
New Revision: 1756464

URL: http://svn.apache.org/viewvc?rev=1756464&view=rev
Log:
Add Mingliang Liu (liuml07) to committer list

Modified:
hadoop/common/site/main/author/src/documentation/content/xdocs/who.xml

Modified: hadoop/common/site/main/author/src/documentation/content/xdocs/who.xml
URL: 
http://svn.apache.org/viewvc/hadoop/common/site/main/author/src/documentation/content/xdocs/who.xml?rev=1756464&r1=1756463&r2=1756464&view=diff
==
--- hadoop/common/site/main/author/src/documentation/content/xdocs/who.xml 
(original)
+++ hadoop/common/site/main/author/src/documentation/content/xdocs/who.xml Tue 
Aug 16 02:47:11 2016
@@ -302,6 +302,15 @@
  
  -8
 
+
+  
+liuml07
+http://people.apache.org/~liuml07;>Mingliang Liu
+Hortonworks
+HDFS
+-8
+  
+
 
   llu
   http://people.apache.org/~llu;>Luke Lu



-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HADOOP-13437. KMS should reload whitelist and default key ACLs when hot-reloading. Contributed by Xiao Chen.

2016-08-15 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 e78db7d2a -> 74156ee20


HADOOP-13437. KMS should reload whitelist and default key ACLs when 
hot-reloading. Contributed by Xiao Chen.

(cherry picked from commit 9daa9979a1f92fb3230361c10ddfcc1633795c0e)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/74156ee2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/74156ee2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/74156ee2

Branch: refs/heads/branch-2
Commit: 74156ee20a9334a436cc6aaaec464cb959adde8f
Parents: e78db7d
Author: Xiao Chen 
Authored: Mon Aug 15 18:13:58 2016 -0700
Committer: Xiao Chen 
Committed: Mon Aug 15 18:18:31 2016 -0700

--
 .../hadoop/crypto/key/kms/server/KMSACLs.java   |  75 
 .../crypto/key/kms/server/TestKMSACLs.java  | 174 ++-
 2 files changed, 207 insertions(+), 42 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/74156ee2/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSACLs.java
--
diff --git 
a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSACLs.java
 
b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSACLs.java
index 5b67950..c36fcf8 100644
--- 
a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSACLs.java
+++ 
b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSACLs.java
@@ -34,7 +34,6 @@ import java.util.Map;
 import java.util.concurrent.Executors;
 import java.util.concurrent.ScheduledExecutorService;
 import java.util.concurrent.TimeUnit;
-import java.util.regex.Pattern;
 
 import com.google.common.annotations.VisibleForTesting;
 
@@ -74,10 +73,10 @@ public class KMSACLs implements Runnable, KeyACLs {
   private volatile Map blacklistedAcls;
   @VisibleForTesting
   volatile Map> keyAcls;
-  private final Map defaultKeyAcls =
-  new HashMap();
-  private final Map whitelistKeyAcls =
-  new HashMap();
+  @VisibleForTesting
+  volatile Map defaultKeyAcls = new HashMap<>();
+  @VisibleForTesting
+  volatile Map whitelistKeyAcls = new 
HashMap<>();
   private ScheduledExecutorService executorService;
   private long lastReload;
 
@@ -111,7 +110,8 @@ public class KMSACLs implements Runnable, KeyACLs {
 blacklistedAcls = tempBlacklist;
   }
 
-  private void setKeyACLs(Configuration conf) {
+  @VisibleForTesting
+  void setKeyACLs(Configuration conf) {
 Map> tempKeyAcls =
 new HashMap>();
 Map allKeyACLS =
@@ -148,38 +148,43 @@ public class KMSACLs implements Runnable, KeyACLs {
 }
   }
 }
-
 keyAcls = tempKeyAcls;
+
+final Map tempDefaults = new HashMap<>();
+final Map tempWhitelists = new HashMap<>();
 for (KeyOpType keyOp : KeyOpType.values()) {
-  if (!defaultKeyAcls.containsKey(keyOp)) {
-String confKey = KMSConfiguration.DEFAULT_KEY_ACL_PREFIX + keyOp;
-String aclStr = conf.get(confKey);
-if (aclStr != null) {
-  if (keyOp == KeyOpType.ALL) {
-// Ignore All operation for default key acl
-LOG.warn("Should not configure default key ACL for KEY_OP '{}'", 
keyOp);
-  } else {
-if (aclStr.equals("*")) {
-  LOG.info("Default Key ACL for KEY_OP '{}' is set to '*'", keyOp);
-}
-defaultKeyAcls.put(keyOp, new AccessControlList(aclStr));
-  }
-}
-  }
-  if (!whitelistKeyAcls.containsKey(keyOp)) {
-String confKey = KMSConfiguration.WHITELIST_KEY_ACL_PREFIX + keyOp;
-String aclStr = conf.get(confKey);
-if (aclStr != null) {
-  if (keyOp == KeyOpType.ALL) {
-// Ignore All operation for whitelist key acl
-LOG.warn("Should not configure whitelist key ACL for KEY_OP '{}'", 
keyOp);
-  } else {
-if (aclStr.equals("*")) {
-  LOG.info("Whitelist Key ACL for KEY_OP '{}' is set to '*'", 
keyOp);
-}
-whitelistKeyAcls.put(keyOp, new AccessControlList(aclStr));
-  }
+  parseAclsWithPrefix(conf, KMSConfiguration.DEFAULT_KEY_ACL_PREFIX,
+  keyOp, tempDefaults);
+  

hadoop git commit: HADOOP-13437. KMS should reload whitelist and default key ACLs when hot-reloading. Contributed by Xiao Chen.

2016-08-15 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/trunk 864f878d5 -> 9daa9979a


HADOOP-13437. KMS should reload whitelist and default key ACLs when 
hot-reloading. Contributed by Xiao Chen.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9daa9979
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9daa9979
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9daa9979

Branch: refs/heads/trunk
Commit: 9daa9979a1f92fb3230361c10ddfcc1633795c0e
Parents: 864f878
Author: Xiao Chen 
Authored: Mon Aug 15 18:13:58 2016 -0700
Committer: Xiao Chen 
Committed: Mon Aug 15 18:14:45 2016 -0700

--
 .../hadoop/crypto/key/kms/server/KMSACLs.java   |  75 
 .../crypto/key/kms/server/TestKMSACLs.java  | 174 ++-
 2 files changed, 207 insertions(+), 42 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9daa9979/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSACLs.java
--
diff --git 
a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSACLs.java
 
b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSACLs.java
index 5b67950..c36fcf8 100644
--- 
a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSACLs.java
+++ 
b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSACLs.java
@@ -34,7 +34,6 @@ import java.util.Map;
 import java.util.concurrent.Executors;
 import java.util.concurrent.ScheduledExecutorService;
 import java.util.concurrent.TimeUnit;
-import java.util.regex.Pattern;
 
 import com.google.common.annotations.VisibleForTesting;
 
@@ -74,10 +73,10 @@ public class KMSACLs implements Runnable, KeyACLs {
   private volatile Map blacklistedAcls;
   @VisibleForTesting
   volatile Map> keyAcls;
-  private final Map defaultKeyAcls =
-  new HashMap();
-  private final Map whitelistKeyAcls =
-  new HashMap();
+  @VisibleForTesting
+  volatile Map defaultKeyAcls = new HashMap<>();
+  @VisibleForTesting
+  volatile Map whitelistKeyAcls = new 
HashMap<>();
   private ScheduledExecutorService executorService;
   private long lastReload;
 
@@ -111,7 +110,8 @@ public class KMSACLs implements Runnable, KeyACLs {
 blacklistedAcls = tempBlacklist;
   }
 
-  private void setKeyACLs(Configuration conf) {
+  @VisibleForTesting
+  void setKeyACLs(Configuration conf) {
 Map> tempKeyAcls =
 new HashMap>();
 Map allKeyACLS =
@@ -148,38 +148,43 @@ public class KMSACLs implements Runnable, KeyACLs {
 }
   }
 }
-
 keyAcls = tempKeyAcls;
+
+final Map tempDefaults = new HashMap<>();
+final Map tempWhitelists = new HashMap<>();
 for (KeyOpType keyOp : KeyOpType.values()) {
-  if (!defaultKeyAcls.containsKey(keyOp)) {
-String confKey = KMSConfiguration.DEFAULT_KEY_ACL_PREFIX + keyOp;
-String aclStr = conf.get(confKey);
-if (aclStr != null) {
-  if (keyOp == KeyOpType.ALL) {
-// Ignore All operation for default key acl
-LOG.warn("Should not configure default key ACL for KEY_OP '{}'", 
keyOp);
-  } else {
-if (aclStr.equals("*")) {
-  LOG.info("Default Key ACL for KEY_OP '{}' is set to '*'", keyOp);
-}
-defaultKeyAcls.put(keyOp, new AccessControlList(aclStr));
-  }
-}
-  }
-  if (!whitelistKeyAcls.containsKey(keyOp)) {
-String confKey = KMSConfiguration.WHITELIST_KEY_ACL_PREFIX + keyOp;
-String aclStr = conf.get(confKey);
-if (aclStr != null) {
-  if (keyOp == KeyOpType.ALL) {
-// Ignore All operation for whitelist key acl
-LOG.warn("Should not configure whitelist key ACL for KEY_OP '{}'", 
keyOp);
-  } else {
-if (aclStr.equals("*")) {
-  LOG.info("Whitelist Key ACL for KEY_OP '{}' is set to '*'", 
keyOp);
-}
-whitelistKeyAcls.put(keyOp, new AccessControlList(aclStr));
-  }
+  parseAclsWithPrefix(conf, KMSConfiguration.DEFAULT_KEY_ACL_PREFIX,
+  keyOp, tempDefaults);
+  parseAclsWithPrefix(conf, KMSConfiguration.WHITELIST_KEY_ACL_PREFIX,
+  keyOp, 

hadoop git commit: HDFS-10763. Open files can leak permanently due to inconsistent lease update. Contributed by Kihwal Lee.

2016-08-15 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 7519956c3 -> 71d0e4fca


HDFS-10763. Open files can leak permanently due to inconsistent lease update. 
Contributed by Kihwal Lee.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/71d0e4fc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/71d0e4fc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/71d0e4fc

Branch: refs/heads/branch-2.7
Commit: 71d0e4fca79d4305fe35e44b614703d3b9883017
Parents: 7519956
Author: Kihwal Lee 
Authored: Mon Aug 15 17:40:07 2016 -0500
Committer: Kihwal Lee 
Committed: Mon Aug 15 17:40:07 2016 -0500

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 ++
 .../server/namenode/FSImageFormatPBINode.java   | 12 --
 .../hdfs/server/namenode/FSNamesystem.java  |  3 +-
 .../hdfs/server/namenode/TestLeaseManager.java  | 43 
 4 files changed, 57 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/71d0e4fc/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 0ff7bba..a4fcf86 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -255,6 +255,9 @@ Release 2.7.3 - UNRELEASED
 
 HDFS-9696. Garbage snapshot records linger forever. (kihwal)
 
+HDFS-10763. Open files can leak permanently due to inconsistent lease
+update. (kihwal)
+
 Release 2.7.2 - 2016-01-25
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/71d0e4fc/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
index dfd150a..7206b2f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
@@ -220,11 +220,13 @@ public final class FSImageFormatPBINode {
 private final FSDirectory dir;
 private final FSNamesystem fsn;
 private final FSImageFormatProtobuf.Loader parent;
+private final List ucFiles;
 
 Loader(FSNamesystem fsn, final FSImageFormatProtobuf.Loader parent) {
   this.fsn = fsn;
   this.dir = fsn.dir;
   this.parent = parent;
+  this.ucFiles = new ArrayList();
 }
 
 void loadINodeDirectorySection(InputStream in) throws IOException {
@@ -268,17 +270,20 @@ public final class FSImageFormatPBINode {
  * Load the under-construction files section, and update the lease map
  */
 void loadFilesUnderConstructionSection(InputStream in) throws IOException {
+ // This section is consumed, but not actually used for restoring leases.
   while (true) {
 FileUnderConstructionEntry entry = FileUnderConstructionEntry
 .parseDelimitedFrom(in);
 if (entry == null) {
   break;
 }
-// update the lease manager
-INodeFile file = dir.getInode(entry.getInodeId()).asFile();
+  }
+
+  // Add a lease for each and every file under construction.
+  for (INodeFile file : ucFiles) {
 FileUnderConstructionFeature uc = 
file.getFileUnderConstructionFeature();
 Preconditions.checkState(uc != null); // file must be 
under-construction
-fsn.leaseManager.addLease(uc.getClientName(), entry.getFullPath());
+fsn.leaseManager.addLease(uc.getClientName(), file.getFullPathName());
   }
 }
 
@@ -346,6 +351,7 @@ public final class FSImageFormatPBINode {
 
   // under-construction information
   if (f.hasFileUC()) {
+ucFiles.add(file);
 INodeSection.FileUnderConstructionFeature uc = f.getFileUC();
 file.toUnderConstruction(uc.getClientName(), uc.getClientMachine());
 if (blocks.length > 0) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/71d0e4fc/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index bef296a..3887ac3 100644
--- 

hadoop git commit: HDFS-10763. Open files can leak permanently due to inconsistent lease update. Contributed by Kihwal Lee.

2016-08-15 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 6fed7b9c4 -> 1a8280edd


HDFS-10763. Open files can leak permanently due to inconsistent lease update. 
Contributed by Kihwal Lee.

(cherry picked from commit 864f878d5912c82f3204f1582cfb7eb7c9f1a1da)
(cherry picked from commit e78db7d2a430983807750666fb72ebd5c97ce867)

Conflicts:

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1a8280ed
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1a8280ed
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1a8280ed

Branch: refs/heads/branch-2.8
Commit: 1a8280edde5cff2393336d0a1cde4a532153f50a
Parents: 6fed7b9
Author: Kihwal Lee 
Authored: Mon Aug 15 17:37:39 2016 -0500
Committer: Kihwal Lee 
Committed: Mon Aug 15 17:37:39 2016 -0500

--
 .../server/namenode/FSImageFormatPBINode.java   | 10 ++---
 .../hdfs/server/namenode/FSNamesystem.java  |  3 +-
 .../hdfs/server/namenode/TestLeaseManager.java  | 47 
 3 files changed, 53 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1a8280ed/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
index 3b01f6b..80879b0 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
@@ -278,18 +278,14 @@ public final class FSImageFormatPBINode {
  * Load the under-construction files section, and update the lease map
  */
 void loadFilesUnderConstructionSection(InputStream in) throws IOException {
+  // Leases are added when the inode section is loaded. This section is
+  // still read in for compatibility reasons.
   while (true) {
 FileUnderConstructionEntry entry = FileUnderConstructionEntry
 .parseDelimitedFrom(in);
 if (entry == null) {
   break;
 }
-// update the lease manager
-INodeFile file = dir.getInode(entry.getInodeId()).asFile();
-FileUnderConstructionFeature uc = 
file.getFileUnderConstructionFeature();
-Preconditions.checkState(uc != null); // file must be 
under-construction
-fsn.leaseManager.addLease(uc.getClientName(),
-entry.getInodeId());
   }
 }
 
@@ -360,6 +356,8 @@ public final class FSImageFormatPBINode {
   if (f.hasFileUC()) {
 INodeSection.FileUnderConstructionFeature uc = f.getFileUC();
 file.toUnderConstruction(uc.getClientName(), uc.getClientMachine());
+// update the lease manager
+fsn.leaseManager.addLease(uc.getClientName(), file.getId());
 if (blocks.length > 0) {
   BlockInfo lastBlk = file.getLastBlock();
   lastBlk.convertToBlockUnderConstruction(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1a8280ed/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 2948ece..6cba82e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -3339,7 +3339,6 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
   throw new IOException("Cannot finalize file " + src
   + " because it is not under construction");
 }
-leaseManager.removeLease(uc.getClientName(), pendingFile);
 
 pendingFile.recordModification(latestSnapshot);
 
@@ -3350,6 +3349,8 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
 allowCommittedBlock? numCommittedAllowed: 0,
 blockManager.getMinReplication());
 
+leaseManager.removeLease(uc.getClientName(), pendingFile);
+
 waitForLoadingFSImage();
 // close file and persist block allocations for this file
 closeFile(src, pendingFile);


hadoop git commit: HDFS-10763. Open files can leak permanently due to inconsistent lease update. Contributed by Kihwal Lee.

2016-08-15 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 1ef8d7a63 -> e78db7d2a


HDFS-10763. Open files can leak permanently due to inconsistent lease update. 
Contributed by Kihwal Lee.

(cherry picked from commit 864f878d5912c82f3204f1582cfb7eb7c9f1a1da)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e78db7d2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e78db7d2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e78db7d2

Branch: refs/heads/branch-2
Commit: e78db7d2a430983807750666fb72ebd5c97ce867
Parents: 1ef8d7a
Author: Kihwal Lee 
Authored: Mon Aug 15 17:33:16 2016 -0500
Committer: Kihwal Lee 
Committed: Mon Aug 15 17:33:16 2016 -0500

--
 .../server/namenode/FSImageFormatPBINode.java   | 10 ++---
 .../hdfs/server/namenode/FSNamesystem.java  |  3 +-
 .../hdfs/server/namenode/TestLeaseManager.java  | 47 
 3 files changed, 53 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e78db7d2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
index 3b01f6b..80879b0 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
@@ -278,18 +278,14 @@ public final class FSImageFormatPBINode {
  * Load the under-construction files section, and update the lease map
  */
 void loadFilesUnderConstructionSection(InputStream in) throws IOException {
+  // Leases are added when the inode section is loaded. This section is
+  // still read in for compatibility reasons.
   while (true) {
 FileUnderConstructionEntry entry = FileUnderConstructionEntry
 .parseDelimitedFrom(in);
 if (entry == null) {
   break;
 }
-// update the lease manager
-INodeFile file = dir.getInode(entry.getInodeId()).asFile();
-FileUnderConstructionFeature uc = 
file.getFileUnderConstructionFeature();
-Preconditions.checkState(uc != null); // file must be 
under-construction
-fsn.leaseManager.addLease(uc.getClientName(),
-entry.getInodeId());
   }
 }
 
@@ -360,6 +356,8 @@ public final class FSImageFormatPBINode {
   if (f.hasFileUC()) {
 INodeSection.FileUnderConstructionFeature uc = f.getFileUC();
 file.toUnderConstruction(uc.getClientName(), uc.getClientMachine());
+// update the lease manager
+fsn.leaseManager.addLease(uc.getClientName(), file.getId());
 if (blocks.length > 0) {
   BlockInfo lastBlk = file.getLastBlock();
   lastBlk.convertToBlockUnderConstruction(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e78db7d2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 9b4be65..2d9a069 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -3275,7 +3275,6 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
   throw new IOException("Cannot finalize file " + src
   + " because it is not under construction");
 }
-leaseManager.removeLease(uc.getClientName(), pendingFile);
 
 pendingFile.recordModification(latestSnapshot);
 
@@ -3286,6 +3285,8 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
 allowCommittedBlock? numCommittedAllowed: 0,
 blockManager.getMinReplication());
 
+leaseManager.removeLease(uc.getClientName(), pendingFile);
+
 // close file and persist block allocations for this file
 closeFile(src, pendingFile);
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e78db7d2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestLeaseManager.java
--
diff --git 

hadoop git commit: HDFS-10763. Open files can leak permanently due to inconsistent lease update. Contributed by Kihwal Lee.

2016-08-15 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/trunk 03dea65e0 -> 864f878d5


HDFS-10763. Open files can leak permanently due to inconsistent lease update. 
Contributed by Kihwal Lee.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/864f878d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/864f878d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/864f878d

Branch: refs/heads/trunk
Commit: 864f878d5912c82f3204f1582cfb7eb7c9f1a1da
Parents: 03dea65
Author: Kihwal Lee 
Authored: Mon Aug 15 17:28:09 2016 -0500
Committer: Kihwal Lee 
Committed: Mon Aug 15 17:28:09 2016 -0500

--
 .../server/namenode/FSImageFormatPBINode.java   | 10 ++---
 .../hdfs/server/namenode/FSNamesystem.java  |  3 +-
 .../hdfs/server/namenode/TestLeaseManager.java  | 47 
 3 files changed, 53 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/864f878d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
index 1ecd947..1456ecf 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
@@ -281,18 +281,14 @@ public final class FSImageFormatPBINode {
  * Load the under-construction files section, and update the lease map
  */
 void loadFilesUnderConstructionSection(InputStream in) throws IOException {
+  // Leases are added when the inode section is loaded. This section is
+  // still read in for compatibility reasons.
   while (true) {
 FileUnderConstructionEntry entry = FileUnderConstructionEntry
 .parseDelimitedFrom(in);
 if (entry == null) {
   break;
 }
-// update the lease manager
-INodeFile file = dir.getInode(entry.getInodeId()).asFile();
-FileUnderConstructionFeature uc = 
file.getFileUnderConstructionFeature();
-Preconditions.checkState(uc != null); // file must be 
under-construction
-fsn.leaseManager.addLease(uc.getClientName(),
-entry.getInodeId());
   }
 }
 
@@ -371,6 +367,8 @@ public final class FSImageFormatPBINode {
   if (f.hasFileUC()) {
 INodeSection.FileUnderConstructionFeature uc = f.getFileUC();
 file.toUnderConstruction(uc.getClientName(), uc.getClientMachine());
+// update the lease manager
+fsn.leaseManager.addLease(uc.getClientName(), file.getId());
 if (blocks.length > 0) {
   BlockInfo lastBlk = file.getLastBlock();
   // replace the last block of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/864f878d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index be084c5..0621a77 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -3329,7 +3329,6 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
   throw new IOException("Cannot finalize file " + src
   + " because it is not under construction");
 }
-leaseManager.removeLease(uc.getClientName(), pendingFile);
 
 pendingFile.recordModification(latestSnapshot);
 
@@ -3340,6 +3339,8 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
 allowCommittedBlock? numCommittedAllowed: 0,
 blockManager.getMinReplication());
 
+leaseManager.removeLease(uc.getClientName(), pendingFile);
+
 // close file and persist block allocations for this file
 closeFile(src, pendingFile);
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/864f878d/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestLeaseManager.java
--
diff --git 

hadoop git commit: HDFS-10744. Internally optimize path component resolution. Contributed by Daryn Sharp.

2016-08-15 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 5c2f2ae7b -> 1ef8d7a63


HDFS-10744. Internally optimize path component resolution. Contributed by Daryn 
Sharp.

(cherry picked from commit 03dea65e0b17ca2f9460bb6110f6ab3a321b8bf2)

Conflicts:

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1ef8d7a6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1ef8d7a6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1ef8d7a6

Branch: refs/heads/branch-2
Commit: 1ef8d7a638df5150b8426755af034839d5f88ca2
Parents: 5c2f2ae
Author: Kihwal Lee 
Authored: Mon Aug 15 17:01:40 2016 -0500
Committer: Kihwal Lee 
Committed: Mon Aug 15 17:01:40 2016 -0500

--
 .../hadoop/hdfs/server/namenode/FSDirAclOp.java |  18 ++--
 .../hdfs/server/namenode/FSDirAppendOp.java |   4 +-
 .../hdfs/server/namenode/FSDirAttrOp.java   |  22 ++--
 .../hdfs/server/namenode/FSDirDeleteOp.java |   5 +-
 .../server/namenode/FSDirEncryptionZoneOp.java  |   8 +-
 .../hdfs/server/namenode/FSDirMkdirOp.java  |   3 +-
 .../hdfs/server/namenode/FSDirRenameOp.java |  12 +--
 .../server/namenode/FSDirStatAndListingOp.java  |  27 ++---
 .../hdfs/server/namenode/FSDirSymlinkOp.java|   3 +-
 .../hdfs/server/namenode/FSDirTruncateOp.java   |   4 +-
 .../hdfs/server/namenode/FSDirWriteFileOp.java  |  15 +--
 .../hdfs/server/namenode/FSDirXAttrOp.java  |  13 +--
 .../hdfs/server/namenode/FSDirectory.java   | 100 +--
 .../hdfs/server/namenode/FSNamesystem.java  |  16 +--
 .../hdfs/server/namenode/TestINodeFile.java |  51 --
 15 files changed, 115 insertions(+), 186 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1ef8d7a6/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java
index 0c572b5..296bed2 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java
@@ -39,8 +39,7 @@ class FSDirAclOp {
 String src = srcArg;
 checkAclsConfigFlag(fsd);
 FSPermissionChecker pc = fsd.getPermissionChecker();
-byte[][] pathComponents = 
FSDirectory.getPathComponentsForReservedPath(src);
-src = fsd.resolvePath(pc, src, pathComponents);
+src = fsd.resolvePath(pc, src);
 INodesInPath iip;
 fsd.writeLock();
 try {
@@ -65,8 +64,7 @@ class FSDirAclOp {
 String src = srcArg;
 checkAclsConfigFlag(fsd);
 FSPermissionChecker pc = fsd.getPermissionChecker();
-byte[][] pathComponents = 
FSDirectory.getPathComponentsForReservedPath(src);
-src = fsd.resolvePath(pc, src, pathComponents);
+src = fsd.resolvePath(pc, src);
 INodesInPath iip;
 fsd.writeLock();
 try {
@@ -90,8 +88,7 @@ class FSDirAclOp {
 String src = srcArg;
 checkAclsConfigFlag(fsd);
 FSPermissionChecker pc = fsd.getPermissionChecker();
-byte[][] pathComponents = 
FSDirectory.getPathComponentsForReservedPath(src);
-src = fsd.resolvePath(pc, src, pathComponents);
+src = fsd.resolvePath(pc, src);
 INodesInPath iip;
 fsd.writeLock();
 try {
@@ -115,8 +112,7 @@ class FSDirAclOp {
 String src = srcArg;
 checkAclsConfigFlag(fsd);
 FSPermissionChecker pc = fsd.getPermissionChecker();
-byte[][] pathComponents = 
FSDirectory.getPathComponentsForReservedPath(src);
-src = fsd.resolvePath(pc, src, pathComponents);
+src = fsd.resolvePath(pc, src);
 INodesInPath iip;
 fsd.writeLock();
 try {
@@ -135,9 +131,8 @@ class FSDirAclOp {
   throws IOException {
 String src = srcArg;
 checkAclsConfigFlag(fsd);
-byte[][] pathComponents = 
FSDirectory.getPathComponentsForReservedPath(src);
 FSPermissionChecker pc = fsd.getPermissionChecker();
-src = fsd.resolvePath(pc, src, pathComponents);
+src = fsd.resolvePath(pc, src);
 INodesInPath iip;
 fsd.writeLock();
 try {
@@ -155,8 +150,7 @@ class FSDirAclOp {
   FSDirectory fsd, String src) throws IOException {
 checkAclsConfigFlag(fsd);
 FSPermissionChecker pc = fsd.getPermissionChecker();
-byte[][] pathComponents = 
FSDirectory.getPathComponentsForReservedPath(src);
-src = fsd.resolvePath(pc, src, pathComponents);
+src = fsd.resolvePath(pc, src);
 

hadoop git commit: YARN-5519. Add SubClusterId in AddApplicationHomeSubClusterResponse for Router Failover. (Ellen Hui via Subru)

2016-08-15 Thread subru
Repository: hadoop
Updated Branches:
  refs/heads/YARN-2915 b689f557b -> 0048ce1ba


YARN-5519. Add SubClusterId in AddApplicationHomeSubClusterResponse for Router 
Failover. (Ellen Hui via Subru)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0048ce1b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0048ce1b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0048ce1b

Branch: refs/heads/YARN-2915
Commit: 0048ce1ba643bbb0e4ccaf63b5cdac80b275adb9
Parents: b689f55
Author: Subru Krishnan 
Authored: Mon Aug 15 14:47:02 2016 -0700
Committer: Subru Krishnan 
Committed: Mon Aug 15 14:47:02 2016 -0700

--
 ...ederationApplicationHomeSubClusterStore.java | 21 +++---
 .../store/impl/MemoryFederationStateStore.java  | 22 +++---
 .../AddApplicationHomeSubClusterResponse.java   | 29 ++--
 ...ApplicationHomeSubClusterResponsePBImpl.java | 39 +++
 .../proto/yarn_server_federation_protos.proto   |  1 +
 .../impl/FederationStateStoreBaseTest.java  | 71 +---
 6 files changed, 120 insertions(+), 63 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0048ce1b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/FederationApplicationHomeSubClusterStore.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/FederationApplicationHomeSubClusterStore.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/FederationApplicationHomeSubClusterStore.java
index 22bb88a..ace2457 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/FederationApplicationHomeSubClusterStore.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/FederationApplicationHomeSubClusterStore.java
@@ -51,15 +51,20 @@ public interface FederationApplicationHomeSubClusterStore {
   /**
* Register the home {@code SubClusterId} of the newly submitted
* {@code ApplicationId}. Currently response is empty if the operation was
-   * successful, if not an exception reporting reason for a failure.
+   * successful, if not an exception reporting reason for a failure. If a
+   * mapping for the application already existed, the {@code SubClusterId} in
+   * this response will return the existing mapping which might be different
+   * from that in the {@code AddApplicationHomeSubClusterRequest}.
*
* @param request the request to register a new application with its home
*  sub-cluster
-   * @return empty on successful registration of the application in the
-   * StateStore, if not an exception reporting reason for a failure
+   * @return upon successful registration of the application in the StateStore,
+   * {@code AddApplicationHomeSubClusterRequest} containing the home
+   * sub-cluster of the application. Otherwise, an exception reporting
+   * reason for a failure
* @throws YarnException if the request is invalid/fails
*/
-  AddApplicationHomeSubClusterResponse addApplicationHomeSubClusterMap(
+  AddApplicationHomeSubClusterResponse addApplicationHomeSubCluster(
   AddApplicationHomeSubClusterRequest request) throws YarnException;
 
   /**
@@ -73,7 +78,7 @@ public interface FederationApplicationHomeSubClusterStore {
* not an exception reporting reason for a failure
* @throws YarnException if the request is invalid/fails
*/
-  UpdateApplicationHomeSubClusterResponse updateApplicationHomeSubClusterMap(
+  UpdateApplicationHomeSubClusterResponse updateApplicationHomeSubCluster(
   UpdateApplicationHomeSubClusterRequest request) throws YarnException;
 
   /**
@@ -85,7 +90,7 @@ public interface FederationApplicationHomeSubClusterStore {
* subcluster
* @throws YarnException if the request is invalid/fails
*/
-  GetApplicationHomeSubClusterResponse getApplicationHomeSubClusterMap(
+  GetApplicationHomeSubClusterResponse getApplicationHomeSubCluster(
   GetApplicationHomeSubClusterRequest request) throws YarnException;
 
   /**
@@ -96,7 +101,7 @@ public interface FederationApplicationHomeSubClusterStore {
* @return the mapping of all submitted application to it's home sub-cluster
* @throws YarnException if the request is invalid/fails
*/
-  GetApplicationsHomeSubClusterResponse getApplicationsHomeSubClusterMap(
+  

hadoop git commit: HDFS-10744. Internally optimize path component resolution. Contributed by Daryn Sharp.

2016-08-15 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/trunk d714030b5 -> 03dea65e0


HDFS-10744. Internally optimize path component resolution. Contributed by Daryn 
Sharp.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/03dea65e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/03dea65e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/03dea65e

Branch: refs/heads/trunk
Commit: 03dea65e0b17ca2f9460bb6110f6ab3a321b8bf2
Parents: d714030
Author: Kihwal Lee 
Authored: Mon Aug 15 16:44:18 2016 -0500
Committer: Kihwal Lee 
Committed: Mon Aug 15 16:45:44 2016 -0500

--
 .../hadoop/hdfs/server/namenode/FSDirAclOp.java |  18 ++--
 .../hdfs/server/namenode/FSDirAppendOp.java |   4 +-
 .../hdfs/server/namenode/FSDirAttrOp.java   |  22 ++--
 .../hdfs/server/namenode/FSDirDeleteOp.java |   5 +-
 .../server/namenode/FSDirEncryptionZoneOp.java  |   8 +-
 .../server/namenode/FSDirErasureCodingOp.java   |   8 +-
 .../hdfs/server/namenode/FSDirMkdirOp.java  |   3 +-
 .../hdfs/server/namenode/FSDirRenameOp.java |  12 +--
 .../server/namenode/FSDirStatAndListingOp.java  |  27 ++---
 .../hdfs/server/namenode/FSDirSymlinkOp.java|   3 +-
 .../hdfs/server/namenode/FSDirTruncateOp.java   |   4 +-
 .../hdfs/server/namenode/FSDirWriteFileOp.java  |  15 +--
 .../hdfs/server/namenode/FSDirXAttrOp.java  |  13 +--
 .../hdfs/server/namenode/FSDirectory.java   | 100 +--
 .../hdfs/server/namenode/FSNamesystem.java  |  16 +--
 .../hdfs/server/namenode/TestINodeFile.java |  53 +-
 16 files changed, 118 insertions(+), 193 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/03dea65e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java
index 0c572b5..296bed2 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java
@@ -39,8 +39,7 @@ class FSDirAclOp {
 String src = srcArg;
 checkAclsConfigFlag(fsd);
 FSPermissionChecker pc = fsd.getPermissionChecker();
-byte[][] pathComponents = 
FSDirectory.getPathComponentsForReservedPath(src);
-src = fsd.resolvePath(pc, src, pathComponents);
+src = fsd.resolvePath(pc, src);
 INodesInPath iip;
 fsd.writeLock();
 try {
@@ -65,8 +64,7 @@ class FSDirAclOp {
 String src = srcArg;
 checkAclsConfigFlag(fsd);
 FSPermissionChecker pc = fsd.getPermissionChecker();
-byte[][] pathComponents = 
FSDirectory.getPathComponentsForReservedPath(src);
-src = fsd.resolvePath(pc, src, pathComponents);
+src = fsd.resolvePath(pc, src);
 INodesInPath iip;
 fsd.writeLock();
 try {
@@ -90,8 +88,7 @@ class FSDirAclOp {
 String src = srcArg;
 checkAclsConfigFlag(fsd);
 FSPermissionChecker pc = fsd.getPermissionChecker();
-byte[][] pathComponents = 
FSDirectory.getPathComponentsForReservedPath(src);
-src = fsd.resolvePath(pc, src, pathComponents);
+src = fsd.resolvePath(pc, src);
 INodesInPath iip;
 fsd.writeLock();
 try {
@@ -115,8 +112,7 @@ class FSDirAclOp {
 String src = srcArg;
 checkAclsConfigFlag(fsd);
 FSPermissionChecker pc = fsd.getPermissionChecker();
-byte[][] pathComponents = 
FSDirectory.getPathComponentsForReservedPath(src);
-src = fsd.resolvePath(pc, src, pathComponents);
+src = fsd.resolvePath(pc, src);
 INodesInPath iip;
 fsd.writeLock();
 try {
@@ -135,9 +131,8 @@ class FSDirAclOp {
   throws IOException {
 String src = srcArg;
 checkAclsConfigFlag(fsd);
-byte[][] pathComponents = 
FSDirectory.getPathComponentsForReservedPath(src);
 FSPermissionChecker pc = fsd.getPermissionChecker();
-src = fsd.resolvePath(pc, src, pathComponents);
+src = fsd.resolvePath(pc, src);
 INodesInPath iip;
 fsd.writeLock();
 try {
@@ -155,8 +150,7 @@ class FSDirAclOp {
   FSDirectory fsd, String src) throws IOException {
 checkAclsConfigFlag(fsd);
 FSPermissionChecker pc = fsd.getPermissionChecker();
-byte[][] pathComponents = 
FSDirectory.getPathComponentsForReservedPath(src);
-src = fsd.resolvePath(pc, src, pathComponents);
+src = fsd.resolvePath(pc, src);
 String srcs = FSDirectory.normalizePath(src);
 fsd.readLock();
 try {


hadoop git commit: HADOOP-13333. testConf.xml ls comparators in wrong order (Vrushali C via Varun Saxena)

2016-08-15 Thread varunsaxena
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 74831973c -> 6fed7b9c4


HADOOP-13333. testConf.xml ls comparators in wrong order (Vrushali C via Varun 
Saxena)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6fed7b9c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6fed7b9c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6fed7b9c

Branch: refs/heads/branch-2.8
Commit: 6fed7b9c457e3f19404b435034a1dd869b266855
Parents: 7483197
Author: Varun Saxena 
Authored: Tue Aug 16 03:14:24 2016 +0530
Committer: Varun Saxena 
Committed: Tue Aug 16 03:14:24 2016 +0530

--
 .../hadoop-common/src/test/resources/testConf.xml| 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6fed7b9c/hadoop-common-project/hadoop-common/src/test/resources/testConf.xml
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/resources/testConf.xml 
b/hadoop-common-project/hadoop-common/src/test/resources/testConf.xml
index dbbe401..d497dc9 100644
--- a/hadoop-common-project/hadoop-common/src/test/resources/testConf.xml
+++ b/hadoop-common-project/hadoop-common/src/test/resources/testConf.xml
@@ -106,11 +106,11 @@
 
 
   RegexpComparator
-  ^\s*-q\s+Print \? instead of non-printable 
characters\.( )*
+  ^\s*rather than a number of bytes\.( 
)*
 
 
   RegexpComparator
-  ^\s*rather than a number of bytes\.( 
)*
+  ^\s*-q\s+Print \? instead of non-printable 
characters\.( )*
 
 
   RegexpComparator


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HADOOP-13333. testConf.xml ls comparators in wrong order (Vrushali C via Varun Saxena)

2016-08-15 Thread varunsaxena
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 354afbb9d -> 5c2f2ae7b


HADOOP-13333. testConf.xml ls comparators in wrong order (Vrushali C via Varun 
Saxena)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5c2f2ae7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5c2f2ae7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5c2f2ae7

Branch: refs/heads/branch-2
Commit: 5c2f2ae7bc55a029e8a2d160d4645cac27154fbd
Parents: 354afbb
Author: Varun Saxena 
Authored: Tue Aug 16 03:05:16 2016 +0530
Committer: Varun Saxena 
Committed: Tue Aug 16 03:05:16 2016 +0530

--
 .../hadoop-common/src/test/resources/testConf.xml| 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5c2f2ae7/hadoop-common-project/hadoop-common/src/test/resources/testConf.xml
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/resources/testConf.xml 
b/hadoop-common-project/hadoop-common/src/test/resources/testConf.xml
index dbbe401..d497dc9 100644
--- a/hadoop-common-project/hadoop-common/src/test/resources/testConf.xml
+++ b/hadoop-common-project/hadoop-common/src/test/resources/testConf.xml
@@ -106,11 +106,11 @@
 
 
   RegexpComparator
-  ^\s*-q\s+Print \? instead of non-printable 
characters\.( )*
+  ^\s*rather than a number of bytes\.( 
)*
 
 
   RegexpComparator
-  ^\s*rather than a number of bytes\.( 
)*
+  ^\s*-q\s+Print \? instead of non-printable 
characters\.( )*
 
 
   RegexpComparator


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HADOOP-13333. testConf.xml ls comparators in wrong order (Vrushali C via Varun Saxena)

2016-08-15 Thread varunsaxena
Repository: hadoop
Updated Branches:
  refs/heads/trunk bed69d18e -> d714030b5


HADOOP-13333. testConf.xml ls comparators in wrong order (Vrushali C via Varun 
Saxena)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d714030b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d714030b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d714030b

Branch: refs/heads/trunk
Commit: d714030b5d4124f307c09d716d72a9f5a4a25995
Parents: bed69d1
Author: Varun Saxena 
Authored: Tue Aug 16 03:03:44 2016 +0530
Committer: Varun Saxena 
Committed: Tue Aug 16 03:03:44 2016 +0530

--
 .../hadoop-common/src/test/resources/testConf.xml| 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d714030b/hadoop-common-project/hadoop-common/src/test/resources/testConf.xml
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/resources/testConf.xml 
b/hadoop-common-project/hadoop-common/src/test/resources/testConf.xml
index bbbc1ec..82bc789 100644
--- a/hadoop-common-project/hadoop-common/src/test/resources/testConf.xml
+++ b/hadoop-common-project/hadoop-common/src/test/resources/testConf.xml
@@ -106,11 +106,11 @@
 
 
   RegexpComparator
-  ^\s*-q\s+Print \? instead of non-printable 
characters\.( )*
+  ^\s*rather than a number of bytes\.( 
)*
 
 
   RegexpComparator
-  ^\s*rather than a number of bytes\.( 
)*
+  ^\s*-q\s+Print \? instead of non-printable 
characters\.( )*
 
 
   RegexpComparator


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDFS-9696. Garbage snapshot records linger forever. Adding an entry to CHANGES.txt.

2016-08-15 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 e3b809a28 -> 7519956c3


HDFS-9696. Garbage snapshot records linger forever. Adding an entry to 
CHANGES.txt.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7519956c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7519956c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7519956c

Branch: refs/heads/branch-2.7
Commit: 7519956c30ee5072f1a8186410f842cb28521e2b
Parents: e3b809a
Author: Kihwal Lee 
Authored: Mon Aug 15 15:50:54 2016 -0500
Committer: Kihwal Lee 
Committed: Mon Aug 15 15:50:54 2016 -0500

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 ++
 1 file changed, 2 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7519956c/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 9efd8a4..0ff7bba 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -253,6 +253,8 @@ Release 2.7.3 - UNRELEASED
 HDFS-10623. Remove unused import of httpclient.HttpConnection from
 TestWebHdfsTokens. (Hanish Koneru via Arpit Agarwal)
 
+HDFS-9696. Garbage snapshot records linger forever. (kihwal)
+
 Release 2.7.2 - 2016-01-25
 
   INCOMPATIBLE CHANGES


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] Git Push Summary

2016-08-15 Thread vinodkv
Repository: hadoop
Updated Tags:  refs/tags/release-2.7.3-RC1 2c543d156 -> 98cacdb50

-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: Set the release date for 2.7.3-RC1

2016-08-15 Thread vinodkv
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7.3 5474c9e73 -> 7a4746cbb


Set the release date for 2.7.3-RC1


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7a4746cb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7a4746cb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7a4746cb

Branch: refs/heads/branch-2.7.3
Commit: 7a4746cbbd4eb019219ab316aa757613b318d16f
Parents: 5474c9e
Author: Vinod Kumar Vavilapalli (I am also known as @tshooter.) 

Authored: Mon Aug 15 12:52:39 2016 -0700
Committer: Vinod Kumar Vavilapalli (I am also known as @tshooter.) 

Committed: Mon Aug 15 12:52:39 2016 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt | 2 +-
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 +-
 hadoop-mapreduce-project/CHANGES.txt| 2 +-
 hadoop-yarn-project/CHANGES.txt | 2 +-
 4 files changed, 4 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7a4746cb/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 2272388..3777e32 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -1,6 +1,6 @@
 Hadoop Change Log
 
-Release 2.7.3 - 2016-09-19
+Release 2.7.3 - 2016-08-19
 
   INCOMPATIBLE CHANGES
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7a4746cb/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index e467646..748489d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1,6 +1,6 @@
 Hadoop HDFS Change Log
 
-Release 2.7.3 - 2016-09-19
+Release 2.7.3 - 2016-08-19
 
   INCOMPATIBLE CHANGES
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7a4746cb/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index fff3b1d..26b71a5 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -1,6 +1,6 @@
 Hadoop MapReduce Change Log
 
-Release 2.7.3 - 2016-09-19
+Release 2.7.3 - 2016-08-19
 
   INCOMPATIBLE CHANGES
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7a4746cb/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index de33018..9f8b014 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -1,6 +1,6 @@
 Hadoop YARN Change Log
 
-Release 2.7.3 - 2016-09-19
+Release 2.7.3 - 2016-08-19
 
   INCOMPATIBLE CHANGES
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDFS-10580. DiskBalancer: Make use of unused methods in GreedyPlanner to print debug info. Contributed by Yiqun Lin

2016-08-15 Thread aengineer
Repository: hadoop
Updated Branches:
  refs/heads/trunk 24249115b -> bed69d18e


HDFS-10580. DiskBalancer: Make use of unused methods in GreedyPlanner to print 
debug info. Contributed by Yiqun Lin


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bed69d18
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bed69d18
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bed69d18

Branch: refs/heads/trunk
Commit: bed69d18e6c84583cbe5fd765f068d9faa807617
Parents: 2424911
Author: Anu Engineer 
Authored: Mon Aug 15 12:40:29 2016 -0700
Committer: Anu Engineer 
Committed: Mon Aug 15 12:40:29 2016 -0700

--
 .../diskbalancer/planner/GreedyPlanner.java | 45 +++-
 1 file changed, 25 insertions(+), 20 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bed69d18/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/planner/GreedyPlanner.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/planner/GreedyPlanner.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/planner/GreedyPlanner.java
index 0df9843..fb83eeb 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/planner/GreedyPlanner.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/planner/GreedyPlanner.java
@@ -158,6 +158,7 @@ public class GreedyPlanner implements Planner {
 
 // since the volume data changed , we need to recompute the DataDensity.
 currentSet.computeVolumeDataDensity();
+printQueue(currentSet.getSortedQueue());
   }
 
   /**
@@ -184,7 +185,7 @@ public class GreedyPlanner implements Planner {
 if (maxLowVolumeCanReceive <= 0) {
   LOG.debug("{} Skipping disk from computation. Maximum data size " +
   "achieved.", lowVolume.getPath());
-  lowVolume.setSkip(true);
+  skipVolume(currentSet, lowVolume);
 }
 
 long maxHighVolumeCanGive = highVolume.getUsed() -
@@ -195,7 +196,7 @@ public class GreedyPlanner implements Planner {
 if (maxHighVolumeCanGive <= 0) {
   LOG.debug(" {} Skipping disk from computation. Minimum data size " +
   "achieved.", highVolume.getPath());
-  highVolume.setSkip(true);
+  skipVolume(currentSet, highVolume);
 }
 
 
@@ -219,16 +220,19 @@ public class GreedyPlanner implements Planner {
*/
   private void skipVolume(DiskBalancerVolumeSet currentSet,
   DiskBalancerVolume volume) {
-
-String message = String.format(
-"Skipping volume. Volume : %s " +
-"Type : %s Target " +
-"Number of bytes : %f lowVolume dfsUsed : %d. Skipping this " +
-"volume from all future balancing calls.", volume.getPath(),
-volume.getStorageType(),
-currentSet.getIdealUsed() * volume.getCapacity(), volume.getUsed());
+if (LOG.isDebugEnabled()) {
+  String message =
+  String.format(
+  "Skipping volume. Volume : %s " +
+  "Type : %s Target " +
+  "Number of bytes : %f lowVolume dfsUsed : %d. Skipping this " +
+  "volume from all future balancing calls.", volume.getPath(),
+  volume.getStorageType(),
+  currentSet.getIdealUsed() * volume.getCapacity(),
+  volume.getUsed());
+  LOG.debug(message);
+}
 volume.setSkip(true);
-LOG.debug(message);
   }
 
   // Removes all volumes which are part of the volumeSet but skip flag is set.
@@ -242,6 +246,7 @@ public class GreedyPlanner implements Planner {
   }
 }
 currentSet.computeVolumeDataDensity();
+printQueue(currentSet.getSortedQueue());
   }
 
   /**
@@ -251,14 +256,14 @@ public class GreedyPlanner implements Planner {
* @param queue - Queue
*/
   private void printQueue(TreeSet queue) {
-String format = String.format("First Volume : %s, DataDensity : %f",
-queue.first().getPath(),
-queue.first().getVolumeDataDensity());
-LOG.info(format);
-
-format = String
-.format("Last Volume : %s, DataDensity : %f%n", queue.last().getPath(),
-queue.last().getVolumeDataDensity());
-LOG.info(format);
+if (LOG.isDebugEnabled()) {
+  String format =
+  String.format(
+  "First Volume : %s, DataDensity : %f, " +
+  "Last Volume : %s, DataDensity : %f",
+  queue.first().getPath(), queue.first().getVolumeDataDensity(),
+  queue.last().getPath(), queue.last().getVolumeDataDensity());
+  LOG.debug(format);
+}
   }
 }

hadoop git commit: YARN-4566. Fix test failure in TestMiniYarnClusterNodeUtilization. (Takashi Ohnishi via rohithsharmaks) (cherry picked from commit e0b14f26f5201a149218276469434df511697acc)

2016-08-15 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 1a38dd9ce -> 74831973c


YARN-4566. Fix test failure in TestMiniYarnClusterNodeUtilization. (Takashi 
Ohnishi via rohithsharmaks)
(cherry picked from commit e0b14f26f5201a149218276469434df511697acc)

Conflicts:

hadoop-yarn-project/CHANGES.txt


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/74831973
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/74831973
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/74831973

Branch: refs/heads/branch-2.8
Commit: 74831973cb161bcc61eadc9574478e54260a21b6
Parents: 1a38dd9
Author: Jason Lowe 
Authored: Mon Aug 15 19:45:45 2016 +
Committer: Jason Lowe 
Committed: Mon Aug 15 19:45:45 2016 +

--
 .../hadoop/yarn/server/TestMiniYarnClusterNodeUtilization.java | 6 --
 1 file changed, 4 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/74831973/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestMiniYarnClusterNodeUtilization.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestMiniYarnClusterNodeUtilization.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestMiniYarnClusterNodeUtilization.java
index cf83e67..49a82c8 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestMiniYarnClusterNodeUtilization.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestMiniYarnClusterNodeUtilization.java
@@ -214,8 +214,10 @@ public class TestMiniYarnClusterNodeUtilization {
 // We check if the nodeUtilization is up to date
 for (int i=0; i<100; i++) {
   for (RMNode ni : rmContext.getRMNodes().values()) {
-if (ni.getNodeUtilization().equals(nodeUtilization)) {
-  break;
+if (ni.getNodeUtilization() != null) {
+if (ni.getNodeUtilization().equals(nodeUtilization)) {
+  break;
+}
 }
   }
   Thread.sleep(100);


-
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org



hadoop git commit: YARN-4916. TestNMProxy.tesNMProxyRPCRetry fails. Contributed by Tibor Kiss. (cherry picked from commit 00058167431475c6e63c80207424f1d365569e3a)

2016-08-15 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 579709fc2 -> 1a38dd9ce


YARN-4916. TestNMProxy.tesNMProxyRPCRetry fails. Contributed by Tibor Kiss.
(cherry picked from commit 00058167431475c6e63c80207424f1d365569e3a)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1a38dd9c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1a38dd9c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1a38dd9c

Branch: refs/heads/branch-2.8
Commit: 1a38dd9ceefd972869c4550a502cb169544cd423
Parents: 579709f
Author: Junping Du 
Authored: Tue Apr 5 09:01:08 2016 -0700
Committer: Jason Lowe 
Committed: Mon Aug 15 19:18:36 2016 +

--
 .../yarn/server/nodemanager/containermanager/TestNMProxy.java | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1a38dd9c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestNMProxy.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestNMProxy.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestNMProxy.java
index 7ce15c5..46b32de 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestNMProxy.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestNMProxy.java
@@ -188,8 +188,7 @@ public class TestNMProxy extends BaseContainerManagerTest {
   Assert.fail("should get socket exception");
 } catch (IOException e) {
   // socket exception should be thrown immediately, without RPC retries.
-  Assert.assertTrue(e.toString().
-  contains("Failed on local exception: java.net.SocketException"));
+  Assert.assertTrue(e instanceof java.net.SocketException);
 }
   }
 


-
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org



hadoop git commit: YARN-5521. Fix random failure of TestCapacityScheduler#testKillAllAppsInQueue (sandflee via Varun Saxena)

2016-08-15 Thread varunsaxena
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 4766a3153 -> 354afbb9d


YARN-5521. Fix random failure of TestCapacityScheduler#testKillAllAppsInQueue 
(sandflee via Varun Saxena)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/354afbb9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/354afbb9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/354afbb9

Branch: refs/heads/branch-2
Commit: 354afbb9df898406ca8aa7086abb1831dab096d2
Parents: 4766a31
Author: Varun Saxena 
Authored: Tue Aug 16 00:04:40 2016 +0530
Committer: Varun Saxena 
Committed: Tue Aug 16 00:04:40 2016 +0530

--
 .../resourcemanager/scheduler/capacity/TestCapacityScheduler.java   | 1 +
 1 file changed, 1 insertion(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/354afbb9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
index 0b52b86..09c16d0 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
@@ -2184,6 +2184,7 @@ public class TestCapacityScheduler {
 
 // check postconditions
 rm.waitForState(app.getApplicationId(), RMAppState.KILLED);
+rm.waitForAppRemovedFromScheduler(app.getApplicationId());
 appsInRoot = scheduler.getAppsInQueue("root");
 assertTrue(appsInRoot.isEmpty());
 


-
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org



hadoop git commit: YARN-5521. Fix random failure of TestCapacityScheduler#testKillAllAppsInQueue (sandflee via Varun Saxena)

2016-08-15 Thread varunsaxena
Repository: hadoop
Updated Branches:
  refs/heads/trunk 83e57e083 -> 24249115b


YARN-5521. Fix random failure of TestCapacityScheduler#testKillAllAppsInQueue 
(sandflee via Varun Saxena)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/24249115
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/24249115
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/24249115

Branch: refs/heads/trunk
Commit: 24249115bff3162c4202387da5bdd8eba13e6961
Parents: 83e57e0
Author: Varun Saxena 
Authored: Tue Aug 16 00:03:16 2016 +0530
Committer: Varun Saxena 
Committed: Tue Aug 16 00:03:29 2016 +0530

--
 .../resourcemanager/scheduler/capacity/TestCapacityScheduler.java   | 1 +
 1 file changed, 1 insertion(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/24249115/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
index 0b52b86..09c16d0 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
@@ -2184,6 +2184,7 @@ public class TestCapacityScheduler {
 
 // check postconditions
 rm.waitForState(app.getApplicationId(), RMAppState.KILLED);
+rm.waitForAppRemovedFromScheduler(app.getApplicationId());
 appsInRoot = scheduler.getAppsInQueue("root");
 assertTrue(appsInRoot.isEmpty());
 


-
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org



hadoop git commit: HDFS-9696. Garbage snapshot records linger forever. Contributed by Kihwal Lee

2016-08-15 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 13414be1a -> e3b809a28


HDFS-9696. Garbage snapshot records linger forever. Contributed by Kihwal Lee

(cherry picked from commit 83e57e083f2cf6c0de8a46966c5492faeabd8f2a)

Conflicts:

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java

(cherry picked from commit 4766a3153dd517ac832d4761c884ed88a83a6c09)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e3b809a2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e3b809a2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e3b809a2

Branch: refs/heads/branch-2.7
Commit: e3b809a284ba689034d9aa82ecd51e79f810911f
Parents: 13414be
Author: Kihwal Lee 
Authored: Mon Aug 15 13:17:02 2016 -0500
Committer: Kihwal Lee 
Committed: Mon Aug 15 13:17:02 2016 -0500

--
 .../server/namenode/FSImageFormatProtobuf.java  |  6 ++-
 .../hdfs/server/namenode/TestSaveNamespace.java | 41 
 2 files changed, 46 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e3b809a2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java
index 0340401..adade58 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java
@@ -443,7 +443,11 @@ public final class FSImageFormatProtobuf {
   this, summary, context, context.getSourceNamesystem());
 
   snapshotSaver.serializeSnapshotSection(sectionOutputStream);
-  snapshotSaver.serializeSnapshotDiffSection(sectionOutputStream);
+  // Skip snapshot-related sections when there is no snapshot.
+  if (context.getSourceNamesystem().getSnapshotManager()
+  .getNumSnapshots() > 0) {
+snapshotSaver.serializeSnapshotDiffSection(sectionOutputStream);
+  }
   snapshotSaver.serializeINodeReferenceSection(sectionOutputStream);
 }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e3b809a2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java
index 5151bf5..3d79c27 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java
@@ -625,6 +625,47 @@ public class TestSaveNamespace {
 }
   }
 
+  @Test
+  public void testSkipSnapshotSection() throws Exception {
+MiniDFSCluster cluster = new MiniDFSCluster.Builder(new Configuration())
+.numDataNodes(1).build();
+cluster.waitActive();
+DistributedFileSystem fs = cluster.getFileSystem();
+OutputStream out = null;
+try {
+  String path = "/skipSnapshot";
+  out = fs.create(new Path(path));
+  out.close();
+
+  // add a bogus filediff
+  FSDirectory dir = cluster.getNamesystem().getFSDirectory();
+  INodeFile file = dir.getINode(path).asFile();
+  file.addSnapshotFeature(null).getDiffs()
+  .saveSelf2Snapshot(-1, file, null, false);
+
+  // make sure it has a diff
+  assertTrue("Snapshot fileDiff is missing.",
+  file.getFileWithSnapshotFeature().getDiffs() != null);
+
+  // saveNamespace
+  fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+  cluster.getNameNodeRpc().saveNamespace();
+  fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+
+  // restart namenode
+  cluster.restartNameNode(true);
+  dir = cluster.getNamesystem().getFSDirectory();
+  file = dir.getINode(path).asFile();
+
+  // there should be no snapshot feature for the inode, when there is
+  // no snapshot.
+  assertTrue("There should be no snapshot feature for this INode.",
+  file.getFileWithSnapshotFeature() == null);
+} finally {
+  cluster.shutdown();
+}
+  }
+
   private void doAnEdit(FSNamesystem fsn, int id) throws IOException {
 // Make an edit
 

hadoop git commit: HDFS-9696. Garbage snapshot records linger forever. Contributed by Kihwal Lee

2016-08-15 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 ca139a3f8 -> 579709fc2


HDFS-9696. Garbage snapshot records linger forever. Contributed by Kihwal Lee

(cherry picked from commit 83e57e083f2cf6c0de8a46966c5492faeabd8f2a)

Conflicts:

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java

(cherry picked from commit 4766a3153dd517ac832d4761c884ed88a83a6c09)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/579709fc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/579709fc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/579709fc

Branch: refs/heads/branch-2.8
Commit: 579709fc222d3b1c7d80c3adc037695d615c62d7
Parents: ca139a3
Author: Kihwal Lee 
Authored: Mon Aug 15 13:13:49 2016 -0500
Committer: Kihwal Lee 
Committed: Mon Aug 15 13:13:49 2016 -0500

--
 .../server/namenode/FSImageFormatProtobuf.java  |  6 ++-
 .../hdfs/server/namenode/TestSaveNamespace.java | 41 
 2 files changed, 46 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/579709fc/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java
index 5552e29..90fe30f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java
@@ -455,7 +455,11 @@ public final class FSImageFormatProtobuf {
   this, summary, context, context.getSourceNamesystem());
 
   snapshotSaver.serializeSnapshotSection(sectionOutputStream);
-  snapshotSaver.serializeSnapshotDiffSection(sectionOutputStream);
+  // Skip snapshot-related sections when there is no snapshot.
+  if (context.getSourceNamesystem().getSnapshotManager()
+  .getNumSnapshots() > 0) {
+snapshotSaver.serializeSnapshotDiffSection(sectionOutputStream);
+  }
   snapshotSaver.serializeINodeReferenceSection(sectionOutputStream);
 }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/579709fc/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java
index 9a90f0f..1fc1b2d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java
@@ -625,6 +625,47 @@ public class TestSaveNamespace {
 }
   }
 
+  @Test
+  public void testSkipSnapshotSection() throws Exception {
+MiniDFSCluster cluster = new MiniDFSCluster.Builder(new Configuration())
+.numDataNodes(1).build();
+cluster.waitActive();
+DistributedFileSystem fs = cluster.getFileSystem();
+OutputStream out = null;
+try {
+  String path = "/skipSnapshot";
+  out = fs.create(new Path(path));
+  out.close();
+
+  // add a bogus filediff
+  FSDirectory dir = cluster.getNamesystem().getFSDirectory();
+  INodeFile file = dir.getINode(path).asFile();
+  file.addSnapshotFeature(null).getDiffs()
+  .saveSelf2Snapshot(-1, file, null, false);
+
+  // make sure it has a diff
+  assertTrue("Snapshot fileDiff is missing.",
+  file.getFileWithSnapshotFeature().getDiffs() != null);
+
+  // saveNamespace
+  fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+  cluster.getNameNodeRpc().saveNamespace();
+  fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+
+  // restart namenode
+  cluster.restartNameNode(true);
+  dir = cluster.getNamesystem().getFSDirectory();
+  file = dir.getINode(path).asFile();
+
+  // there should be no snapshot feature for the inode, when there is
+  // no snapshot.
+  assertTrue("There should be no snapshot feature for this INode.",
+  file.getFileWithSnapshotFeature() == null);
+} finally {
+  cluster.shutdown();
+}
+  }
+
   private void doAnEdit(FSNamesystem fsn, int id) throws IOException {
 // Make an edit
 

hadoop git commit: HDFS-9696. Garbage snapshot records linger forever. Contributed by Kihwal Lee

2016-08-15 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 3c7cf5cd7 -> 4766a3153


HDFS-9696. Garbage snapshot records linger forever. Contributed by Kihwal Lee

(cherry picked from commit 83e57e083f2cf6c0de8a46966c5492faeabd8f2a)

Conflicts:

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4766a315
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4766a315
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4766a315

Branch: refs/heads/branch-2
Commit: 4766a3153dd517ac832d4761c884ed88a83a6c09
Parents: 3c7cf5c
Author: Kihwal Lee 
Authored: Mon Aug 15 13:09:50 2016 -0500
Committer: Kihwal Lee 
Committed: Mon Aug 15 13:09:50 2016 -0500

--
 .../server/namenode/FSImageFormatProtobuf.java  |  6 ++-
 .../hdfs/server/namenode/TestSaveNamespace.java | 41 
 2 files changed, 46 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4766a315/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java
index 2880823..82bbd63 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java
@@ -455,7 +455,11 @@ public final class FSImageFormatProtobuf {
   this, summary, context, context.getSourceNamesystem());
 
   snapshotSaver.serializeSnapshotSection(sectionOutputStream);
-  snapshotSaver.serializeSnapshotDiffSection(sectionOutputStream);
+  // Skip snapshot-related sections when there is no snapshot.
+  if (context.getSourceNamesystem().getSnapshotManager()
+  .getNumSnapshots() > 0) {
+snapshotSaver.serializeSnapshotDiffSection(sectionOutputStream);
+  }
   snapshotSaver.serializeINodeReferenceSection(sectionOutputStream);
 }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4766a315/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java
index 8e138e3..5050998 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java
@@ -646,6 +646,47 @@ public class TestSaveNamespace {
 }
   }
 
+  @Test
+  public void testSkipSnapshotSection() throws Exception {
+MiniDFSCluster cluster = new MiniDFSCluster.Builder(new Configuration())
+.numDataNodes(1).build();
+cluster.waitActive();
+DistributedFileSystem fs = cluster.getFileSystem();
+OutputStream out = null;
+try {
+  String path = "/skipSnapshot";
+  out = fs.create(new Path(path));
+  out.close();
+
+  // add a bogus filediff
+  FSDirectory dir = cluster.getNamesystem().getFSDirectory();
+  INodeFile file = dir.getINode(path).asFile();
+  file.addSnapshotFeature(null).getDiffs()
+  .saveSelf2Snapshot(-1, file, null, false);
+
+  // make sure it has a diff
+  assertTrue("Snapshot fileDiff is missing.",
+  file.getFileWithSnapshotFeature().getDiffs() != null);
+
+  // saveNamespace
+  fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+  cluster.getNameNodeRpc().saveNamespace();
+  fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+
+  // restart namenode
+  cluster.restartNameNode(true);
+  dir = cluster.getNamesystem().getFSDirectory();
+  file = dir.getINode(path).asFile();
+
+  // there should be no snapshot feature for the inode, when there is
+  // no snapshot.
+  assertTrue("There should be no snapshot feature for this INode.",
+  file.getFileWithSnapshotFeature() == null);
+} finally {
+  cluster.shutdown();
+}
+  }
+
   private void doAnEdit(FSNamesystem fsn, int id) throws IOException {
 // Make an edit
 fsn.mkdirs(



hadoop git commit: HDFS-9696. Garbage snapshot records linger forever. Contributed by Kihwal Lee

2016-08-15 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/trunk 9f29f423e -> 83e57e083


HDFS-9696. Garbage snapshot records linger forever. Contributed by Kihwal Lee


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/83e57e08
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/83e57e08
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/83e57e08

Branch: refs/heads/trunk
Commit: 83e57e083f2cf6c0de8a46966c5492faeabd8f2a
Parents: 9f29f42
Author: Kihwal Lee 
Authored: Mon Aug 15 13:01:23 2016 -0500
Committer: Kihwal Lee 
Committed: Mon Aug 15 13:01:23 2016 -0500

--
 .../server/namenode/FSImageFormatProtobuf.java  |  6 ++-
 .../hdfs/server/namenode/TestSaveNamespace.java | 42 
 2 files changed, 47 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/83e57e08/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java
index 05087d1..7a81f9e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java
@@ -459,7 +459,11 @@ public final class FSImageFormatProtobuf {
   this, summary, context, context.getSourceNamesystem());
 
   snapshotSaver.serializeSnapshotSection(sectionOutputStream);
-  snapshotSaver.serializeSnapshotDiffSection(sectionOutputStream);
+  // Skip snapshot-related sections when there is no snapshot.
+  if (context.getSourceNamesystem().getSnapshotManager()
+  .getNumSnapshots() > 0) {
+snapshotSaver.serializeSnapshotDiffSection(sectionOutputStream);
+  }
   snapshotSaver.serializeINodeReferenceSection(sectionOutputStream);
 }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/83e57e08/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java
index a374585..0c86ef4 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java
@@ -647,6 +647,48 @@ public class TestSaveNamespace {
 }
   }
 
+
+  @Test
+  public void testSkipSnapshotSection() throws Exception {
+MiniDFSCluster cluster = new MiniDFSCluster.Builder(new Configuration())
+.numDataNodes(1).build();
+cluster.waitActive();
+DistributedFileSystem fs = cluster.getFileSystem();
+OutputStream out = null;
+try {
+  String path = "/skipSnapshot";
+  out = fs.create(new Path(path));
+  out.close();
+
+  // add a bogus filediff
+  FSDirectory dir = cluster.getNamesystem().getFSDirectory();
+  INodeFile file = dir.getINode(path).asFile();
+  file.addSnapshotFeature(null).getDiffs()
+  .saveSelf2Snapshot(-1, file, null, false);
+
+  // make sure it has a diff
+  assertTrue("Snapshot fileDiff is missing.",
+  file.getFileWithSnapshotFeature().getDiffs() != null);
+
+  // saveNamespace
+  fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+  cluster.getNameNodeRpc().saveNamespace(0, 0);
+  fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+
+  // restart namenode
+  cluster.restartNameNode(true);
+  dir = cluster.getNamesystem().getFSDirectory();
+  file = dir.getINode(path).asFile();
+
+  // there should be no snapshot feature for the inode, when there is
+  // no snapshot.
+  assertTrue("There should be no snapshot feature for this INode.",
+  file.getFileWithSnapshotFeature() == null);
+} finally {
+  cluster.shutdown();
+}
+  }
+
   @Test
   public void testSaveNamespaceBeforeShutdown() throws Exception {
 Configuration conf = new HdfsConfiguration();


-
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org



hadoop git commit: HDFS-10737. disk balancer add volume path to report command. Contributed by Yuanbo Liu.

2016-08-15 Thread aengineer
Repository: hadoop
Updated Branches:
  refs/heads/trunk d677b68c2 -> 9f29f423e


HDFS-10737. disk balancer add volume path to report command. Contributed by 
Yuanbo Liu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9f29f423
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9f29f423
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9f29f423

Branch: refs/heads/trunk
Commit: 9f29f423e426e2d42e650cbed88e46c1c29a2a63
Parents: d677b68
Author: Anu Engineer 
Authored: Mon Aug 15 09:47:30 2016 -0700
Committer: Anu Engineer 
Committed: Mon Aug 15 09:47:30 2016 -0700

--
 .../server/diskbalancer/command/Command.java| 35 ++
 .../diskbalancer/command/PlanCommand.java   | 34 --
 .../diskbalancer/command/ReportCommand.java |  4 +-
 .../command/TestDiskBalancerCommand.java| 48 
 4 files changed, 86 insertions(+), 35 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f29f423/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java
index de77365..3110c1a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java
@@ -33,13 +33,17 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
+import org.apache.hadoop.hdfs.server.diskbalancer.DiskBalancerConstants;
 import org.apache.hadoop.hdfs.server.diskbalancer.connectors.ClusterConnector;
 import org.apache.hadoop.hdfs.server.diskbalancer.connectors.ConnectorFactory;
 import 
org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster;
 import 
org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode;
+import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume;
+import 
org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolumeSet;
 import org.apache.hadoop.hdfs.tools.DiskBalancer;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.codehaus.jackson.map.ObjectMapper;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -421,6 +425,37 @@ public abstract class Command extends Configured {
   }
 
   /**
+   * Reads the Physical path of the disks we are balancing. This is needed to
+   * make the disk balancer human friendly and not used in balancing.
+   *
+   * @param node - Disk Balancer Node.
+   */
+  protected void populatePathNames(
+  DiskBalancerDataNode node) throws IOException {
+// if the cluster is a local file system, there is no need to
+// invoke rpc call to dataNode.
+if (getClusterURI().getScheme().startsWith("file")) {
+  return;
+}
+String dnAddress = node.getDataNodeIP() + ":" + node.getDataNodePort();
+ClientDatanodeProtocol dnClient = getDataNodeProxy(dnAddress);
+String volumeNameJson = dnClient.getDiskBalancerSetting(
+DiskBalancerConstants.DISKBALANCER_VOLUME_NAME);
+ObjectMapper mapper = new ObjectMapper();
+
+@SuppressWarnings("unchecked")
+Map<String, String> volumeMap =
+mapper.readValue(volumeNameJson, HashMap.class);
+for (DiskBalancerVolumeSet set : node.getVolumeSets().values()) {
+  for (DiskBalancerVolume vol : set.getVolumes()) {
+if (volumeMap.containsKey(vol.getUuid())) {
+  vol.setPath(volumeMap.get(vol.getUuid()));
+}
+  }
+}
+  }
+
+  /**
* Set top number of nodes to be processed.
* */
   public void setTopNodes(int topNodes) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f29f423/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/PlanCommand.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/PlanCommand.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/PlanCommand.java
index 3159312..72ad2c6 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/PlanCommand.java
+++ 

hadoop git commit: HDFS-10746: libhdfs++: synchronize access to working_directory and bytes_read_. Contributed by Anatoli Shein.

2016-08-15 Thread jhc
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-8707 ea932e709 -> c64f61285


HDFS-10746: libhdfs++: synchronize access to working_directory and bytes_read_. 
 Contributed by Anatoli Shein.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c64f6128
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c64f6128
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c64f6128

Branch: refs/heads/HDFS-8707
Commit: c64f6128561b380af7d338c80a9db80084461cd1
Parents: ea932e7
Author: James 
Authored: Mon Aug 15 12:21:25 2016 -0400
Committer: James 
Committed: Mon Aug 15 12:21:25 2016 -0400

--
 .../main/native/libhdfspp/lib/bindings/c/hdfs.cc   | 17 -
 .../src/main/native/libhdfspp/lib/fs/filehandle.cc |  4 ++--
 .../src/main/native/libhdfspp/lib/fs/filehandle.h  |  2 +-
 3 files changed, 15 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c64f6128/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/bindings/c/hdfs.cc
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/bindings/c/hdfs.cc
 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/bindings/c/hdfs.cc
index 7fda4e2..4003358 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/bindings/c/hdfs.cc
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/bindings/c/hdfs.cc
@@ -43,18 +43,25 @@ static constexpr tPort kDefaultPort = 8020;
 
 /* Separate the handles used by the C api from the C++ API*/
 struct hdfs_internal {
-  hdfs_internal(FileSystem *p) : filesystem_(p), working_directory("/") {}
+  hdfs_internal(FileSystem *p) : filesystem_(p), working_directory_("/") {}
  hdfs_internal(std::unique_ptr<FileSystem> p)
-  : filesystem_(std::move(p)), working_directory("/") {}
+  : filesystem_(std::move(p)), working_directory_("/") {}
   virtual ~hdfs_internal(){};
   FileSystem *get_impl() { return filesystem_.get(); }
   const FileSystem *get_impl() const { return filesystem_.get(); }
-  std::string get_working_directory() { return working_directory; }
-  void set_working_directory(std::string new_directory) { working_directory = 
new_directory; }
+  std::string get_working_directory() {
+std::lock_guard<std::mutex> read_guard(wd_lock_);
+return working_directory_;
+  }
+  void set_working_directory(std::string new_directory) {
+std::lock_guard<std::mutex> write_guard(wd_lock_);
+working_directory_ = new_directory;
+  }
 
  private:
   std::unique_ptr<FileSystem> filesystem_;
-  std::string working_directory;  //has to always start and end with '/'
+  std::string working_directory_;  //has to always start and end with '/'
+  std::mutex wd_lock_; //synchronize access to the working 
directory
 };
 
 struct hdfsFile_internal {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c64f6128/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/fs/filehandle.cc
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/fs/filehandle.cc
 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/fs/filehandle.cc
index 2600944..8f1a82c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/fs/filehandle.cc
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/fs/filehandle.cc
@@ -353,8 +353,8 @@ bool FileHandle::ShouldExclude(const Status &s) {
   }
 }
 
-uint64_t FileHandleImpl::get_bytes_read() { return bytes_read_; }
+uint64_t FileHandleImpl::get_bytes_read() { return bytes_read_.load(); }
 
-void FileHandleImpl::clear_bytes_read() { bytes_read_ = 0; }
+void FileHandleImpl::clear_bytes_read() { bytes_read_.store(0); }
 
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c64f6128/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/fs/filehandle.h
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/fs/filehandle.h
 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/fs/filehandle.h
index 03c55ff..7e7c79d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/fs/filehandle.h
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/fs/filehandle.h
@@ -139,7 +139,7 @@ private:
   CancelHandle cancel_state_;
   ReaderGroup readers_;
   std::shared_ptr event_handlers_;
-  uint64_t bytes_read_;
+  std::atomic<uint64_t> bytes_read_;
 };
 
 }