hadoop git commit: HDFS-13719. Docs around dfs.image.transfer.timeout are misleading. Contributed by Kitti Nansi.

2018-07-09 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 512d20e9c -> 913417bbe


HDFS-13719. Docs around dfs.image.transfer.timeout are misleading. Contributed 
by Kitti Nansi.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/913417bb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/913417bb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/913417bb

Branch: refs/heads/branch-2
Commit: 913417bbeab558154fbcc833af54664e7eeba97e
Parents: 512d20e
Author: Andrew Wang 
Authored: Mon Jul 9 15:17:26 2018 +0200
Committer: Andrew Wang 
Committed: Mon Jul 9 15:17:26 2018 +0200

--
 .../hadoop-hdfs/src/main/resources/hdfs-default.xml| 13 +
 1 file changed, 5 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/913417bb/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index 07c1a54..c8f63fc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -1235,11 +1235,10 @@
   dfs.image.transfer.timeout
  60000
   
-Socket timeout for image transfer in milliseconds. This timeout and 
the related
-dfs.image.transfer.bandwidthPerSec parameter should be configured such
-that normal image transfer can complete successfully.
-This timeout prevents client hangs when the sender fails during
-image transfer. This is socket timeout during image transfer.
+Socket timeout for the HttpURLConnection instance used in the image
+transfer. This is measured in milliseconds.
+This timeout prevents client hangs if the connection is idle
+for this configured timeout, during image transfer.
   
 
 
@@ -1250,9 +1249,7 @@
 Maximum bandwidth used for regular image transfers (instead of
 bootstrapping the standby namenode), in bytes per second.
 This can help keep normal namenode operations responsive during
-checkpointing. The maximum bandwidth and timeout in
-dfs.image.transfer.timeout should be set such that normal image
-transfers can complete successfully.
+checkpointing.
 A default value of 0 indicates that throttling is disabled.
 The maximum bandwidth used for bootstrapping standby namenode is
 configured with dfs.image.transfer-bootstrap-standby.bandwidthPerSec.


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDFS-13719. Docs around dfs.image.transfer.timeout are misleading. Contributed by Kitti Nansi.

2018-07-09 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 e994c4f8a -> 242b5acdb


HDFS-13719. Docs around dfs.image.transfer.timeout are misleading. Contributed 
by Kitti Nansi.

(cherry picked from commit eecb5ba54599aeae758abd4007e55e5b531f)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/242b5acd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/242b5acd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/242b5acd

Branch: refs/heads/branch-3.1
Commit: 242b5acdb358d886b6cc23707bc4e7b484ff514e
Parents: e994c4f
Author: Andrew Wang 
Authored: Mon Jul 9 15:17:21 2018 +0200
Committer: Andrew Wang 
Committed: Mon Jul 9 15:17:34 2018 +0200

--
 .../hadoop-hdfs/src/main/resources/hdfs-default.xml| 13 +
 1 file changed, 5 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/242b5acd/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index d17020d..c092bff 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -1286,11 +1286,10 @@
   dfs.image.transfer.timeout
  60000
   
-Socket timeout for image transfer in milliseconds. This timeout and 
the related
-dfs.image.transfer.bandwidthPerSec parameter should be configured such
-that normal image transfer can complete successfully.
-This timeout prevents client hangs when the sender fails during
-image transfer. This is socket timeout during image transfer.
+Socket timeout for the HttpURLConnection instance used in the image
+transfer. This is measured in milliseconds.
+This timeout prevents client hangs if the connection is idle
+for this configured timeout, during image transfer.
   
 
 
@@ -1301,9 +1300,7 @@
 Maximum bandwidth used for regular image transfers (instead of
 bootstrapping the standby namenode), in bytes per second.
 This can help keep normal namenode operations responsive during
-checkpointing. The maximum bandwidth and timeout in
-dfs.image.transfer.timeout should be set such that normal image
-transfers can complete successfully.
+checkpointing.
 A default value of 0 indicates that throttling is disabled.
 The maximum bandwidth used for bootstrapping standby namenode is
 configured with dfs.image.transfer-bootstrap-standby.bandwidthPerSec.


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDFS-13719. Docs around dfs.image.transfer.timeout are misleading. Contributed by Kitti Nansi.

2018-07-09 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 0a6942d58 -> 0e57e81b0


HDFS-13719. Docs around dfs.image.transfer.timeout are misleading. Contributed 
by Kitti Nansi.

(cherry picked from commit eecb5ba54599aeae758abd4007e55e5b531f)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0e57e81b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0e57e81b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0e57e81b

Branch: refs/heads/branch-3.0
Commit: 0e57e81b0c090d5c518d041175b88c3cf69f6e13
Parents: 0a6942d
Author: Andrew Wang 
Authored: Mon Jul 9 15:17:21 2018 +0200
Committer: Andrew Wang 
Committed: Mon Jul 9 15:17:48 2018 +0200

--
 .../hadoop-hdfs/src/main/resources/hdfs-default.xml| 13 +
 1 file changed, 5 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0e57e81b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index 7b58331..41e3773 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -1286,11 +1286,10 @@
   dfs.image.transfer.timeout
  60000
   
-Socket timeout for image transfer in milliseconds. This timeout and 
the related
-dfs.image.transfer.bandwidthPerSec parameter should be configured such
-that normal image transfer can complete successfully.
-This timeout prevents client hangs when the sender fails during
-image transfer. This is socket timeout during image transfer.
+Socket timeout for the HttpURLConnection instance used in the image
+transfer. This is measured in milliseconds.
+This timeout prevents client hangs if the connection is idle
+for this configured timeout, during image transfer.
   
 
 
@@ -1301,9 +1300,7 @@
 Maximum bandwidth used for regular image transfers (instead of
 bootstrapping the standby namenode), in bytes per second.
 This can help keep normal namenode operations responsive during
-checkpointing. The maximum bandwidth and timeout in
-dfs.image.transfer.timeout should be set such that normal image
-transfers can complete successfully.
+checkpointing.
 A default value of 0 indicates that throttling is disabled.
 The maximum bandwidth used for bootstrapping standby namenode is
 configured with dfs.image.transfer-bootstrap-standby.bandwidthPerSec.


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDFS-13712. BlockReaderRemote.read() logging improvement. Contributed by Gergo Repas.

2018-07-03 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 98f2444a7 -> 896b51578


HDFS-13712. BlockReaderRemote.read() logging improvement. Contributed by Gergo 
Repas.

(cherry picked from commit 344f324710522ffb27852c1a673c4f7d3d6eac4b)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/896b5157
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/896b5157
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/896b5157

Branch: refs/heads/branch-3.0
Commit: 896b51578f751817ac9fb3d8dc55d82aa5346cea
Parents: 98f2444
Author: Andrew Wang 
Authored: Tue Jul 3 11:07:45 2018 +0200
Committer: Andrew Wang 
Committed: Tue Jul 3 11:08:05 2018 +0200

--
 .../hadoop/hdfs/client/impl/BlockReaderRemote.java| 14 ++
 1 file changed, 10 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/896b5157/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderRemote.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderRemote.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderRemote.java
index caf15e4..ea1baed 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderRemote.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderRemote.java
@@ -129,16 +129,22 @@ public class BlockReaderRemote implements BlockReader {
   @Override
   public synchronized int read(byte[] buf, int off, int len)
   throws IOException {
-UUID randomId = (LOG.isTraceEnabled() ? UUID.randomUUID() : null);
-LOG.trace("Starting read #{} file {} from datanode {}",
-randomId, filename, datanodeID.getHostName());
+boolean logTraceEnabled = LOG.isTraceEnabled();
+UUID randomId = null;
+if (logTraceEnabled) {
+  randomId = UUID.randomUUID();
+  LOG.trace("Starting read #{} file {} from datanode {}",
+  randomId, filename, datanodeID.getHostName());
+}
 
 if (curDataSlice == null ||
 curDataSlice.remaining() == 0 && bytesNeededToFinish > 0) {
   readNextPacket();
 }
 
-LOG.trace("Finishing read #{}", randomId);
+if (logTraceEnabled) {
+  LOG.trace("Finishing read #{}", randomId);
+}
 
 if (curDataSlice.remaining() == 0) {
   // we're at EOF now


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDFS-13712. BlockReaderRemote.read() logging improvement. Contributed by Gergo Repas.

2018-07-03 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/trunk d9ba6f365 -> 344f32471


HDFS-13712. BlockReaderRemote.read() logging improvement. Contributed by Gergo 
Repas.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/344f3247
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/344f3247
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/344f3247

Branch: refs/heads/trunk
Commit: 344f324710522ffb27852c1a673c4f7d3d6eac4b
Parents: d9ba6f3
Author: Andrew Wang 
Authored: Tue Jul 3 11:07:45 2018 +0200
Committer: Andrew Wang 
Committed: Tue Jul 3 11:07:45 2018 +0200

--
 .../hadoop/hdfs/client/impl/BlockReaderRemote.java| 14 ++
 1 file changed, 10 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/344f3247/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderRemote.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderRemote.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderRemote.java
index caf15e4..ea1baed 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderRemote.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderRemote.java
@@ -129,16 +129,22 @@ public class BlockReaderRemote implements BlockReader {
   @Override
   public synchronized int read(byte[] buf, int off, int len)
   throws IOException {
-UUID randomId = (LOG.isTraceEnabled() ? UUID.randomUUID() : null);
-LOG.trace("Starting read #{} file {} from datanode {}",
-randomId, filename, datanodeID.getHostName());
+boolean logTraceEnabled = LOG.isTraceEnabled();
+UUID randomId = null;
+if (logTraceEnabled) {
+  randomId = UUID.randomUUID();
+  LOG.trace("Starting read #{} file {} from datanode {}",
+  randomId, filename, datanodeID.getHostName());
+}
 
 if (curDataSlice == null ||
 curDataSlice.remaining() == 0 && bytesNeededToFinish > 0) {
   readNextPacket();
 }
 
-LOG.trace("Finishing read #{}", randomId);
+if (logTraceEnabled) {
+  LOG.trace("Finishing read #{}", randomId);
+}
 
 if (curDataSlice.remaining() == 0) {
   // we're at EOF now


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDFS-13712. BlockReaderRemote.read() logging improvement. Contributed by Gergo Repas.

2018-07-03 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 0d7b811ee -> 3b266abda


HDFS-13712. BlockReaderRemote.read() logging improvement. Contributed by Gergo 
Repas.

(cherry picked from commit 344f324710522ffb27852c1a673c4f7d3d6eac4b)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3b266abd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3b266abd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3b266abd

Branch: refs/heads/branch-3.1
Commit: 3b266abdadd8a2734489c873ba9e2cb2563e29ac
Parents: 0d7b811
Author: Andrew Wang 
Authored: Tue Jul 3 11:07:45 2018 +0200
Committer: Andrew Wang 
Committed: Tue Jul 3 11:07:57 2018 +0200

--
 .../hadoop/hdfs/client/impl/BlockReaderRemote.java| 14 ++
 1 file changed, 10 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b266abd/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderRemote.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderRemote.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderRemote.java
index caf15e4..ea1baed 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderRemote.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderRemote.java
@@ -129,16 +129,22 @@ public class BlockReaderRemote implements BlockReader {
   @Override
   public synchronized int read(byte[] buf, int off, int len)
   throws IOException {
-UUID randomId = (LOG.isTraceEnabled() ? UUID.randomUUID() : null);
-LOG.trace("Starting read #{} file {} from datanode {}",
-randomId, filename, datanodeID.getHostName());
+boolean logTraceEnabled = LOG.isTraceEnabled();
+UUID randomId = null;
+if (logTraceEnabled) {
+  randomId = UUID.randomUUID();
+  LOG.trace("Starting read #{} file {} from datanode {}",
+  randomId, filename, datanodeID.getHostName());
+}
 
 if (curDataSlice == null ||
 curDataSlice.remaining() == 0 && bytesNeededToFinish > 0) {
   readNextPacket();
 }
 
-LOG.trace("Finishing read #{}", randomId);
+if (logTraceEnabled) {
+  LOG.trace("Finishing read #{}", randomId);
+}
 
 if (curDataSlice.remaining() == 0) {
   // we're at EOF now


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HADOOP-15554. Improve JIT performance for Configuration parsing. Contributed by Todd Lipcon.

2018-07-02 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 2d2639bea -> 9f2d57697


HADOOP-15554. Improve JIT performance for Configuration parsing. Contributed by 
Todd Lipcon.

(cherry picked from commit f51da9c4d1423c2ac92eb4f40e973264e7e968cc)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9f2d5769
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9f2d5769
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9f2d5769

Branch: refs/heads/branch-3.1
Commit: 9f2d57697add3d435814bc45dc7a2a1eb42619a6
Parents: 2d2639b
Author: Andrew Wang 
Authored: Mon Jul 2 18:31:21 2018 +0200
Committer: Andrew Wang 
Committed: Mon Jul 2 18:31:27 2018 +0200

--
 .../org/apache/hadoop/conf/Configuration.java   | 458 +++
 1 file changed, 276 insertions(+), 182 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f2d5769/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
index bd6277f..bde2bab 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
@@ -41,6 +41,7 @@ import java.io.Writer;
 import java.lang.ref.WeakReference;
 import java.net.InetSocketAddress;
 import java.net.JarURLConnection;
+import java.net.MalformedURLException;
 import java.net.URL;
 import java.net.URLConnection;
 import java.util.ArrayList;
@@ -2981,187 +2982,11 @@ public class Configuration implements 
Iterable>,
   if(returnCachedProperties) {
 toAddTo = new Properties();
   }
-  DeprecationContext deprecations = deprecationContext.get();
 
-  StringBuilder token = new StringBuilder();
-  String confName = null;
-  String confValue = null;
-  String confInclude = null;
-  String confTag = null;
-  boolean confFinal = false;
-  boolean fallbackAllowed = false;
-  boolean fallbackEntered = false;
-  boolean parseToken = false;
-  LinkedList confSource = new LinkedList();
-
-  while (reader.hasNext()) {
-switch (reader.next()) {
-case XMLStreamConstants.START_ELEMENT:
-  switch (reader.getLocalName()) {
-  case "property":
-confName = null;
-confValue = null;
-confFinal = false;
-confTag = null;
-confSource.clear();
-
-// First test for short format configuration
-int attrCount = reader.getAttributeCount();
-for (int i = 0; i < attrCount; i++) {
-  String propertyAttr = reader.getAttributeLocalName(i);
-  if ("name".equals(propertyAttr)) {
-confName = StringInterner.weakIntern(
-reader.getAttributeValue(i));
-  } else if ("value".equals(propertyAttr)) {
-confValue = StringInterner.weakIntern(
-reader.getAttributeValue(i));
-  } else if ("final".equals(propertyAttr)) {
-confFinal = "true".equals(reader.getAttributeValue(i));
-  } else if ("source".equals(propertyAttr)) {
-confSource.add(StringInterner.weakIntern(
-reader.getAttributeValue(i)));
-  } else if ("tag".equals(propertyAttr)) {
-confTag = StringInterner
-.weakIntern(reader.getAttributeValue(i));
-  }
-}
-break;
-  case "name":
-  case "value":
-  case "final":
-  case "source":
-  case "tag":
-parseToken = true;
-token.setLength(0);
-break;
-  case "include":
-// Determine href for xi:include
-confInclude = null;
-attrCount = reader.getAttributeCount();
-for (int i = 0; i < attrCount; i++) {
-  String attrName = reader.getAttributeLocalName(i);
-  if ("href".equals(attrName)) {
-confInclude = reader.getAttributeValue(i);
-  }
-}
-if (confInclude == null) {
-  break;
-}
-if (isRestricted) {
-  throw new RuntimeException("Error parsing resource " + wrapper
-  + ": XInclude is not supported for 

hadoop git commit: HADOOP-15554. Improve JIT performance for Configuration parsing. Contributed by Todd Lipcon.

2018-07-02 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 a41dc6ef5 -> a4d0336ba


HADOOP-15554. Improve JIT performance for Configuration parsing. Contributed by 
Todd Lipcon.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a4d0336b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a4d0336b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a4d0336b

Branch: refs/heads/branch-3.0
Commit: a4d0336ba27a7e67b0feab9adb7051abcdbccea8
Parents: a41dc6e
Author: Andrew Wang 
Authored: Mon Jul 2 18:32:07 2018 +0200
Committer: Andrew Wang 
Committed: Mon Jul 2 18:32:07 2018 +0200

--
 .../org/apache/hadoop/conf/Configuration.java   | 423 +++
 1 file changed, 258 insertions(+), 165 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a4d0336b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
index db5ae39..0699f83 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
@@ -41,6 +41,7 @@ import java.io.Writer;
 import java.lang.ref.WeakReference;
 import java.net.InetSocketAddress;
 import java.net.JarURLConnection;
+import java.net.MalformedURLException;
 import java.net.URL;
 import java.net.URLConnection;
 import java.util.ArrayList;
@@ -51,7 +52,6 @@ import java.util.Enumeration;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Iterator;
-import java.util.LinkedList;
 import java.util.List;
 import java.util.ListIterator;
 import java.util.Map;
@@ -2944,171 +2944,11 @@ public class Configuration implements 
Iterable>,
   if(returnCachedProperties) {
 toAddTo = new Properties();
   }
-  DeprecationContext deprecations = deprecationContext.get();
 
-  StringBuilder token = new StringBuilder();
-  String confName = null;
-  String confValue = null;
-  String confInclude = null;
-  boolean confFinal = false;
-  boolean fallbackAllowed = false;
-  boolean fallbackEntered = false;
-  boolean parseToken = false;
-  LinkedList confSource = new LinkedList();
-
-  while (reader.hasNext()) {
-switch (reader.next()) {
-case XMLStreamConstants.START_ELEMENT:
-  switch (reader.getLocalName()) {
-  case "property":
-confName = null;
-confValue = null;
-confFinal = false;
-confSource.clear();
-
-// First test for short format configuration
-int attrCount = reader.getAttributeCount();
-for (int i = 0; i < attrCount; i++) {
-  String propertyAttr = reader.getAttributeLocalName(i);
-  if ("name".equals(propertyAttr)) {
-confName = StringInterner.weakIntern(
-reader.getAttributeValue(i));
-  } else if ("value".equals(propertyAttr)) {
-confValue = StringInterner.weakIntern(
-reader.getAttributeValue(i));
-  } else if ("final".equals(propertyAttr)) {
-confFinal = "true".equals(reader.getAttributeValue(i));
-  } else if ("source".equals(propertyAttr)) {
-confSource.add(StringInterner.weakIntern(
-reader.getAttributeValue(i)));
-  }
-}
-break;
-  case "name":
-  case "value":
-  case "final":
-  case "source":
-parseToken = true;
-token.setLength(0);
-break;
-  case "include":
-// Determine href for xi:include
-confInclude = null;
-attrCount = reader.getAttributeCount();
-for (int i = 0; i < attrCount; i++) {
-  String attrName = reader.getAttributeLocalName(i);
-  if ("href".equals(attrName)) {
-confInclude = reader.getAttributeValue(i);
-  }
-}
-if (confInclude == null) {
-  break;
-}
-if (isRestricted) {
-  throw new RuntimeException("Error parsing resource " + wrapper
-  + ": XInclude is not supported for restricted resources");
-}
-// Determine if the included resource 

hadoop git commit: HADOOP-15554. Improve JIT performance for Configuration parsing. Contributed by Todd Lipcon.

2018-07-02 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/trunk 5d748bd05 -> f51da9c4d


HADOOP-15554. Improve JIT performance for Configuration parsing. Contributed by 
Todd Lipcon.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f51da9c4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f51da9c4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f51da9c4

Branch: refs/heads/trunk
Commit: f51da9c4d1423c2ac92eb4f40e973264e7e968cc
Parents: 5d748bd
Author: Andrew Wang 
Authored: Mon Jul 2 18:31:21 2018 +0200
Committer: Andrew Wang 
Committed: Mon Jul 2 18:31:21 2018 +0200

--
 .../org/apache/hadoop/conf/Configuration.java   | 458 +++
 1 file changed, 276 insertions(+), 182 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f51da9c4/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
index b1125e5..a78e311 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
@@ -41,6 +41,7 @@ import java.io.Writer;
 import java.lang.ref.WeakReference;
 import java.net.InetSocketAddress;
 import java.net.JarURLConnection;
+import java.net.MalformedURLException;
 import java.net.URL;
 import java.net.URLConnection;
 import java.util.ArrayList;
@@ -2981,187 +2982,11 @@ public class Configuration implements 
Iterable>,
   if(returnCachedProperties) {
 toAddTo = new Properties();
   }
-  DeprecationContext deprecations = deprecationContext.get();
 
-  StringBuilder token = new StringBuilder();
-  String confName = null;
-  String confValue = null;
-  String confInclude = null;
-  String confTag = null;
-  boolean confFinal = false;
-  boolean fallbackAllowed = false;
-  boolean fallbackEntered = false;
-  boolean parseToken = false;
-  LinkedList confSource = new LinkedList();
-
-  while (reader.hasNext()) {
-switch (reader.next()) {
-case XMLStreamConstants.START_ELEMENT:
-  switch (reader.getLocalName()) {
-  case "property":
-confName = null;
-confValue = null;
-confFinal = false;
-confTag = null;
-confSource.clear();
-
-// First test for short format configuration
-int attrCount = reader.getAttributeCount();
-for (int i = 0; i < attrCount; i++) {
-  String propertyAttr = reader.getAttributeLocalName(i);
-  if ("name".equals(propertyAttr)) {
-confName = StringInterner.weakIntern(
-reader.getAttributeValue(i));
-  } else if ("value".equals(propertyAttr)) {
-confValue = StringInterner.weakIntern(
-reader.getAttributeValue(i));
-  } else if ("final".equals(propertyAttr)) {
-confFinal = "true".equals(reader.getAttributeValue(i));
-  } else if ("source".equals(propertyAttr)) {
-confSource.add(StringInterner.weakIntern(
-reader.getAttributeValue(i)));
-  } else if ("tag".equals(propertyAttr)) {
-confTag = StringInterner
-.weakIntern(reader.getAttributeValue(i));
-  }
-}
-break;
-  case "name":
-  case "value":
-  case "final":
-  case "source":
-  case "tag":
-parseToken = true;
-token.setLength(0);
-break;
-  case "include":
-// Determine href for xi:include
-confInclude = null;
-attrCount = reader.getAttributeCount();
-for (int i = 0; i < attrCount; i++) {
-  String attrName = reader.getAttributeLocalName(i);
-  if ("href".equals(attrName)) {
-confInclude = reader.getAttributeValue(i);
-  }
-}
-if (confInclude == null) {
-  break;
-}
-if (isRestricted) {
-  throw new RuntimeException("Error parsing resource " + wrapper
-  + ": XInclude is not supported for restricted resources");
-}
-// Determine if the included resour

hadoop git commit: HADOOP-15252. Checkstyle version is not compatible with IDEA's checkstyle plugin

2018-07-02 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 d34260f12 -> a41dc6ef5


HADOOP-15252. Checkstyle version is not compatible with IDEA's checkstyle plugin

Signed-off-by: Akira Ajisaka 
(cherry picked from commit 90d2bdcb752f5f32ef65765af6fd5596dd5d373b)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a41dc6ef
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a41dc6ef
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a41dc6ef

Branch: refs/heads/branch-3.0
Commit: a41dc6ef5b2e9244a389200169c5c678f770debd
Parents: d34260f
Author: Andras Bokor 
Authored: Fri Mar 2 17:08:17 2018 -0800
Committer: Andrew Wang 
Committed: Mon Jul 2 18:28:46 2018 +0200

--
 hadoop-build-tools/src/main/resources/checkstyle/checkstyle.xml | 5 ++---
 pom.xml | 4 ++--
 2 files changed, 4 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a41dc6ef/hadoop-build-tools/src/main/resources/checkstyle/checkstyle.xml
--
diff --git a/hadoop-build-tools/src/main/resources/checkstyle/checkstyle.xml 
b/hadoop-build-tools/src/main/resources/checkstyle/checkstyle.xml
index 2053561..f4e4f42 100644
--- a/hadoop-build-tools/src/main/resources/checkstyle/checkstyle.xml
+++ b/hadoop-build-tools/src/main/resources/checkstyle/checkstyle.xml
@@ -50,8 +50,6 @@
 
 
 
-
-
 
 
 
@@ -72,7 +70,8 @@
 
 
 
-
+
+
 
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a41dc6ef/pom.xml
--
diff --git a/pom.xml b/pom.xml
index 479f914..122432c 100644
--- a/pom.xml
+++ b/pom.xml
@@ -107,8 +107,8 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 
http://maven.apache.org/xs
 3.3.0
 2.5.0
 1.0.0
-2.17
-7.5.1
+3.0.0
+8.8
 1.4.3
 
 bash


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDFS-13702. Remove HTrace hooks from DFSClient to reduce CPU usage. Contributed by Todd Lipcon.

2018-07-02 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 0268b954e -> 2d2639bea


HDFS-13702. Remove HTrace hooks from DFSClient to reduce CPU usage. Contributed 
by Todd Lipcon.

(cherry picked from commit 5d748bd056a32f2c6922514cd0c5b31d866a9919)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2d2639be
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2d2639be
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2d2639be

Branch: refs/heads/branch-3.1
Commit: 2d2639beaa9f9de558e87ed78b2116f5a2cc7890
Parents: 0268b95
Author: Andrew Wang 
Authored: Mon Jul 2 12:11:06 2018 +0200
Committer: Andrew Wang 
Committed: Mon Jul 2 12:11:11 2018 +0200

--
 .../java/org/apache/hadoop/hdfs/DFSClient.java  | 19 
 .../org/apache/hadoop/hdfs/DFSInputStream.java  | 44 ++---
 .../hdfs/client/impl/BlockReaderFactory.java| 16 +---
 .../hdfs/client/impl/BlockReaderLocal.java  | 93 
 .../client/impl/BlockReaderLocalLegacy.java | 44 -
 .../hdfs/client/impl/BlockReaderRemote.java | 19 +---
 .../erasurecode/StripedBlockReader.java |  2 +-
 .../hdfs/server/namenode/NamenodeFsck.java  |  1 -
 .../hdfs/client/impl/BlockReaderTestUtil.java   |  2 -
 .../hdfs/client/impl/TestBlockReaderLocal.java  |  2 -
 .../blockmanagement/TestBlockTokenWithDFS.java  |  2 -
 .../datanode/TestDataNodeVolumeFailure.java |  2 -
 12 files changed, 68 insertions(+), 178 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d2639be/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 5f1b2bb..2f00693 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -3065,25 +3065,6 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
   }
 
   /**
-   * Full detailed tracing for read requests: path, position in the file,
-   * and length.
-   *
-   * @param reqLen requested length
-   */
-  TraceScope newReaderTraceScope(String description, String path, long pos,
-  int reqLen) {
-TraceScope scope = newPathTraceScope(description, path);
-scope.addKVAnnotation("pos", Long.toString(pos));
-scope.addKVAnnotation("reqLen", Integer.toString(reqLen));
-return scope;
-  }
-
-  /** Add the returned length info to the scope. */
-  void addRetLenToReaderScope(TraceScope scope, int retLen) {
-scope.addKVAnnotation("retLen", Integer.toString(retLen));
-  }
-
-  /**
* Get the erasure coding policy information for the specified path
*
* @param src path to get the information for

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d2639be/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
index aab1a10..75eb2ea 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
@@ -84,8 +84,6 @@ import org.apache.hadoop.util.IdentityHashStore;
 import org.apache.hadoop.util.StopWatch;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.htrace.core.SpanId;
-import org.apache.htrace.core.TraceScope;
-import org.apache.htrace.core.Tracer;
 
 import com.google.common.annotations.VisibleForTesting;
 
@@ -640,7 +638,6 @@ public class DFSInputStream extends FSInputStream
 setClientCacheContext(dfsClient.getClientContext()).
 setUserGroupInformation(dfsClient.ugi).
 setConfiguration(dfsClient.getConfiguration()).
-setTracer(dfsClient.getTracer()).
 build();
   }
 
@@ -820,31 +817,14 @@ public class DFSInputStream extends FSInputStream
 }
 ReaderStrategy byteArrayReader =
 new ByteArrayStrategy(buf, off, len, readStatistics, dfsClient);
-try (TraceScope scope =
- dfsClient.newReaderTraceScope("DFSInputStream#byteArrayRead",
- src, getPos(), len)) {
-  int retLen = readWithStrategy(byteArrayReader);
-  if (retLen < len) {
-dfsClient.addRetLenToRead

hadoop git commit: HDFS-13702. Remove HTrace hooks from DFSClient to reduce CPU usage. Contributed by Todd Lipcon.

2018-07-02 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 7cd769b31 -> d34260f12


HDFS-13702. Remove HTrace hooks from DFSClient to reduce CPU usage. Contributed 
by Todd Lipcon.

(cherry picked from commit 5d748bd056a32f2c6922514cd0c5b31d866a9919)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d34260f1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d34260f1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d34260f1

Branch: refs/heads/branch-3.0
Commit: d34260f1225876f590bbbe5c97ea23bf0f84d2ed
Parents: 7cd769b
Author: Andrew Wang 
Authored: Mon Jul 2 12:11:06 2018 +0200
Committer: Andrew Wang 
Committed: Mon Jul 2 12:11:14 2018 +0200

--
 .../java/org/apache/hadoop/hdfs/DFSClient.java  | 19 
 .../org/apache/hadoop/hdfs/DFSInputStream.java  | 44 ++---
 .../hdfs/client/impl/BlockReaderFactory.java| 16 +---
 .../hdfs/client/impl/BlockReaderLocal.java  | 93 
 .../client/impl/BlockReaderLocalLegacy.java | 44 -
 .../hdfs/client/impl/BlockReaderRemote.java | 19 +---
 .../erasurecode/StripedBlockReader.java |  2 +-
 .../hdfs/server/namenode/NamenodeFsck.java  |  1 -
 .../hdfs/client/impl/BlockReaderTestUtil.java   |  2 -
 .../hdfs/client/impl/TestBlockReaderLocal.java  |  2 -
 .../blockmanagement/TestBlockTokenWithDFS.java  |  2 -
 .../datanode/TestDataNodeVolumeFailure.java |  2 -
 12 files changed, 68 insertions(+), 178 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d34260f1/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index fa2a2bd..7bcf488 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -2954,25 +2954,6 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
   }
 
   /**
-   * Full detailed tracing for read requests: path, position in the file,
-   * and length.
-   *
-   * @param reqLen requested length
-   */
-  TraceScope newReaderTraceScope(String description, String path, long pos,
-  int reqLen) {
-TraceScope scope = newPathTraceScope(description, path);
-scope.addKVAnnotation("pos", Long.toString(pos));
-scope.addKVAnnotation("reqLen", Integer.toString(reqLen));
-return scope;
-  }
-
-  /** Add the returned length info to the scope. */
-  void addRetLenToReaderScope(TraceScope scope, int retLen) {
-scope.addKVAnnotation("retLen", Integer.toString(retLen));
-  }
-
-  /**
* Get the erasure coding policy information for the specified path
*
* @param src path to get the information for

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d34260f1/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
index f314266..ae24572 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
@@ -84,8 +84,6 @@ import org.apache.hadoop.util.IdentityHashStore;
 import org.apache.hadoop.util.StopWatch;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.htrace.core.SpanId;
-import org.apache.htrace.core.TraceScope;
-import org.apache.htrace.core.Tracer;
 
 import com.google.common.annotations.VisibleForTesting;
 
@@ -640,7 +638,6 @@ public class DFSInputStream extends FSInputStream
 setClientCacheContext(dfsClient.getClientContext()).
 setUserGroupInformation(dfsClient.ugi).
 setConfiguration(dfsClient.getConfiguration()).
-setTracer(dfsClient.getTracer()).
 build();
   }
 
@@ -820,31 +817,14 @@ public class DFSInputStream extends FSInputStream
 }
 ReaderStrategy byteArrayReader =
 new ByteArrayStrategy(buf, off, len, readStatistics, dfsClient);
-try (TraceScope scope =
- dfsClient.newReaderTraceScope("DFSInputStream#byteArrayRead",
- src, getPos(), len)) {
-  int retLen = readWithStrategy(byteArrayReader);
-  if (retLen < len) {
-dfsClient.addRetLenToRead

hadoop git commit: HDFS-13702. Remove HTrace hooks from DFSClient to reduce CPU usage. Contributed by Todd Lipcon.

2018-07-02 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/trunk 6ba997410 -> 5d748bd05


HDFS-13702. Remove HTrace hooks from DFSClient to reduce CPU usage. Contributed 
by Todd Lipcon.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5d748bd0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5d748bd0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5d748bd0

Branch: refs/heads/trunk
Commit: 5d748bd056a32f2c6922514cd0c5b31d866a9919
Parents: 6ba9974
Author: Andrew Wang 
Authored: Mon Jul 2 12:11:06 2018 +0200
Committer: Andrew Wang 
Committed: Mon Jul 2 12:11:06 2018 +0200

--
 .../java/org/apache/hadoop/hdfs/DFSClient.java  | 19 
 .../org/apache/hadoop/hdfs/DFSInputStream.java  | 44 ++---
 .../hdfs/client/impl/BlockReaderFactory.java| 16 +---
 .../hdfs/client/impl/BlockReaderLocal.java  | 93 
 .../client/impl/BlockReaderLocalLegacy.java | 44 -
 .../hdfs/client/impl/BlockReaderRemote.java | 19 +---
 .../erasurecode/StripedBlockReader.java |  2 +-
 .../hdfs/server/namenode/NamenodeFsck.java  |  1 -
 .../hdfs/client/impl/BlockReaderTestUtil.java   |  2 -
 .../hdfs/client/impl/TestBlockReaderLocal.java  |  2 -
 .../blockmanagement/TestBlockTokenWithDFS.java  |  2 -
 .../datanode/TestDataNodeVolumeFailure.java |  2 -
 12 files changed, 68 insertions(+), 178 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5d748bd0/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 96c4505..85d6512 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -3071,25 +3071,6 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
   }
 
   /**
-   * Full detailed tracing for read requests: path, position in the file,
-   * and length.
-   *
-   * @param reqLen requested length
-   */
-  TraceScope newReaderTraceScope(String description, String path, long pos,
-  int reqLen) {
-TraceScope scope = newPathTraceScope(description, path);
-scope.addKVAnnotation("pos", Long.toString(pos));
-scope.addKVAnnotation("reqLen", Integer.toString(reqLen));
-return scope;
-  }
-
-  /** Add the returned length info to the scope. */
-  void addRetLenToReaderScope(TraceScope scope, int retLen) {
-scope.addKVAnnotation("retLen", Integer.toString(retLen));
-  }
-
-  /**
* Get the erasure coding policy information for the specified path
*
* @param src path to get the information for

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5d748bd0/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
index 1bdc50a..e5640d2 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
@@ -85,8 +85,6 @@ import org.apache.hadoop.util.IdentityHashStore;
 import org.apache.hadoop.util.StopWatch;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.htrace.core.SpanId;
-import org.apache.htrace.core.TraceScope;
-import org.apache.htrace.core.Tracer;
 
 import com.google.common.annotations.VisibleForTesting;
 
@@ -641,7 +639,6 @@ public class DFSInputStream extends FSInputStream
 setClientCacheContext(dfsClient.getClientContext()).
 setUserGroupInformation(dfsClient.ugi).
 setConfiguration(dfsClient.getConfiguration()).
-setTracer(dfsClient.getTracer()).
 build();
   }
 
@@ -821,31 +818,14 @@ public class DFSInputStream extends FSInputStream
 }
 ReaderStrategy byteArrayReader =
 new ByteArrayStrategy(buf, off, len, readStatistics, dfsClient);
-try (TraceScope scope =
- dfsClient.newReaderTraceScope("DFSInputStream#byteArrayRead",
- src, getPos(), len)) {
-  int retLen = readWithStrategy(byteArrayReader);
-  if (retLen < len) {
-dfsClient.addRetLenToReaderScope(scope, retLen);
-  }
-  return retLen;
-}
+return readWith

hadoop git commit: HDFS-13703. Avoid allocation of CorruptedBlocks hashmap when no corrupted blocks are hit. Contributed by Todd Lipcon.

2018-07-02 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 c5f2fb5ae -> 7cd769b31


HDFS-13703. Avoid allocation of CorruptedBlocks hashmap when no corrupted 
blocks are hit. Contributed by Todd Lipcon.

(cherry picked from commit 6ba99741086170b83c38d3e7e715d9e8046a1e00)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7cd769b3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7cd769b3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7cd769b3

Branch: refs/heads/branch-3.0
Commit: 7cd769b31c664caac0884fd3b45647e2aa6ec7e1
Parents: c5f2fb5
Author: Andrew Wang 
Authored: Mon Jul 2 12:02:19 2018 +0200
Committer: Andrew Wang 
Committed: Mon Jul 2 12:02:27 2018 +0200

--
 .../main/java/org/apache/hadoop/hdfs/DFSInputStream.java |  2 +-
 .../main/java/org/apache/hadoop/hdfs/DFSUtilClient.java  | 11 ++-
 .../org/apache/hadoop/hdfs/server/datanode/DataNode.java |  2 +-
 3 files changed, 8 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7cd769b3/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
index b38e629..f314266 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
@@ -1419,7 +1419,7 @@ public class DFSInputStream extends FSInputStream
 
Map<ExtendedBlock, Set<DatanodeInfo>> corruptedBlockMap =
 corruptedBlocks.getCorruptionMap();
-if (corruptedBlockMap.isEmpty()) {
+if (corruptedBlockMap == null) {
   return;
 }
List<LocatedBlock> reportList = new ArrayList<>(corruptedBlockMap.size());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7cd769b3/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
index 5fa2331..daa5a03 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
@@ -696,14 +696,14 @@ public class DFSUtilClient {
   public static class CorruptedBlocks {
 private Map<ExtendedBlock, Set<DatanodeInfo>> corruptionMap;
 
-public CorruptedBlocks() {
-  this.corruptionMap = new HashMap<>();
-}
-
 /**
  * Indicate a block replica on the specified datanode is corrupted
  */
 public void addCorruptedBlock(ExtendedBlock blk, DatanodeInfo node) {
+  if (corruptionMap == null) {
+corruptionMap = new HashMap<>();
+  }
+
   Set<DatanodeInfo> dnSet = corruptionMap.get(blk);
   if (dnSet == null) {
 dnSet = new HashSet<>();
@@ -715,7 +715,8 @@ public class DFSUtilClient {
 }
 
 /**
- * @return the map that contains all the corruption entries.
+ * @return the map that contains all the corruption entries, or null if
+ * there were no corrupted entries
  */
 public Map<ExtendedBlock, Set<DatanodeInfo>> getCorruptionMap() {
   return corruptionMap;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7cd769b3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index 04a8ef1..302e992 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -1278,7 +1278,7 @@ public class DataNode extends ReconfigurableBase
   DFSUtilClient.CorruptedBlocks corruptedBlocks) throws IOException {
 Map<ExtendedBlock, Set<DatanodeInfo>> corruptionMap =
 corruptedBlocks.getCorruptionMap();
-if (!corruptionMap.isEmpty()) {
+if (corruptionMap != null) {
   for (Map.Entry<ExtendedBlock, Set<DatanodeInfo>> entry :
   corruptionMap.entrySet()) {
 for (DatanodeInfo dnInfo : entry.getValue()) {


-
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org

hadoop git commit: HDFS-13703. Avoid allocation of CorruptedBlocks hashmap when no corrupted blocks are hit. Contributed by Todd Lipcon.

2018-07-02 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/trunk d40121845 -> 6ba997410


HDFS-13703. Avoid allocation of CorruptedBlocks hashmap when no corrupted 
blocks are hit. Contributed by Todd Lipcon.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6ba99741
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6ba99741
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6ba99741

Branch: refs/heads/trunk
Commit: 6ba99741086170b83c38d3e7e715d9e8046a1e00
Parents: d401218
Author: Andrew Wang 
Authored: Mon Jul 2 12:02:19 2018 +0200
Committer: Andrew Wang 
Committed: Mon Jul 2 12:02:19 2018 +0200

--
 .../main/java/org/apache/hadoop/hdfs/DFSInputStream.java |  2 +-
 .../main/java/org/apache/hadoop/hdfs/DFSUtilClient.java  | 11 ++-
 .../org/apache/hadoop/hdfs/server/datanode/DataNode.java |  2 +-
 3 files changed, 8 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ba99741/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
index 573b860..1bdc50a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
@@ -1423,7 +1423,7 @@ public class DFSInputStream extends FSInputStream
 
Map<ExtendedBlock, Set<DatanodeInfo>> corruptedBlockMap =
 corruptedBlocks.getCorruptionMap();
-if (corruptedBlockMap.isEmpty()) {
+if (corruptedBlockMap == null) {
   return;
 }
List<LocatedBlock> reportList = new ArrayList<>(corruptedBlockMap.size());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ba99741/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
index 6c0b106..313b973 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
@@ -751,14 +751,14 @@ public class DFSUtilClient {
   public static class CorruptedBlocks {
 private Map<ExtendedBlock, Set<DatanodeInfo>> corruptionMap;
 
-public CorruptedBlocks() {
-  this.corruptionMap = new HashMap<>();
-}
-
 /**
  * Indicate a block replica on the specified datanode is corrupted
  */
 public void addCorruptedBlock(ExtendedBlock blk, DatanodeInfo node) {
+  if (corruptionMap == null) {
+corruptionMap = new HashMap<>();
+  }
+
   Set<DatanodeInfo> dnSet = corruptionMap.get(blk);
   if (dnSet == null) {
 dnSet = new HashSet<>();
@@ -770,7 +770,8 @@ public class DFSUtilClient {
 }
 
 /**
- * @return the map that contains all the corruption entries.
+ * @return the map that contains all the corruption entries, or null if
+ * there were no corrupted entries
  */
 public Map<ExtendedBlock, Set<DatanodeInfo>> getCorruptionMap() {
   return corruptionMap;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ba99741/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index 96b0f36..4baafb9 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -1278,7 +1278,7 @@ public class DataNode extends ReconfigurableBase
   DFSUtilClient.CorruptedBlocks corruptedBlocks) throws IOException {
 Map<ExtendedBlock, Set<DatanodeInfo>> corruptionMap =
 corruptedBlocks.getCorruptionMap();
-if (!corruptionMap.isEmpty()) {
+if (corruptionMap != null) {
   for (Map.Entry<ExtendedBlock, Set<DatanodeInfo>> entry :
   corruptionMap.entrySet()) {
 for (DatanodeInfo dnInfo : entry.getValue()) {


-
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org



hadoop git commit: HDFS-13703. Avoid allocation of CorruptedBlocks hashmap when no corrupted blocks are hit. Contributed by Todd Lipcon.

2018-07-02 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 e1cd5732a -> 0268b954e


HDFS-13703. Avoid allocation of CorruptedBlocks hashmap when no corrupted 
blocks are hit. Contributed by Todd Lipcon.

(cherry picked from commit 6ba99741086170b83c38d3e7e715d9e8046a1e00)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0268b954
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0268b954
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0268b954

Branch: refs/heads/branch-3.1
Commit: 0268b954e0f5c446850e4741590b0a4df1fde894
Parents: e1cd573
Author: Andrew Wang 
Authored: Mon Jul 2 12:02:19 2018 +0200
Committer: Andrew Wang 
Committed: Mon Jul 2 12:02:48 2018 +0200

--
 .../main/java/org/apache/hadoop/hdfs/DFSInputStream.java |  2 +-
 .../main/java/org/apache/hadoop/hdfs/DFSUtilClient.java  | 11 ++-
 .../org/apache/hadoop/hdfs/server/datanode/DataNode.java |  2 +-
 3 files changed, 8 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0268b954/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
index a25425a..aab1a10 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
@@ -1419,7 +1419,7 @@ public class DFSInputStream extends FSInputStream
 
Map<ExtendedBlock, Set<DatanodeInfo>> corruptedBlockMap =
 corruptedBlocks.getCorruptionMap();
-if (corruptedBlockMap.isEmpty()) {
+if (corruptedBlockMap == null) {
   return;
 }
List<LocatedBlock> reportList = new ArrayList<>(corruptedBlockMap.size());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0268b954/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
index 6c0b106..313b973 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
@@ -751,14 +751,14 @@ public class DFSUtilClient {
   public static class CorruptedBlocks {
 private Map<ExtendedBlock, Set<DatanodeInfo>> corruptionMap;
 
-public CorruptedBlocks() {
-  this.corruptionMap = new HashMap<>();
-}
-
 /**
  * Indicate a block replica on the specified datanode is corrupted
  */
 public void addCorruptedBlock(ExtendedBlock blk, DatanodeInfo node) {
+  if (corruptionMap == null) {
+corruptionMap = new HashMap<>();
+  }
+
   Set<DatanodeInfo> dnSet = corruptionMap.get(blk);
   if (dnSet == null) {
 dnSet = new HashSet<>();
@@ -770,7 +770,8 @@ public class DFSUtilClient {
 }
 
 /**
- * @return the map that contains all the corruption entries.
+ * @return the map that contains all the corruption entries, or null if
+ * there were no corrupted entries
  */
 public Map<ExtendedBlock, Set<DatanodeInfo>> getCorruptionMap() {
   return corruptionMap;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0268b954/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index 96b0f36..4baafb9 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -1278,7 +1278,7 @@ public class DataNode extends ReconfigurableBase
   DFSUtilClient.CorruptedBlocks corruptedBlocks) throws IOException {
 Map<ExtendedBlock, Set<DatanodeInfo>> corruptionMap =
 corruptedBlocks.getCorruptionMap();
-if (!corruptionMap.isEmpty()) {
+if (corruptionMap != null) {
   for (Map.Entry<ExtendedBlock, Set<DatanodeInfo>> entry :
   corruptionMap.entrySet()) {
 for (DatanodeInfo dnInfo : entry.getValue()) {


-
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org

hadoop git commit: HDFS-13611. Unsafe use of Text as a ConcurrentHashMap key in PBHelperClient.

2018-05-24 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 b0b32988d -> 097347c85


HDFS-13611. Unsafe use of Text as a ConcurrentHashMap key in PBHelperClient.

(cherry picked from commit c9b63deb533274ca8ef4939f6cd13f728a067f7b)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/097347c8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/097347c8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/097347c8

Branch: refs/heads/branch-2
Commit: 097347c857b5a74b228db2b0d076cde2c767672d
Parents: b0b3298
Author: Andrew Wang <w...@apache.org>
Authored: Thu May 24 09:56:23 2018 -0700
Committer: Andrew Wang <w...@apache.org>
Committed: Thu May 24 09:56:54 2018 -0700

--
 .../java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/097347c8/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
index db4a02e..7694387 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
@@ -211,7 +211,7 @@ public class PBHelperClient {
 ByteString value = fixedByteStringCache.get(key);
 if (value == null) {
   value = ByteString.copyFromUtf8(key.toString());
-  fixedByteStringCache.put(key, value);
+  fixedByteStringCache.put(new Text(key.copyBytes()), value);
 }
 return value;
   }


-
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org



hadoop git commit: HDFS-13611. Unsafe use of Text as a ConcurrentHashMap key in PBHelperClient.

2018-05-24 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 4711980d7 -> 731abae55


HDFS-13611. Unsafe use of Text as a ConcurrentHashMap key in PBHelperClient.

(cherry picked from commit c9b63deb533274ca8ef4939f6cd13f728a067f7b)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/731abae5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/731abae5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/731abae5

Branch: refs/heads/branch-3.0
Commit: 731abae55716a3023f145dce3bed2ef81ccb81a1
Parents: 4711980
Author: Andrew Wang <w...@apache.org>
Authored: Thu May 24 09:56:23 2018 -0700
Committer: Andrew Wang <w...@apache.org>
Committed: Thu May 24 09:56:40 2018 -0700

--
 .../java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/731abae5/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
index f973952..b7f7f9f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
@@ -240,7 +240,7 @@ public class PBHelperClient {
 ByteString value = fixedByteStringCache.get(key);
 if (value == null) {
   value = ByteString.copyFromUtf8(key.toString());
-  fixedByteStringCache.put(key, value);
+  fixedByteStringCache.put(new Text(key.copyBytes()), value);
 }
 return value;
   }


-
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org



hadoop git commit: HDFS-13611. Unsafe use of Text as a ConcurrentHashMap key in PBHelperClient.

2018-05-24 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/trunk 1388de18a -> c9b63deb5


HDFS-13611. Unsafe use of Text as a ConcurrentHashMap key in PBHelperClient.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c9b63deb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c9b63deb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c9b63deb

Branch: refs/heads/trunk
Commit: c9b63deb533274ca8ef4939f6cd13f728a067f7b
Parents: 1388de1
Author: Andrew Wang <w...@apache.org>
Authored: Thu May 24 09:56:23 2018 -0700
Committer: Andrew Wang <w...@apache.org>
Committed: Thu May 24 09:56:23 2018 -0700

--
 .../java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c9b63deb/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
index 579ac43..490ccb4 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
@@ -247,7 +247,7 @@ public class PBHelperClient {
 ByteString value = fixedByteStringCache.get(key);
 if (value == null) {
   value = ByteString.copyFromUtf8(key.toString());
-  fixedByteStringCache.put(key, value);
+  fixedByteStringCache.put(new Text(key.copyBytes()), value);
 }
 return value;
   }


-
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org



hadoop git commit: HDFS-13611. Unsafe use of Text as a ConcurrentHashMap key in PBHelperClient.

2018-05-24 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 de43b41d0 -> 31d597f0c


HDFS-13611. Unsafe use of Text as a ConcurrentHashMap key in PBHelperClient.

(cherry picked from commit c9b63deb533274ca8ef4939f6cd13f728a067f7b)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/31d597f0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/31d597f0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/31d597f0

Branch: refs/heads/branch-3.1
Commit: 31d597f0c771b874071fc066058cbe9511ef517c
Parents: de43b41
Author: Andrew Wang <w...@apache.org>
Authored: Thu May 24 09:56:23 2018 -0700
Committer: Andrew Wang <w...@apache.org>
Committed: Thu May 24 09:56:37 2018 -0700

--
 .../java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/31d597f0/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
index 579ac43..490ccb4 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
@@ -247,7 +247,7 @@ public class PBHelperClient {
 ByteString value = fixedByteStringCache.get(key);
 if (value == null) {
   value = ByteString.copyFromUtf8(key.toString());
-  fixedByteStringCache.put(key, value);
+  fixedByteStringCache.put(new Text(key.copyBytes()), value);
 }
 return value;
   }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDFS-13601. Optimize ByteString conversions in PBHelper.

2018-05-23 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 2bbf1b443 -> d47c0fc39


HDFS-13601. Optimize ByteString conversions in PBHelper.

(cherry picked from commit 1d2640b6132e8308c07476badd2d1482be68a298)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d47c0fc3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d47c0fc3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d47c0fc3

Branch: refs/heads/branch-3.1
Commit: d47c0fc39a48dace6c7a2332ee041885cc4bb89c
Parents: 2bbf1b4
Author: Andrew Wang <w...@apache.org>
Authored: Tue May 22 23:55:20 2018 -0700
Committer: Andrew Wang <w...@apache.org>
Committed: Tue May 22 23:55:35 2018 -0700

--
 .../dev-support/findbugsExcludeFile.xml |  5 ++
 .../apache/hadoop/hdfs/protocol/DatanodeID.java | 50 +--
 .../hadoop/hdfs/protocolPB/PBHelperClient.java  | 67 +---
 .../TestDataXceiverBackwardsCompat.java | 10 +++
 4 files changed, 118 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d47c0fc3/hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml 
b/hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml
index 8e2bc94..fa9654b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml
@@ -91,5 +91,10 @@
 
 
   
+  
+
+
+
+  
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d47c0fc3/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java
index af720c7..718661e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.hdfs.protocol;
 
+import com.google.protobuf.ByteString;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 
@@ -44,7 +45,9 @@ public class DatanodeID implements Comparable {
   "null", "null", 0, 0, 0, 0);
 
   private String ipAddr; // IP address
+  private ByteString ipAddrBytes; // ipAddr ByteString to save on PB serde
   private String hostName;   // hostname claimed by datanode
+  private ByteString hostNameBytes; // hostName ByteString to save on PB serde
   private String peerHostName; // hostname from the actual connection
   private int xferPort;  // data streaming port
   private int infoPort;  // info server port
@@ -58,6 +61,8 @@ public class DatanodeID implements Comparable {
* For newly formatted Datanodes it is a UUID.
*/
   private final String datanodeUuid;
+  // datanodeUuid ByteString to save on PB serde
+  private final ByteString datanodeUuidBytes;
 
   public DatanodeID(DatanodeID from) {
 this(from.getDatanodeUuid(), from);
@@ -66,8 +71,11 @@ public class DatanodeID implements Comparable {
   @VisibleForTesting
   public DatanodeID(String datanodeUuid, DatanodeID from) {
 this(from.getIpAddr(),
+from.getIpAddrBytes(),
 from.getHostName(),
+from.getHostNameBytes(),
 datanodeUuid,
+getByteString(datanodeUuid),
 from.getXferPort(),
 from.getInfoPort(),
 from.getInfoSecurePort(),
@@ -89,22 +97,43 @@ public class DatanodeID implements Comparable {
*/
   public DatanodeID(String ipAddr, String hostName, String datanodeUuid,
   int xferPort, int infoPort, int infoSecurePort, int ipcPort) {
-setIpAndXferPort(ipAddr, xferPort);
+this(ipAddr, getByteString(ipAddr),
+hostName, getByteString(hostName),
+datanodeUuid, getByteString(datanodeUuid),
+xferPort, infoPort, infoSecurePort, ipcPort);
+  }
+
+  private DatanodeID(String ipAddr, ByteString ipAddrBytes,
+  String hostName, ByteString hostNameBytes,
+  String datanodeUuid, ByteString datanodeUuidBytes,
+  int xferPort, int infoPort, int infoSecurePort, int ipcPort) {
+setIpAndXferPort(ipAddr, ipAddrBytes, xferPort);
 this.hostName = hostName;
+this.hostNameBytes = hostNameBytes;
 this.datanodeUuid = checkDatanodeUuid(datanodeUuid);
+this.da

hadoop git commit: HDFS-13601. Optimize ByteString conversions in PBHelper.

2018-05-23 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 5dcd57cbe -> 0f0d29a8d


HDFS-13601. Optimize ByteString conversions in PBHelper.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0f0d29a8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0f0d29a8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0f0d29a8

Branch: refs/heads/branch-2
Commit: 0f0d29a8d10538f3fba180bc6f32332c577527b5
Parents: 5dcd57c
Author: Andrew Wang <w...@apache.org>
Authored: Tue May 22 23:58:11 2018 -0700
Committer: Andrew Wang <w...@apache.org>
Committed: Tue May 22 23:58:11 2018 -0700

--
 .../dev-support/findbugsExcludeFile.xml |  7 ++
 .../apache/hadoop/hdfs/protocol/DatanodeID.java | 50 +--
 .../hadoop/hdfs/protocolPB/PBHelperClient.java  | 67 +---
 3 files changed, 110 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0f0d29a8/hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml 
b/hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml
index 2b3295e..c36631d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml
@@ -59,4 +59,11 @@
 
 
   
+
+  
+
+
+
+  
+
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0f0d29a8/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java
index e94c07d..235d5a7 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.hdfs.protocol;
 
+import com.google.protobuf.ByteString;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 
@@ -42,7 +43,9 @@ public class DatanodeID implements Comparable {
   public static final DatanodeID[] EMPTY_ARRAY = {};
 
   private String ipAddr; // IP address
+  private ByteString ipAddrBytes; // ipAddr ByteString to save on PB serde
   private String hostName;   // hostname claimed by datanode
+  private ByteString hostNameBytes; // hostName ByteString to save on PB serde
   private String peerHostName; // hostname from the actual connection
   private int xferPort;  // data streaming port
   private int infoPort;  // info server port
@@ -56,6 +59,8 @@ public class DatanodeID implements Comparable {
* For newly formatted Datanodes it is a UUID.
*/
   private final String datanodeUuid;
+  // datanodeUuid ByteString to save on PB serde
+  private final ByteString datanodeUuidBytes;
 
   public DatanodeID(DatanodeID from) {
 this(from.getDatanodeUuid(), from);
@@ -64,8 +69,11 @@ public class DatanodeID implements Comparable {
   @VisibleForTesting
   public DatanodeID(String datanodeUuid, DatanodeID from) {
 this(from.getIpAddr(),
+from.getIpAddrBytes(),
 from.getHostName(),
+from.getHostNameBytes(),
 datanodeUuid,
+getByteString(datanodeUuid),
 from.getXferPort(),
 from.getInfoPort(),
 from.getInfoSecurePort(),
@@ -87,22 +95,43 @@ public class DatanodeID implements Comparable {
*/
   public DatanodeID(String ipAddr, String hostName, String datanodeUuid,
   int xferPort, int infoPort, int infoSecurePort, int ipcPort) {
-setIpAndXferPort(ipAddr, xferPort);
+this(ipAddr, getByteString(ipAddr),
+hostName, getByteString(hostName),
+datanodeUuid, getByteString(datanodeUuid),
+xferPort, infoPort, infoSecurePort, ipcPort);
+  }
+
+  private DatanodeID(String ipAddr, ByteString ipAddrBytes,
+  String hostName, ByteString hostNameBytes,
+  String datanodeUuid, ByteString datanodeUuidBytes,
+  int xferPort, int infoPort, int infoSecurePort, int ipcPort) {
+setIpAndXferPort(ipAddr, ipAddrBytes, xferPort);
 this.hostName = hostName;
+this.hostNameBytes = hostNameBytes;
 this.datanodeUuid = checkDatanodeUuid(datanodeUuid);
+this.datanodeUuidBytes = datanodeUuidBytes;
 this.infoPort = infoPort;
 this.infoSecurePort = infoSecurePort;
 this.ipcPort = ipcPort;
  

hadoop git commit: HDFS-13601. Optimize ByteString conversions in PBHelper.

2018-05-23 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 96313e03c -> 5b3a6e367


HDFS-13601. Optimize ByteString conversions in PBHelper.

(cherry picked from commit 1d2640b6132e8308c07476badd2d1482be68a298)
(cherry picked from commit d47c0fc39a48dace6c7a2332ee041885cc4bb89c)

 Conflicts:

hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5b3a6e36
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5b3a6e36
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5b3a6e36

Branch: refs/heads/branch-3.0
Commit: 5b3a6e36784ad49c39de41635a188f8b3f6c9b7d
Parents: 96313e0
Author: Andrew Wang <w...@apache.org>
Authored: Tue May 22 23:55:20 2018 -0700
Committer: Andrew Wang <w...@apache.org>
Committed: Tue May 22 23:57:05 2018 -0700

--
 .../dev-support/findbugsExcludeFile.xml |  7 ++
 .../apache/hadoop/hdfs/protocol/DatanodeID.java | 50 +--
 .../hadoop/hdfs/protocolPB/PBHelperClient.java  | 67 +---
 .../TestDataXceiverBackwardsCompat.java | 10 +++
 4 files changed, 120 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5b3a6e36/hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml 
b/hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml
index 9d6ab9a..fca7d09 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml
@@ -60,4 +60,11 @@
 
 
   
+
+  
+
+
+
+  
+
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5b3a6e36/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java
index af720c7..718661e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.hdfs.protocol;
 
+import com.google.protobuf.ByteString;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 
@@ -44,7 +45,9 @@ public class DatanodeID implements Comparable {
   "null", "null", 0, 0, 0, 0);
 
   private String ipAddr; // IP address
+  private ByteString ipAddrBytes; // ipAddr ByteString to save on PB serde
   private String hostName;   // hostname claimed by datanode
+  private ByteString hostNameBytes; // hostName ByteString to save on PB serde
   private String peerHostName; // hostname from the actual connection
   private int xferPort;  // data streaming port
   private int infoPort;  // info server port
@@ -58,6 +61,8 @@ public class DatanodeID implements Comparable {
* For newly formatted Datanodes it is a UUID.
*/
   private final String datanodeUuid;
+  // datanodeUuid ByteString to save on PB serde
+  private final ByteString datanodeUuidBytes;
 
   public DatanodeID(DatanodeID from) {
 this(from.getDatanodeUuid(), from);
@@ -66,8 +71,11 @@ public class DatanodeID implements Comparable {
   @VisibleForTesting
   public DatanodeID(String datanodeUuid, DatanodeID from) {
 this(from.getIpAddr(),
+from.getIpAddrBytes(),
 from.getHostName(),
+from.getHostNameBytes(),
 datanodeUuid,
+getByteString(datanodeUuid),
 from.getXferPort(),
 from.getInfoPort(),
 from.getInfoSecurePort(),
@@ -89,22 +97,43 @@ public class DatanodeID implements Comparable {
*/
   public DatanodeID(String ipAddr, String hostName, String datanodeUuid,
   int xferPort, int infoPort, int infoSecurePort, int ipcPort) {
-setIpAndXferPort(ipAddr, xferPort);
+this(ipAddr, getByteString(ipAddr),
+hostName, getByteString(hostName),
+datanodeUuid, getByteString(datanodeUuid),
+xferPort, infoPort, infoSecurePort, ipcPort);
+  }
+
+  private DatanodeID(String ipAddr, ByteString ipAddrBytes,
+  String hostName, ByteString hostNameBytes,
+  String datanodeUuid, ByteString datanodeUuidBytes,
+  int xferPort, int infoPort, int infoSecurePort, int ipcPort) {
+setIpAndXferPort(ipAddr, ipAddrB

hadoop git commit: HDFS-13601. Optimize ByteString conversions in PBHelper.

2018-05-23 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/trunk 5a9140690 -> 1d2640b61


HDFS-13601. Optimize ByteString conversions in PBHelper.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1d2640b6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1d2640b6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1d2640b6

Branch: refs/heads/trunk
Commit: 1d2640b6132e8308c07476badd2d1482be68a298
Parents: 5a91406
Author: Andrew Wang <w...@apache.org>
Authored: Tue May 22 23:55:20 2018 -0700
Committer: Andrew Wang <w...@apache.org>
Committed: Tue May 22 23:55:20 2018 -0700

--
 .../dev-support/findbugsExcludeFile.xml |  5 ++
 .../apache/hadoop/hdfs/protocol/DatanodeID.java | 50 +--
 .../hadoop/hdfs/protocolPB/PBHelperClient.java  | 67 +---
 .../TestDataXceiverBackwardsCompat.java | 10 +++
 4 files changed, 118 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1d2640b6/hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml 
b/hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml
index 8e2bc94..fa9654b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml
@@ -91,5 +91,10 @@
 
 
   
+  
+
+
+
+  
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1d2640b6/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java
index af720c7..718661e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.hdfs.protocol;
 
+import com.google.protobuf.ByteString;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 
@@ -44,7 +45,9 @@ public class DatanodeID implements Comparable {
   "null", "null", 0, 0, 0, 0);
 
   private String ipAddr; // IP address
+  private ByteString ipAddrBytes; // ipAddr ByteString to save on PB serde
   private String hostName;   // hostname claimed by datanode
+  private ByteString hostNameBytes; // hostName ByteString to save on PB serde
   private String peerHostName; // hostname from the actual connection
   private int xferPort;  // data streaming port
   private int infoPort;  // info server port
@@ -58,6 +61,8 @@ public class DatanodeID implements Comparable {
* For newly formatted Datanodes it is a UUID.
*/
   private final String datanodeUuid;
+  // datanodeUuid ByteString to save on PB serde
+  private final ByteString datanodeUuidBytes;
 
   public DatanodeID(DatanodeID from) {
 this(from.getDatanodeUuid(), from);
@@ -66,8 +71,11 @@ public class DatanodeID implements Comparable {
   @VisibleForTesting
   public DatanodeID(String datanodeUuid, DatanodeID from) {
 this(from.getIpAddr(),
+from.getIpAddrBytes(),
 from.getHostName(),
+from.getHostNameBytes(),
 datanodeUuid,
+getByteString(datanodeUuid),
 from.getXferPort(),
 from.getInfoPort(),
 from.getInfoSecurePort(),
@@ -89,22 +97,43 @@ public class DatanodeID implements Comparable {
*/
   public DatanodeID(String ipAddr, String hostName, String datanodeUuid,
   int xferPort, int infoPort, int infoSecurePort, int ipcPort) {
-setIpAndXferPort(ipAddr, xferPort);
+this(ipAddr, getByteString(ipAddr),
+hostName, getByteString(hostName),
+datanodeUuid, getByteString(datanodeUuid),
+xferPort, infoPort, infoSecurePort, ipcPort);
+  }
+
+  private DatanodeID(String ipAddr, ByteString ipAddrBytes,
+  String hostName, ByteString hostNameBytes,
+  String datanodeUuid, ByteString datanodeUuidBytes,
+  int xferPort, int infoPort, int infoSecurePort, int ipcPort) {
+setIpAndXferPort(ipAddr, ipAddrBytes, xferPort);
 this.hostName = hostName;
+this.hostNameBytes = hostNameBytes;
 this.datanodeUuid = checkDatanodeUuid(datanodeUuid);
+this.datanodeUuidBytes = datanodeUuidBytes;
 this.infoPort = infoPort;
 t

[hadoop] Git Push Summary

2017-12-18 Thread wang
Repository: hadoop
Updated Tags:  refs/tags/rel/release-3.0.0 [created] 426dd

-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



svn commit: r1818175 - in /hadoop/common/site/main: author/src/documentation/content/xdocs/ publish/ publish/docs/ publish/docs/r3.0.0/ publish/docs/r3.0.0/api/ publish/docs/r3.0.0/api/org/ publish/do

2017-12-14 Thread wang
Author: wang
Date: Thu Dec 14 18:14:23 2017
New Revision: 1818175

URL: http://svn.apache.org/viewvc?rev=1818175&view=rev
Log:
Update site for 3.0.0 release


[This commit notification would consist of 4410 parts, 
which exceeds the limit of 50 ones, so it was shortened to the summary.]

-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



svn commit: r1818176 - in /hadoop/common/site/main: author/src/documentation/content/xdocs/ publish/

2017-12-14 Thread wang
Author: wang
Date: Thu Dec 14 18:21:26 2017
New Revision: 1818176

URL: http://svn.apache.org/viewvc?rev=1818176&view=rev
Log:
Fix the 3.0.0 table on Hadoop releases page

Modified:
hadoop/common/site/main/author/src/documentation/content/xdocs/releases.xml
hadoop/common/site/main/publish/bylaws.pdf
hadoop/common/site/main/publish/committer_criteria.pdf
hadoop/common/site/main/publish/index.pdf
hadoop/common/site/main/publish/issue_tracking.pdf
hadoop/common/site/main/publish/linkmap.pdf
hadoop/common/site/main/publish/mailing_lists.pdf
hadoop/common/site/main/publish/privacy_policy.pdf
hadoop/common/site/main/publish/releases.html
hadoop/common/site/main/publish/releases.pdf
hadoop/common/site/main/publish/version_control.pdf
hadoop/common/site/main/publish/versioning.pdf
hadoop/common/site/main/publish/who.pdf

Modified: 
hadoop/common/site/main/author/src/documentation/content/xdocs/releases.xml
URL: 
http://svn.apache.org/viewvc/hadoop/common/site/main/author/src/documentation/content/xdocs/releases.xml?rev=1818176&r1=1818175&r2=1818176&view=diff
==
--- hadoop/common/site/main/author/src/documentation/content/xdocs/releases.xml 
(original)
+++ hadoop/common/site/main/author/src/documentation/content/xdocs/releases.xml 
Thu Dec 14 18:21:26 2017
@@ -37,6 +37,13 @@
  https://dist.apache.org/repos/dist/release/hadoop/common/hadoop-3.0.0/hadoop-3.0.0-src.tar.gz.mds;>checksum
 file


+ 
+ 
+ http://www.apache.org/dyn/closer.cgi/hadoop/common/hadoop-3.0.0/hadoop-3.0.0.tar.gz;>binary
+ https://dist.apache.org/repos/dist/release/hadoop/common/hadoop-3.0.0/hadoop-3.0.0.tar.gz.asc;>signature
+ https://dist.apache.org/repos/dist/release/hadoop/common/hadoop-3.0.0/hadoop-3.0.0.tar.gz.mds;>checksum
 file
+   
+   
  2.9.0
  17 November, 2017
  http://www.apache.org/dyn/closer.cgi/hadoop/common/hadoop-2.9.0/hadoop-2.9.0-src.tar.gz;>source
@@ -51,13 +58,6 @@
  https://dist.apache.org/repos/dist/release/hadoop/common/hadoop-2.9.0/hadoop-2.9.0.tar.gz.mds;>checksum
 file


- 
- 
- http://www.apache.org/dyn/closer.cgi/hadoop/common/hadoop-3.0.0-beta1/hadoop-3.0.0-beta1.tar.gz;>binary
- https://dist.apache.org/repos/dist/release/hadoop/common/hadoop-3.0.0-beta1/hadoop-3.0.0-beta1.tar.gz.asc;>signature
- https://dist.apache.org/repos/dist/release/hadoop/common/hadoop-3.0.0-beta1/hadoop-3.0.0-beta1.tar.gz.mds;>checksum
 file
-   
-   
  2.8.2
  24 Oct, 2017
  http://www.apache.org/dyn/closer.cgi/hadoop/common/hadoop-2.8.2/hadoop-2.8.2-src.tar.gz;>source

Modified: hadoop/common/site/main/publish/bylaws.pdf
URL: 
http://svn.apache.org/viewvc/hadoop/common/site/main/publish/bylaws.pdf?rev=1818176&r1=1818175&r2=1818176&view=diff
==
Binary files - no diff available.

Modified: hadoop/common/site/main/publish/committer_criteria.pdf
URL: 
http://svn.apache.org/viewvc/hadoop/common/site/main/publish/committer_criteria.pdf?rev=1818176&r1=1818175&r2=1818176&view=diff
==
Binary files - no diff available.

Modified: hadoop/common/site/main/publish/index.pdf
URL: 
http://svn.apache.org/viewvc/hadoop/common/site/main/publish/index.pdf?rev=1818176&r1=1818175&r2=1818176&view=diff
==
Binary files - no diff available.

Modified: hadoop/common/site/main/publish/issue_tracking.pdf
URL: 
http://svn.apache.org/viewvc/hadoop/common/site/main/publish/issue_tracking.pdf?rev=1818176&r1=1818175&r2=1818176&view=diff
==
Binary files - no diff available.

Modified: hadoop/common/site/main/publish/linkmap.pdf
URL: 
http://svn.apache.org/viewvc/hadoop/common/site/main/publish/linkmap.pdf?rev=1818176&r1=1818175&r2=1818176&view=diff
==
Binary files - no diff available.

Modified: hadoop/common/site/main/publish/mailing_lists.pdf
URL: 
http://svn.apache.org/viewvc/hadoop/common/site/main/publish/mailing_lists.pdf?rev=1818176&r1=1818175&r2=1818176&view=diff
==
Binary files - no diff available.

Modified: hadoop/common/site/main/publish/privacy_policy.pdf
URL: 
http://svn.apache.org/viewvc/hadoop/common/site/main/publish/privacy_policy.pdf?rev=1818176&r1=1818175&r2=1818176&view=diff
==
Binary files - no diff available.

Modified: hadoop/common/site/main/publish/releases.html
URL: 
http://svn.apache.org/viewvc/hadoop/common/site/main/publish/releases.html?re

[hadoop] Git Push Summary

2017-12-13 Thread wang
Repository: hadoop
Updated Tags:  refs/tags/rel/release- [created] f474b7d22

-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: Update CHANGES, RELEASENOTES, jdiff for 3.0.0 release.

2017-12-13 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 6d814ae06 -> 58225e403


Update CHANGES, RELEASENOTES, jdiff for 3.0.0 release.

(cherry picked from commit d447152d4925a5f84d28a8ebd561286b39134d75)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/58225e40
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/58225e40
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/58225e40

Branch: refs/heads/branch-3.0
Commit: 58225e4030d86effe6a04abf1d21f868f6bae1d5
Parents: 6d814ae
Author: Andrew Wang <w...@apache.org>
Authored: Wed Dec 13 13:05:32 2017 -0800
Committer: Andrew Wang <w...@apache.org>
Committed: Wed Dec 13 13:05:45 2017 -0800

--
 .../markdown/release/3.0.0/CHANGES.3.0.0.md | 360 +++
 .../release/3.0.0/RELEASENOTES.3.0.0.md | 139 +++
 .../jdiff/Apache_Hadoop_HDFS_3.0.0.xml  | 324 +
 hadoop-project-dist/pom.xml |   2 +-
 4 files changed, 824 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/58225e40/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.0/CHANGES.3.0.0.md
--
diff --git 
a/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.0/CHANGES.3.0.0.md
 
b/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.0/CHANGES.3.0.0.md
new file mode 100644
index 000..a0c59da
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.0/CHANGES.3.0.0.md
@@ -0,0 +1,360 @@
+
+
+# Apache Hadoop Changelog
+
+## Release 3.0.0 - 2017-12-08
+
+### INCOMPATIBLE CHANGES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|: |: | :--- |: |: |: |
+| [YARN-6623](https://issues.apache.org/jira/browse/YARN-6623) | Add support 
to turn off launching privileged containers in the container-executor |  
Blocker | nodemanager | Varun Vasudev | Varun Vasudev |
+| [HADOOP-14816](https://issues.apache.org/jira/browse/HADOOP-14816) | Update 
Dockerfile to use Xenial |  Major | build, test | Allen Wittenauer | Allen 
Wittenauer |
+| [HADOOP-14957](https://issues.apache.org/jira/browse/HADOOP-14957) | 
ReconfigurationTaskStatus is exposing guava Optional in its public api |  Major 
| common | Haibo Chen | Xiao Chen |
+| [MAPREDUCE-6983](https://issues.apache.org/jira/browse/MAPREDUCE-6983) | 
Moving logging APIs over to slf4j in hadoop-mapreduce-client-core |  Major | . 
| Jinjiang Ling | Jinjiang Ling |
+| [HDFS-12682](https://issues.apache.org/jira/browse/HDFS-12682) | ECAdmin 
-listPolicies will always show SystemErasureCodingPolicies state as DISABLED |  
Blocker | erasure-coding | Xiao Chen | Xiao Chen |
+
+
+### NEW FEATURES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|: |: | :--- |: |: |: |
+| [YARN-1492](https://issues.apache.org/jira/browse/YARN-1492) | truly shared 
cache for jars (jobjar/libjar) |  Major | . | Sangjin Lee | Chris Trezzo |
+| [HDFS-10467](https://issues.apache.org/jira/browse/HDFS-10467) | 
Router-based HDFS federation |  Major | fs | Íñigo Goiri | Íñigo Goiri |
+| [YARN-5734](https://issues.apache.org/jira/browse/YARN-5734) | OrgQueue for 
easy CapacityScheduler queue configuration management |  Major | . | Min Shen | 
Min Shen |
+| [MAPREDUCE-5951](https://issues.apache.org/jira/browse/MAPREDUCE-5951) | Add 
support for the YARN Shared Cache |  Major | . | Chris Trezzo | Chris Trezzo |
+| [YARN-6871](https://issues.apache.org/jira/browse/YARN-6871) | Add 
additional deSelects params in RMWebServices#getAppReport |  Major | 
resourcemanager, router | Giovanni Matteo Fumarola | Tanuj Nayak |
+| [HADOOP-14840](https://issues.apache.org/jira/browse/HADOOP-14840) | Tool to 
estimate resource requirements of an application pipeline based on prior 
executions |  Major | tools | Subru Krishnan | Rui Li |
+| [YARN-3813](https://issues.apache.org/jira/browse/YARN-3813) | Support 
Application timeout feature in YARN. |  Major | scheduler | nijel | Rohith 
Sharma K S |
+
+
+### IMPROVEMENTS:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|: |: | :--- |: |: |: |
+| [YARN-7045](https://issues.apache.org/jira/browse/YARN-7045) | Remove 
FSLeafQueue#addAppSchedulable |  Major | fairscheduler | Yufei Gu | Sen Zhao |
+| [YARN-7240](https://issues.apache.org/jira/browse/YARN-7240) | Add more 
states and transitions to stabilize the NM Container state machine |  Major | . 
| Arun Suresh | kartheek muthyala |
+| [HADOOP-14909](https://issues.apache.org/jira/browse/HADOOP-14909) | Fix the 
word of "erasure encoding" in the top page |  Trivial | documentation | 
Takanobu Asanuma | Takanobu A

hadoop git commit: Update CHANGES, RELEASENOTES, jdiff for 3.0.0 release.

2017-12-13 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/trunk cb87e4dc9 -> d447152d4


Update CHANGES, RELEASENOTES, jdiff for 3.0.0 release.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d447152d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d447152d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d447152d

Branch: refs/heads/trunk
Commit: d447152d4925a5f84d28a8ebd561286b39134d75
Parents: cb87e4d
Author: Andrew Wang <w...@apache.org>
Authored: Wed Dec 13 13:05:32 2017 -0800
Committer: Andrew Wang <w...@apache.org>
Committed: Wed Dec 13 13:05:32 2017 -0800

--
 .../markdown/release/3.0.0/CHANGES.3.0.0.md | 360 +++
 .../release/3.0.0/RELEASENOTES.3.0.0.md | 139 +++
 .../jdiff/Apache_Hadoop_HDFS_3.0.0.xml  | 324 +
 hadoop-project-dist/pom.xml |   2 +-
 4 files changed, 824 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d447152d/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.0/CHANGES.3.0.0.md
--
diff --git 
a/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.0/CHANGES.3.0.0.md
 
b/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.0/CHANGES.3.0.0.md
new file mode 100644
index 000..a0c59da
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.0/CHANGES.3.0.0.md
@@ -0,0 +1,360 @@
+
+
+# Apache Hadoop Changelog
+
+## Release 3.0.0 - 2017-12-08
+
+### INCOMPATIBLE CHANGES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|: |: | :--- |: |: |: |
+| [YARN-6623](https://issues.apache.org/jira/browse/YARN-6623) | Add support 
to turn off launching privileged containers in the container-executor |  
Blocker | nodemanager | Varun Vasudev | Varun Vasudev |
+| [HADOOP-14816](https://issues.apache.org/jira/browse/HADOOP-14816) | Update 
Dockerfile to use Xenial |  Major | build, test | Allen Wittenauer | Allen 
Wittenauer |
+| [HADOOP-14957](https://issues.apache.org/jira/browse/HADOOP-14957) | 
ReconfigurationTaskStatus is exposing guava Optional in its public api |  Major 
| common | Haibo Chen | Xiao Chen |
+| [MAPREDUCE-6983](https://issues.apache.org/jira/browse/MAPREDUCE-6983) | 
Moving logging APIs over to slf4j in hadoop-mapreduce-client-core |  Major | . 
| Jinjiang Ling | Jinjiang Ling |
+| [HDFS-12682](https://issues.apache.org/jira/browse/HDFS-12682) | ECAdmin 
-listPolicies will always show SystemErasureCodingPolicies state as DISABLED |  
Blocker | erasure-coding | Xiao Chen | Xiao Chen |
+
+
+### NEW FEATURES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|: |: | :--- |: |: |: |
+| [YARN-1492](https://issues.apache.org/jira/browse/YARN-1492) | truly shared 
cache for jars (jobjar/libjar) |  Major | . | Sangjin Lee | Chris Trezzo |
+| [HDFS-10467](https://issues.apache.org/jira/browse/HDFS-10467) | 
Router-based HDFS federation |  Major | fs | Íñigo Goiri | Íñigo Goiri |
+| [YARN-5734](https://issues.apache.org/jira/browse/YARN-5734) | OrgQueue for 
easy CapacityScheduler queue configuration management |  Major | . | Min Shen | 
Min Shen |
+| [MAPREDUCE-5951](https://issues.apache.org/jira/browse/MAPREDUCE-5951) | Add 
support for the YARN Shared Cache |  Major | . | Chris Trezzo | Chris Trezzo |
+| [YARN-6871](https://issues.apache.org/jira/browse/YARN-6871) | Add 
additional deSelects params in RMWebServices#getAppReport |  Major | 
resourcemanager, router | Giovanni Matteo Fumarola | Tanuj Nayak |
+| [HADOOP-14840](https://issues.apache.org/jira/browse/HADOOP-14840) | Tool to 
estimate resource requirements of an application pipeline based on prior 
executions |  Major | tools | Subru Krishnan | Rui Li |
+| [YARN-3813](https://issues.apache.org/jira/browse/YARN-3813) | Support 
Application timeout feature in YARN. |  Major | scheduler | nijel | Rohith 
Sharma K S |
+
+
+### IMPROVEMENTS:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|: |: | :--- |: |: |: |
+| [YARN-7045](https://issues.apache.org/jira/browse/YARN-7045) | Remove 
FSLeafQueue#addAppSchedulable |  Major | fairscheduler | Yufei Gu | Sen Zhao |
+| [YARN-7240](https://issues.apache.org/jira/browse/YARN-7240) | Add more 
states and transitions to stabilize the NM Container state machine |  Major | . 
| Arun Suresh | kartheek muthyala |
+| [HADOOP-14909](https://issues.apache.org/jira/browse/HADOOP-14909) | Fix the 
word of "erasure encoding" in the top page |  Trivial | documentation | 
Takanobu Asanuma | Takanobu Asanuma |
+| [HADOOP-14095](https://issues.apache.org/jira/browse/HADOOP-14095) 

[hadoop] Git Push Summary

2017-12-11 Thread wang
Repository: hadoop
Updated Tags:  refs/tags/release-3.0.0-RC1 [created] 32e5a9a97

-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: YARN-7290. Method canContainerBePreempted can return true when it shouldn't. (Contributed by Steven Rand)

2017-12-08 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0.0 0510ceede -> c25427cec


YARN-7290. Method canContainerBePreempted can return true when it shouldn't. 
(Contributed by Steven Rand)

(cherry picked from commit 2bde3aedf139368fc71f053d8dd6580b498ff46d)
(cherry picked from commit f335d509d3a778f11265d3f45800dd6e75f7be59)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c25427ce
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c25427ce
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c25427ce

Branch: refs/heads/branch-3.0.0
Commit: c25427ceca461ee979d30edd7a4b0f50718e6533
Parents: 0510cee
Author: Yufei Gu <yu...@apache.org>
Authored: Fri Nov 24 23:32:46 2017 -0800
Committer: Andrew Wang <w...@apache.org>
Committed: Fri Dec 8 11:00:22 2017 -0800

--
 .../scheduler/fair/FSAppAttempt.java| 23 +--
 .../scheduler/fair/FSPreemptionThread.java  | 68 ++--
 .../fair/TestFairSchedulerPreemption.java   | 37 ---
 3 files changed, 93 insertions(+), 35 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c25427ce/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
index bbd4418..2aa45b8 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
@@ -588,7 +588,8 @@ public class FSAppAttempt extends 
SchedulerApplicationAttempt
 }
   }
 
-  boolean canContainerBePreempted(RMContainer container) {
+  boolean canContainerBePreempted(RMContainer container,
+  Resource alreadyConsideringForPreemption) {
 if (!isPreemptable()) {
   return false;
 }
@@ -610,6 +611,15 @@ public class FSAppAttempt extends 
SchedulerApplicationAttempt
 
 // Check if the app's allocation will be over its fairshare even
 // after preempting this container
+Resource usageAfterPreemption = getUsageAfterPreemptingContainer(
+container.getAllocatedResource(),
+alreadyConsideringForPreemption);
+
+return !isUsageBelowShare(usageAfterPreemption, getFairShare());
+  }
+
+  private Resource getUsageAfterPreemptingContainer(Resource 
containerResources,
+  Resource alreadyConsideringForPreemption) {
 Resource usageAfterPreemption = Resources.clone(getResourceUsage());
 
 // Subtract resources of containers already queued for preemption
@@ -617,10 +627,13 @@ public class FSAppAttempt extends 
SchedulerApplicationAttempt
   Resources.subtractFrom(usageAfterPreemption, resourcesToBePreempted);
 }
 
-// Subtract this container's allocation to compute usage after preemption
-Resources.subtractFrom(
-usageAfterPreemption, container.getAllocatedResource());
-return !isUsageBelowShare(usageAfterPreemption, getFairShare());
+// Subtract resources of this container and other containers of this app
+// that the FSPreemptionThread is already considering for preemption.
+Resources.subtractFrom(usageAfterPreemption, containerResources);
+Resources.subtractFrom(usageAfterPreemption,
+alreadyConsideringForPreemption);
+
+return usageAfterPreemption;
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c25427ce/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSPreemptionThread.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSPreemptionThread.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSPreemptionThread.java
index b3e59c5..47e580d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/had

hadoop git commit: YARN-7469. Capacity Scheduler Intra-queue preemption: User can starve if newest app is exactly at user limit. Contributed by Eric Payne.

2017-12-08 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0.0 b1712f9c1 -> 0510ceede


YARN-7469. Capacity Scheduler Intra-queue preemption: User can starve if newest 
app is exactly at user limit. Contributed by Eric Payne.

(cherry picked from commit 61ace174cdcbca9d22abce7aa0aa71148f37ad55)
(cherry picked from commit c3fb49667a4c11d993056e9e3c8ca4ec9479538f)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0510ceed
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0510ceed
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0510ceed

Branch: refs/heads/branch-3.0.0
Commit: 0510ceede63cc6a7b35a866a6e33d0ff9da1e3af
Parents: b1712f9
Author: Sunil G <sun...@apache.org>
Authored: Thu Nov 16 22:34:23 2017 +0530
Committer: Andrew Wang <w...@apache.org>
Committed: Fri Dec 8 10:59:09 2017 -0800

--
 .../FifoIntraQueuePreemptionPlugin.java |  6 
 ...alCapacityPreemptionPolicyMockFramework.java |  3 ++
 ...cityPreemptionPolicyIntraQueueUserLimit.java | 35 
 3 files changed, 44 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0510ceed/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/FifoIntraQueuePreemptionPlugin.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/FifoIntraQueuePreemptionPlugin.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/FifoIntraQueuePreemptionPlugin.java
index 00ae3da..3332f2a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/FifoIntraQueuePreemptionPlugin.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/FifoIntraQueuePreemptionPlugin.java
@@ -203,6 +203,12 @@ public class FifoIntraQueuePreemptionPlugin
   Resources.subtractFromNonNegative(preemtableFromApp, tmpApp.selected);
   Resources.subtractFromNonNegative(preemtableFromApp, tmpApp.getAMUsed());
 
+  if (context.getIntraQueuePreemptionOrderPolicy()
+.equals(IntraQueuePreemptionOrderPolicy.USERLIMIT_FIRST)) {
+Resources.subtractFromNonNegative(preemtableFromApp,
+  
tmpApp.getFiCaSchedulerApp().getCSLeafQueue().getMinimumAllocation());
+  }
+
   // Calculate toBePreempted from apps as follows:
   // app.preemptable = min(max(app.used - app.selected - app.ideal, 0),
   // intra_q_preemptable)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0510ceed/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicyMockFramework.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicyMockFramework.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicyMockFramework.java
index 4fc0ea4..0bc5cb5 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicyMockFramework.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicyMockFramework.java
@@ -358,6 +358,9 @@ public class 
ProportionalCapacityPreemptionPolicyMockFramework {
   queue = (LeafQueue) nameToCSQueues.get(queueName);
   queue.getApplications().add(app);
   queue.getAllApplications().add(app);
+  when(queue.getMinimumAllocation())
+  .thenReturn(Resource.newInstance(1,1));
+  when(app.getCSLeafQueue()).thenReturn(queue);
 
   HashSet users = userMap.get(queueName);
   if (null == users) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0510ceed/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/had

[hadoop] Git Push Summary

2017-11-14 Thread wang
Repository: hadoop
Updated Tags:  refs/tags/release-3.0.0-RC0 [created] 92583ab14

-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: Update maven versions in preparation for 3.0.0 release.

2017-11-14 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0.0 [created] 3853c3346


Update maven versions in preparation for 3.0.0 release.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3853c334
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3853c334
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3853c334

Branch: refs/heads/branch-3.0.0
Commit: 3853c33466f7aad56f6981194d1810a42d10e02e
Parents: 3f96ecf
Author: Andrew Wang <w...@apache.org>
Authored: Tue Nov 14 11:12:22 2017 -0800
Committer: Andrew Wang <w...@apache.org>
Committed: Tue Nov 14 11:12:22 2017 -0800

--
 hadoop-assemblies/pom.xml| 4 ++--
 hadoop-build-tools/pom.xml   | 2 +-
 hadoop-client-modules/hadoop-client-api/pom.xml  | 4 ++--
 hadoop-client-modules/hadoop-client-check-invariants/pom.xml | 4 ++--
 .../hadoop-client-check-test-invariants/pom.xml  | 4 ++--
 hadoop-client-modules/hadoop-client-integration-tests/pom.xml| 4 ++--
 hadoop-client-modules/hadoop-client-minicluster/pom.xml  | 4 ++--
 hadoop-client-modules/hadoop-client-runtime/pom.xml  | 4 ++--
 hadoop-client-modules/hadoop-client/pom.xml  | 4 ++--
 hadoop-client-modules/pom.xml| 2 +-
 hadoop-cloud-storage-project/hadoop-cloud-storage/pom.xml| 4 ++--
 hadoop-cloud-storage-project/pom.xml | 4 ++--
 hadoop-common-project/hadoop-annotations/pom.xml | 4 ++--
 hadoop-common-project/hadoop-auth-examples/pom.xml   | 4 ++--
 hadoop-common-project/hadoop-auth/pom.xml| 4 ++--
 hadoop-common-project/hadoop-common/pom.xml  | 4 ++--
 hadoop-common-project/hadoop-kms/pom.xml | 4 ++--
 hadoop-common-project/hadoop-minikdc/pom.xml | 4 ++--
 hadoop-common-project/hadoop-nfs/pom.xml | 4 ++--
 hadoop-common-project/pom.xml| 4 ++--
 hadoop-dist/pom.xml  | 4 ++--
 hadoop-hdfs-project/hadoop-hdfs-client/pom.xml   | 4 ++--
 hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml   | 4 ++--
 hadoop-hdfs-project/hadoop-hdfs-native-client/pom.xml| 4 ++--
 hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml  | 4 ++--
 hadoop-hdfs-project/hadoop-hdfs/pom.xml  | 4 ++--
 hadoop-hdfs-project/pom.xml  | 4 ++--
 .../hadoop-mapreduce-client/hadoop-mapreduce-client-app/pom.xml  | 4 ++--
 .../hadoop-mapreduce-client-common/pom.xml   | 4 ++--
 .../hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml | 4 ++--
 .../hadoop-mapreduce-client-hs-plugins/pom.xml   | 4 ++--
 .../hadoop-mapreduce-client/hadoop-mapreduce-client-hs/pom.xml   | 4 ++--
 .../hadoop-mapreduce-client-jobclient/pom.xml| 4 ++--
 .../hadoop-mapreduce-client-nativetask/pom.xml   | 4 ++--
 .../hadoop-mapreduce-client-shuffle/pom.xml  | 4 ++--
 hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml | 4 ++--
 hadoop-mapreduce-project/hadoop-mapreduce-examples/pom.xml   | 4 ++--
 hadoop-mapreduce-project/pom.xml | 4 ++--
 hadoop-maven-plugins/pom.xml | 2 +-
 hadoop-minicluster/pom.xml   | 4 ++--
 hadoop-project-dist/pom.xml  | 4 ++--
 hadoop-project/pom.xml   | 4 ++--
 hadoop-tools/hadoop-aliyun/pom.xml   | 2 +-
 hadoop-tools/hadoop-archive-logs/pom.xml | 4 ++--
 hadoop-tools/hadoop-archives/pom.xml | 4 ++--
 hadoop-tools/hadoop-aws/pom.xml  | 4 ++--
 hadoop-tools/hadoop-azure-datalake/pom.xml   | 2 +-
 hadoop-tools/hadoop-azure/pom.xml| 2 +-
 hadoop-tools/hadoop-datajoin/pom.xml | 4 ++--
 hadoop-tools/hadoop-distcp/pom.xml   | 4 ++--
 hadoop-tools/hadoop-extras/pom.xml   | 4 ++--
 hadoop-tools/hadoop-gridmix/pom.xml  | 4 ++--
 hadoop-tools/hadoop-kafka/pom.xml| 4 ++--
 hadoop-tools/hadoop-openstack/pom.xml| 4 ++--
 hadoop-tools/hadoop-pipes/pom.xml| 4 ++--
 hadoop-tools/hadoop-resourceestimator/pom.xml| 2 +-
 hadoop-tools/hadoop-rumen/pom.xml   

hadoop git commit: Preparing for 3.0.1 development

2017-11-14 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 3f96ecf5c -> 1c8d33428


Preparing for 3.0.1 development


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1c8d3342
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1c8d3342
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1c8d3342

Branch: refs/heads/branch-3.0
Commit: 1c8d334287abc10fe710bc2354f6b29de07421c4
Parents: 3f96ecf
Author: Andrew Wang <w...@apache.org>
Authored: Tue Nov 14 11:17:18 2017 -0800
Committer: Andrew Wang <w...@apache.org>
Committed: Tue Nov 14 11:17:18 2017 -0800

--
 hadoop-assemblies/pom.xml| 4 ++--
 hadoop-build-tools/pom.xml   | 2 +-
 hadoop-client-modules/hadoop-client-api/pom.xml  | 4 ++--
 hadoop-client-modules/hadoop-client-check-invariants/pom.xml | 4 ++--
 .../hadoop-client-check-test-invariants/pom.xml  | 4 ++--
 hadoop-client-modules/hadoop-client-integration-tests/pom.xml| 4 ++--
 hadoop-client-modules/hadoop-client-minicluster/pom.xml  | 4 ++--
 hadoop-client-modules/hadoop-client-runtime/pom.xml  | 4 ++--
 hadoop-client-modules/hadoop-client/pom.xml  | 4 ++--
 hadoop-client-modules/pom.xml| 2 +-
 hadoop-cloud-storage-project/hadoop-cloud-storage/pom.xml| 4 ++--
 hadoop-cloud-storage-project/pom.xml | 4 ++--
 hadoop-common-project/hadoop-annotations/pom.xml | 4 ++--
 hadoop-common-project/hadoop-auth-examples/pom.xml   | 4 ++--
 hadoop-common-project/hadoop-auth/pom.xml| 4 ++--
 hadoop-common-project/hadoop-common/pom.xml  | 4 ++--
 hadoop-common-project/hadoop-kms/pom.xml | 4 ++--
 hadoop-common-project/hadoop-minikdc/pom.xml | 4 ++--
 hadoop-common-project/hadoop-nfs/pom.xml | 4 ++--
 hadoop-common-project/pom.xml| 4 ++--
 hadoop-dist/pom.xml  | 4 ++--
 hadoop-hdfs-project/hadoop-hdfs-client/pom.xml   | 4 ++--
 hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml   | 4 ++--
 hadoop-hdfs-project/hadoop-hdfs-native-client/pom.xml| 4 ++--
 hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml  | 4 ++--
 hadoop-hdfs-project/hadoop-hdfs/pom.xml  | 4 ++--
 hadoop-hdfs-project/pom.xml  | 4 ++--
 .../hadoop-mapreduce-client/hadoop-mapreduce-client-app/pom.xml  | 4 ++--
 .../hadoop-mapreduce-client-common/pom.xml   | 4 ++--
 .../hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml | 4 ++--
 .../hadoop-mapreduce-client-hs-plugins/pom.xml   | 4 ++--
 .../hadoop-mapreduce-client/hadoop-mapreduce-client-hs/pom.xml   | 4 ++--
 .../hadoop-mapreduce-client-jobclient/pom.xml| 4 ++--
 .../hadoop-mapreduce-client-nativetask/pom.xml   | 4 ++--
 .../hadoop-mapreduce-client-shuffle/pom.xml  | 4 ++--
 hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml | 4 ++--
 hadoop-mapreduce-project/hadoop-mapreduce-examples/pom.xml   | 4 ++--
 hadoop-mapreduce-project/pom.xml | 4 ++--
 hadoop-maven-plugins/pom.xml | 2 +-
 hadoop-minicluster/pom.xml   | 4 ++--
 hadoop-project-dist/pom.xml  | 4 ++--
 hadoop-project/pom.xml   | 4 ++--
 hadoop-tools/hadoop-aliyun/pom.xml   | 2 +-
 hadoop-tools/hadoop-archive-logs/pom.xml | 4 ++--
 hadoop-tools/hadoop-archives/pom.xml | 4 ++--
 hadoop-tools/hadoop-aws/pom.xml  | 4 ++--
 hadoop-tools/hadoop-azure-datalake/pom.xml   | 2 +-
 hadoop-tools/hadoop-azure/pom.xml| 2 +-
 hadoop-tools/hadoop-datajoin/pom.xml | 4 ++--
 hadoop-tools/hadoop-distcp/pom.xml   | 4 ++--
 hadoop-tools/hadoop-extras/pom.xml   | 4 ++--
 hadoop-tools/hadoop-gridmix/pom.xml  | 4 ++--
 hadoop-tools/hadoop-kafka/pom.xml| 4 ++--
 hadoop-tools/hadoop-openstack/pom.xml| 4 ++--
 hadoop-tools/hadoop-pipes/pom.xml| 4 ++--
 hadoop-tools/hadoop-resourceestimator/pom.xml| 2 +-
 hadoop-tools/hadoop-rumen/pom.xml| 4 ++--
 hadoop-t

hadoop git commit: HADOOP-15037. Add site release notes for OrgQueue and resource types.

2017-11-13 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 ef212b855 -> 3f96ecf5c


HADOOP-15037. Add site release notes for OrgQueue and resource types.

(cherry picked from commit 8b125741659a825c71877bd1b1cb8f7e3ef26436)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3f96ecf5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3f96ecf5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3f96ecf5

Branch: refs/heads/branch-3.0
Commit: 3f96ecf5c3d38da181078b5704471d5b36467be7
Parents: ef212b8
Author: Andrew Wang <w...@apache.org>
Authored: Mon Nov 13 18:49:22 2017 -0800
Committer: Andrew Wang <w...@apache.org>
Committed: Mon Nov 13 18:49:50 2017 -0800

--
 hadoop-project/src/site/markdown/index.md.vm | 20 +++-
 1 file changed, 19 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3f96ecf5/hadoop-project/src/site/markdown/index.md.vm
--
diff --git a/hadoop-project/src/site/markdown/index.md.vm 
b/hadoop-project/src/site/markdown/index.md.vm
index 8e1e06f..9b2d9de 100644
--- a/hadoop-project/src/site/markdown/index.md.vm
+++ b/hadoop-project/src/site/markdown/index.md.vm
@@ -225,9 +225,27 @@ cluster for existing HDFS clients.
 
 See [HDFS-10467](https://issues.apache.org/jira/browse/HADOOP-10467) and the
 HDFS Router-based Federation
-[documentation](./hadoop-project-dist/hadoop-hdfs/HDFSRouterFederation.md) for
+[documentation](./hadoop-project-dist/hadoop-hdfs/HDFSRouterFederation.html) 
for
 more details.
 
+API-based configuration of Capacity Scheduler queue configuration
+--
+
+The OrgQueue extension to the capacity scheduler provides a programmatic way to
+change configurations by providing a REST API that users can call to modify
+queue configurations. This enables automation of queue configuration management
+by administrators in the queue's `administer_queue` ACL.
+
+See [YARN-5734](https://issues.apache.org/jira/browse/YARN-5734) and the
+[Capacity Scheduler 
documentation](./hadoop-yarn/hadoop-yarn-site/CapacityScheduler.html) for more 
information.
+
+YARN Resource Types
+---
+
+The YARN resource model has been generalized to support user-defined countable 
resource types beyond CPU and memory. For instance, the cluster administrator 
could define resources like GPUs, software licenses, or locally-attached 
storage. YARN tasks can then be scheduled based on the availability of these 
resources.
+
+See [YARN-3926](https://issues.apache.org/jira/browse/YARN-3926) and the [YARN 
resource model 
documentation](./hadoop-yarn/hadoop-yarn-site/ResourceModel.html) for more 
information.
+
 Getting Started
 ===
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HADOOP-15037. Add site release notes for OrgQueue and resource types.

2017-11-13 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/trunk 5323b0048 -> 8b1257416


HADOOP-15037. Add site release notes for OrgQueue and resource types.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8b125741
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8b125741
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8b125741

Branch: refs/heads/trunk
Commit: 8b125741659a825c71877bd1b1cb8f7e3ef26436
Parents: 5323b00
Author: Andrew Wang <w...@apache.org>
Authored: Mon Nov 13 18:49:22 2017 -0800
Committer: Andrew Wang <w...@apache.org>
Committed: Mon Nov 13 18:49:22 2017 -0800

--
 hadoop-project/src/site/markdown/index.md.vm | 20 +++-
 1 file changed, 19 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8b125741/hadoop-project/src/site/markdown/index.md.vm
--
diff --git a/hadoop-project/src/site/markdown/index.md.vm 
b/hadoop-project/src/site/markdown/index.md.vm
index 8e1e06f..9b2d9de 100644
--- a/hadoop-project/src/site/markdown/index.md.vm
+++ b/hadoop-project/src/site/markdown/index.md.vm
@@ -225,9 +225,27 @@ cluster for existing HDFS clients.
 
 See [HDFS-10467](https://issues.apache.org/jira/browse/HADOOP-10467) and the
 HDFS Router-based Federation
-[documentation](./hadoop-project-dist/hadoop-hdfs/HDFSRouterFederation.md) for
+[documentation](./hadoop-project-dist/hadoop-hdfs/HDFSRouterFederation.html) 
for
 more details.
 
+API-based configuration of Capacity Scheduler queue configuration
+--
+
+The OrgQueue extension to the capacity scheduler provides a programmatic way to
+change configurations by providing a REST API that users can call to modify
+queue configurations. This enables automation of queue configuration management
+by administrators in the queue's `administer_queue` ACL.
+
+See [YARN-5734](https://issues.apache.org/jira/browse/YARN-5734) and the
+[Capacity Scheduler 
documentation](./hadoop-yarn/hadoop-yarn-site/CapacityScheduler.html) for more 
information.
+
+YARN Resource Types
+---
+
+The YARN resource model has been generalized to support user-defined countable 
resource types beyond CPU and memory. For instance, the cluster administrator 
could define resources like GPUs, software licenses, or locally-attached 
storage. YARN tasks can then be scheduled based on the availability of these 
resources.
+
+See [YARN-3926](https://issues.apache.org/jira/browse/YARN-3926) and the [YARN 
resource model 
documentation](./hadoop-yarn/hadoop-yarn-site/ResourceModel.html) for more 
information.
+
 Getting Started
 ===
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: YARN-6909. Use LightWeightedResource when number of resource types more than two. (Sunil G via wangda)

2017-11-13 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 cb48eea92 -> 7b3cd1013


YARN-6909. Use LightWeightedResource when number of resource types more than 
two. (Sunil G via wangda)

Change-Id: I90e021c5dea7abd9ec6bd73b2287c8adebe14595


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7b3cd101
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7b3cd101
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7b3cd101

Branch: refs/heads/branch-3.0
Commit: 7b3cd10131ede9172c7682e2116aa76f05590a99
Parents: cb48eea
Author: Wangda Tan <wan...@apache.org>
Authored: Thu Nov 9 14:51:15 2017 -0800
Committer: Andrew Wang <w...@apache.org>
Committed: Mon Nov 13 14:45:40 2017 -0800

--
 .../hadoop/yarn/api/records/Resource.java   | 48 ++
 .../api/records/impl/LightWeightResource.java   | 94 +---
 .../api/records/impl/pb/ResourcePBImpl.java | 88 +-
 .../scheduler/ClusterNodeTracker.java   |  2 +-
 4 files changed, 141 insertions(+), 91 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b3cd101/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
index 14131cb..e863d68 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
@@ -28,9 +28,9 @@ import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.classification.InterfaceStability.Evolving;
 import org.apache.hadoop.classification.InterfaceStability.Stable;
 import org.apache.hadoop.yarn.api.ApplicationMasterProtocol;
+import org.apache.hadoop.yarn.api.protocolrecords.ResourceTypes;
 import org.apache.hadoop.yarn.api.records.impl.LightWeightResource;
 import org.apache.hadoop.yarn.exceptions.ResourceNotFoundException;
-import org.apache.hadoop.yarn.util.Records;
 import org.apache.hadoop.yarn.util.resource.ResourceUtils;
 
 /**
@@ -75,34 +75,27 @@ public abstract class Resource implements 
Comparable {
   @Public
   @Stable
   public static Resource newInstance(int memory, int vCores) {
-if (ResourceUtils.getNumberOfKnownResourceTypes() > 2) {
-  Resource ret = Records.newRecord(Resource.class);
-  ret.setMemorySize(memory);
-  ret.setVirtualCores(vCores);
-  return ret;
-}
 return new LightWeightResource(memory, vCores);
   }
 
   @Public
   @Stable
   public static Resource newInstance(long memory, int vCores) {
-if (ResourceUtils.getNumberOfKnownResourceTypes() > 2) {
-  Resource ret = Records.newRecord(Resource.class);
-  ret.setMemorySize(memory);
-  ret.setVirtualCores(vCores);
-  return ret;
-}
 return new LightWeightResource(memory, vCores);
   }
 
   @InterfaceAudience.Private
   @InterfaceStability.Unstable
   public static Resource newInstance(Resource resource) {
-Resource ret = Resource.newInstance(resource.getMemorySize(),
-resource.getVirtualCores());
-if (ResourceUtils.getNumberOfKnownResourceTypes() > 2) {
-  Resource.copy(resource, ret);
+Resource ret;
+int numberOfKnownResourceTypes = ResourceUtils
+.getNumberOfKnownResourceTypes();
+if (numberOfKnownResourceTypes > 2) {
+  ret = new LightWeightResource(resource.getMemorySize(),
+  resource.getVirtualCores(), resource.getResources());
+} else {
+  ret = new LightWeightResource(resource.getMemorySize(),
+  resource.getVirtualCores());
 }
 return ret;
   }
@@ -411,7 +404,7 @@ public abstract class Resource implements 
Comparable {
 int arrLenOther = otherResources.length;
 
 // compare memory and vcores first(in that order) to preserve
-// existing behaviour
+// existing behavior.
 for (int i = 0; i < arrLenThis; i++) {
   ResourceInformation otherEntry;
   try {
@@ -483,4 +476,23 @@ public abstract class Resource implements 
Comparable {
 }
 return Long.valueOf(value).intValue();
   }
+
+  /**
+   * Create ResourceInformation with basic fields.
+   * @param name Resource Type Name
+   * @param unit Default unit of provided resource type
+   * @param value Value associated with given resource
+   * @return ResourceInformation object
+   */
+  protected static ResourceInformation newDefaultInformation(String name,
+  String unit, long val

hadoop git commit: HADOOP-15018. Update JAVA_HOME in create-release for Xenial Dockerfile.

2017-11-07 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/trunk 8db9d61ac -> 51e882d5c


HADOOP-15018. Update JAVA_HOME in create-release for Xenial Dockerfile.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/51e882d5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/51e882d5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/51e882d5

Branch: refs/heads/trunk
Commit: 51e882d5c9fd2f55cd9ac2eafd3e59eb4f239d9d
Parents: 8db9d61
Author: Andrew Wang <w...@apache.org>
Authored: Tue Nov 7 16:38:53 2017 -0800
Committer: Andrew Wang <w...@apache.org>
Committed: Tue Nov 7 16:39:04 2017 -0800

--
 dev-support/bin/create-release | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/51e882d5/dev-support/bin/create-release
--
diff --git a/dev-support/bin/create-release b/dev-support/bin/create-release
index b98c058..694820b 100755
--- a/dev-support/bin/create-release
+++ b/dev-support/bin/create-release
@@ -489,9 +489,9 @@ function dockermode
 echo "RUN mkdir -p /maven"
 echo "RUN chown -R ${user_name} /maven"
 
-# we always force build with the Oracle JDK
+# we always force build with the OpenJDK JDK
 # but with the correct version
-echo "ENV JAVA_HOME /usr/lib/jvm/java-${JVM_VERSION}-oracle"
+echo "ENV JAVA_HOME /usr/lib/jvm/java-${JVM_VERSION}-openjdk-amd64"
 echo "USER ${user_name}"
 printf "\n\n"
   ) | docker build -t "${imgname}" -


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HADOOP-15018. Update JAVA_HOME in create-release for Xenial Dockerfile.

2017-11-07 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 4e7b5824b -> 47f2db2c7


HADOOP-15018. Update JAVA_HOME in create-release for Xenial Dockerfile.

(cherry picked from commit 51e882d5c9fd2f55cd9ac2eafd3e59eb4f239d9d)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/47f2db2c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/47f2db2c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/47f2db2c

Branch: refs/heads/branch-3.0
Commit: 47f2db2c7d93c4bf336c3771f06d980d16d3aa43
Parents: 4e7b582
Author: Andrew Wang <w...@apache.org>
Authored: Tue Nov 7 16:38:53 2017 -0800
Committer: Andrew Wang <w...@apache.org>
Committed: Tue Nov 7 16:39:06 2017 -0800

--
 dev-support/bin/create-release | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/47f2db2c/dev-support/bin/create-release
--
diff --git a/dev-support/bin/create-release b/dev-support/bin/create-release
index b98c058..694820b 100755
--- a/dev-support/bin/create-release
+++ b/dev-support/bin/create-release
@@ -489,9 +489,9 @@ function dockermode
 echo "RUN mkdir -p /maven"
 echo "RUN chown -R ${user_name} /maven"
 
-# we always force build with the Oracle JDK
+# we always force build with the OpenJDK JDK
 # but with the correct version
-echo "ENV JAVA_HOME /usr/lib/jvm/java-${JVM_VERSION}-oracle"
+echo "ENV JAVA_HOME /usr/lib/jvm/java-${JVM_VERSION}-openjdk-amd64"
 echo "USER ${user_name}"
 printf "\n\n"
   ) | docker build -t "${imgname}" -


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: YARN-3661. Basic Federation UI. (Contributed by Inigo Goiri via curino)

2017-10-31 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 e785e8327 -> 1fbc72704


YARN-3661. Basic Federation UI. (Contributed by Inigo Goiri via curino)

(cherry picked from commit ceca9694f9a0c78d07cab2c382036f175183e67b)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1fbc7270
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1fbc7270
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1fbc7270

Branch: refs/heads/branch-3.0
Commit: 1fbc72704e3c9bab8cbbc23fcddda6d36f09c59d
Parents: e785e83
Author: Carlo Curino <cur...@apache.org>
Authored: Mon Oct 2 13:03:32 2017 -0700
Committer: Andrew Wang <w...@apache.org>
Committed: Tue Oct 31 13:42:36 2017 -0700

--
 .../yarn/server/router/webapp/AboutBlock.java   |  91 ++
 .../yarn/server/router/webapp/AboutPage.java|  37 
 .../yarn/server/router/webapp/AppsBlock.java| 130 ++
 .../yarn/server/router/webapp/AppsPage.java |  77 
 .../server/router/webapp/FederationBlock.java   | 176 +++
 .../server/router/webapp/FederationPage.java|  57 ++
 .../yarn/server/router/webapp/NavBlock.java |  46 +
 .../yarn/server/router/webapp/NodesBlock.java   | 109 
 .../yarn/server/router/webapp/NodesPage.java|  60 +++
 .../server/router/webapp/RouterController.java  |  60 +++
 .../yarn/server/router/webapp/RouterView.java   |  52 ++
 .../yarn/server/router/webapp/RouterWebApp.java |   6 +
 .../src/site/markdown/Federation.md |   6 +-
 13 files changed, 905 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1fbc7270/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/AboutBlock.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/AboutBlock.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/AboutBlock.java
new file mode 100644
index 000..cd588fc
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/AboutBlock.java
@@ -0,0 +1,91 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.router.webapp;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.RMWSConsts;
+import 
org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ClusterMetricsInfo;
+import org.apache.hadoop.yarn.server.router.Router;
+import org.apache.hadoop.yarn.webapp.util.WebAppUtils;
+import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
+import org.apache.hadoop.yarn.webapp.view.InfoBlock;
+
+import com.google.inject.Inject;
+
+/**
+ * About block for the Router Web UI.
+ */
+public class AboutBlock extends HtmlBlock {
+
+  private static final long BYTES_IN_MB = 1024 * 1024;
+
+  private final Router router;
+
+  @Inject
+  AboutBlock(Router router, ViewContext ctx) {
+super(ctx);
+this.router = router;
+  }
+
+  @Override
+  protected void render(Block html) {
+Configuration conf = this.router.getConfig();
+String webAppAddress = WebAppUtils.getRouterWebAppURLWithScheme(conf);
+
+ClusterMetricsInfo metrics = RouterWebServiceUtil.genericForward(
+webAppAddress, null, ClusterMetricsInfo.class, HTTPMethods.GET,
+RMWSConsts.RM_WEB_SERVICE_PATH + RMWSConsts.METRICS, null, null);
+boolean isEnabled = conf.getBoolean(
+YarnConfiguration.FEDERATION_ENABLED,
+YarnConfiguration.DEFAULT_FEDERATION_ENABLED);
+info("Cluster Status").
+__("Federation Enabled"

hadoop git commit: YARN-7389. Make TestResourceManager Scheduler agnostic. (Robert Kanter via Haibo Chen)

2017-10-31 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 cba1fa496 -> e785e8327


YARN-7389. Make TestResourceManager Scheduler agnostic. (Robert Kanter via 
Haibo Chen)

(cherry picked from commit d7f3737f3b3236fbf8c25fdd4656251ed63a2be9)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e785e832
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e785e832
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e785e832

Branch: refs/heads/branch-3.0
Commit: e785e83270efbfc6d47eee35a1a112643607f81a
Parents: cba1fa4
Author: Haibo Chen <haiboc...@apache.org>
Authored: Tue Oct 24 22:17:56 2017 -0700
Committer: Andrew Wang <w...@apache.org>
Committed: Tue Oct 31 13:41:44 2017 -0700

--
 .../yarn/server/resourcemanager/TestResourceManager.java| 9 ++---
 1 file changed, 2 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e785e832/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceManager.java
index ad8c335..941e477 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceManager.java
@@ -39,7 +39,7 @@ import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
 import 
org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState;
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
-import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.AbstractYarnScheduler;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAttemptRemovedSchedulerEvent;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeAddedSchedulerEvent;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeUpdateSchedulerEvent;
@@ -58,8 +58,6 @@ public class TestResourceManager {
   @Before
   public void setUp() throws Exception {
 Configuration conf = new YarnConfiguration();
-conf.set(YarnConfiguration.RM_SCHEDULER,
-CapacityScheduler.class.getCanonicalName());
 UserGroupInformation.setConfiguration(conf);
 resourceManager = new ResourceManager();
 resourceManager.init(conf);
@@ -133,6 +131,7 @@ public class TestResourceManager {
 
// Send a heartbeat to kick the tires on the Scheduler
 nodeUpdate(nm1);
+((AbstractYarnScheduler)resourceManager.getResourceScheduler()).update();
 
 // Get allocations from the scheduler
 application.schedule();
@@ -262,8 +261,6 @@ public class TestResourceManager {
 }
   };
   Configuration conf = new YarnConfiguration();
-  conf.set(YarnConfiguration.RM_SCHEDULER,
-CapacityScheduler.class.getCanonicalName());
   conf.set(filterInitializerConfKey, filterInitializer);
   conf.set("hadoop.security.authentication", "kerberos");
   conf.set("hadoop.http.authentication.type", "kerberos");
@@ -298,8 +295,6 @@ public class TestResourceManager {
 for (String filterInitializer : simpleFilterInitializers) {
   resourceManager = new ResourceManager();
   Configuration conf = new YarnConfiguration();
-  conf.set(YarnConfiguration.RM_SCHEDULER,
-CapacityScheduler.class.getCanonicalName());
   conf.set(filterInitializerConfKey, filterInitializer);
   try {
 UserGroupInformation.setConfiguration(conf);


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HADOOP-14915. method name is incorrect in ConfServlet. Contributed by Bharat Viswanadham.

2017-10-31 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 0b3e3e489 -> af52a585f


HADOOP-14915. method name is incorrect in ConfServlet. Contributed by Bharat 
Viswanadham.

(cherry picked from commit 563dcdfc1de7ea9ee7ce296163cf2678dfe5349c)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/af52a585
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/af52a585
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/af52a585

Branch: refs/heads/branch-3.0
Commit: af52a585f0494c29af3d87d22fdd58074a322b42
Parents: 0b3e3e4
Author: Chen Liang <cli...@apache.org>
Authored: Mon Oct 2 10:58:19 2017 -0700
Committer: Andrew Wang <w...@apache.org>
Committed: Tue Oct 31 12:54:28 2017 -0700

--
 .../src/main/java/org/apache/hadoop/conf/ConfServlet.java| 4 ++--
 .../src/test/java/org/apache/hadoop/conf/TestConfServlet.java| 2 +-
 2 files changed, 3 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/af52a585/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ConfServlet.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ConfServlet.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ConfServlet.java
index cfd7b97..2128de7 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ConfServlet.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ConfServlet.java
@@ -69,7 +69,7 @@ public class ConfServlet extends HttpServlet {
   return;
 }
 
-String format = parseAccecptHeader(request);
+String format = parseAcceptHeader(request);
 if (FORMAT_XML.equals(format)) {
   response.setContentType("text/xml; charset=utf-8");
 } else if (FORMAT_JSON.equals(format)) {
@@ -89,7 +89,7 @@ public class ConfServlet extends HttpServlet {
   }
 
   @VisibleForTesting
-  static String parseAccecptHeader(HttpServletRequest request) {
+  static String parseAcceptHeader(HttpServletRequest request) {
 String format = request.getHeader(HttpHeaders.ACCEPT);
 return format != null && format.contains(FORMAT_JSON) ?
 FORMAT_JSON : FORMAT_XML;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af52a585/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfServlet.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfServlet.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfServlet.java
index 53089ed..cf42219 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfServlet.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfServlet.java
@@ -98,7 +98,7 @@ public class TestConfServlet {
   Mockito.when(request.getHeader(HttpHeaders.ACCEPT))
   .thenReturn(contentTypeExpected);
   assertEquals(contenTypeActual,
-  ConfServlet.parseAccecptHeader(request));
+  ConfServlet.parseAcceptHeader(request));
 }
   }
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HADOOP-14880. [KMS] Document missing KMS client side configs. Contributed by Gabor Bota.

2017-10-31 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 af52a585f -> cba1fa496


HADOOP-14880. [KMS] Document missing KMS client side configs. Contributed 
by Gabor Bota.

(cherry picked from commit 97c70c7ac6881f87eee1575bcbdd28b31ecac231)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cba1fa49
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cba1fa49
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cba1fa49

Branch: refs/heads/branch-3.0
Commit: cba1fa496b6f64ceb688e88d77ec0f1133ebe704
Parents: af52a58
Author: Wei-Chiu Chuang <weic...@apache.org>
Authored: Thu Oct 19 06:02:13 2017 -0700
Committer: Andrew Wang <w...@apache.org>
Committed: Tue Oct 31 12:54:53 2017 -0700

--
 .../org/apache/hadoop/crypto/key/kms/KMSClientProvider.java | 8 +++-
 .../org/apache/hadoop/fs/CommonConfigurationKeysPublic.java | 9 +
 .../hadoop-common/src/main/resources/core-default.xml   | 8 
 .../org/apache/hadoop/crypto/key/kms/server/TestKMS.java| 3 ++-
 4 files changed, 22 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cba1fa49/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
index 9bef32c..be15201 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
@@ -120,10 +120,6 @@ public class KMSClientProvider extends KeyProvider 
implements CryptoExtension,
 
   private static final String CONFIG_PREFIX = "hadoop.security.kms.client.";
 
-  /* It's possible to specify a timeout, in seconds, in the config file */
-  public static final String TIMEOUT_ATTR = CONFIG_PREFIX + "timeout";
-  public static final int DEFAULT_TIMEOUT = 60;
-
   /* Number of times to retry authentication in the event of auth failure
* (normally happens due to stale authToken) 
*/
@@ -358,7 +354,9 @@ public class KMSClientProvider extends KeyProvider 
implements CryptoExtension,
 throw new IOException(ex);
   }
 }
-int timeout = conf.getInt(TIMEOUT_ATTR, DEFAULT_TIMEOUT);
+int timeout = conf.getInt(
+CommonConfigurationKeysPublic.KMS_CLIENT_TIMEOUT_SECONDS,
+CommonConfigurationKeysPublic.KMS_CLIENT_TIMEOUT_DEFAULT);
 authRetry = conf.getInt(AUTH_RETRY, DEFAULT_AUTH_RETRY);
 configurator = new TimeoutConnConfigurator(timeout, sslFactory);
 encKeyVersionQueue =

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cba1fa49/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
index 4fda2b8..3c8628c 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
@@ -726,6 +726,15 @@ public class CommonConfigurationKeysPublic {
* 
* core-default.xml
*/
+  public static final String KMS_CLIENT_TIMEOUT_SECONDS =
+  "hadoop.security.kms.client.timeout";
+  public static final int KMS_CLIENT_TIMEOUT_DEFAULT = 60;
+
+  /**
+   * @see
+   * 
+   * core-default.xml
+   */
   /** Default value is the number of providers specified. */
   public static final String KMS_CLIENT_FAILOVER_MAX_RETRIES_KEY =
   "hadoop.security.kms.client.failover.max.retries";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cba1fa49/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml 
b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index 22b0e0e..b013018 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -2328,6 +2328,14 @@
 key will be dropped. Default = 12hrs
   
 
+
+  hadoop.security.kms.client.time

hadoop git commit: HADOOP-14948. Document missing config key hadoop.treat.subject.external. Contributed by Ajay Kumar.

2017-10-31 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 1347e83c9 -> 0b3e3e489


HADOOP-14948. Document missing config key hadoop.treat.subject.external. 
Contributed by Ajay Kumar.

(cherry picked from commit e906108fc98a011630d12a43e557b81d7ef7ea5d)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0b3e3e48
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0b3e3e48
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0b3e3e48

Branch: refs/heads/branch-3.0
Commit: 0b3e3e489535675be4edb7d3f6fea9a609047928
Parents: 1347e83
Author: Wei-Chiu Chuang <weic...@apache.org>
Authored: Mon Oct 16 16:42:59 2017 -0700
Committer: Andrew Wang <w...@apache.org>
Committed: Tue Oct 31 12:54:03 2017 -0700

--
 .../hadoop-common/src/main/resources/core-default.xml  | 13 +
 1 file changed, 13 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0b3e3e48/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml 
b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index 4179bf9..22b0e0e 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -2751,4 +2751,17 @@
 the ZK CLI).
 
   
+  
+hadoop.treat.subject.external
+false
+
+  When creating UGI with UserGroupInformation(Subject), treat the passed
+  subject external if set to true, and assume the owner of the subject
+  should do the credential renewal.
+
+  When true this property will introduce an incompatible change which
+  may require changes in client code. For more details, see the jiras:
+  HADOOP-13805,HADOOP-13558.
+
+  
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDFS-12485. expunge may fail to remove trash from encryption zone. Contributed by Wei-Chiu Chuang.

2017-10-31 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 166c75a0e -> 1347e83c9


HDFS-12485. expunge may fail to remove trash from encryption zone. Contributed 
by Wei-Chiu Chuang.

(cherry picked from commit 8dbc8909c92d502d10a7f94d1de3171878a43b04)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1347e83c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1347e83c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1347e83c

Branch: refs/heads/branch-3.0
Commit: 1347e83c90158d37cb16bed79eafc18947cdf6cc
Parents: 166c75a
Author: Wei-Chiu Chuang <weic...@apache.org>
Authored: Mon Oct 16 12:57:48 2017 -0700
Committer: Andrew Wang <w...@apache.org>
Committed: Tue Oct 31 12:52:58 2017 -0700

--
 .../main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java   | 3 +--
 .../apache/hadoop/hdfs/TestTrashWithSecureEncryptionZones.java| 1 -
 2 files changed, 1 insertion(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1347e83c/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
index 2f9ed19..2a6bd21 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
@@ -2837,8 +2837,7 @@ public class DistributedFileSystem extends FileSystem {
 }
   }
 } else {
-  Path userTrash = new Path(ezTrashRoot, System.getProperty(
-  "user.name"));
+  Path userTrash = new Path(ezTrashRoot, dfs.ugi.getShortUserName());
   try {
 ret.add(getFileStatus(userTrash));
   } catch (FileNotFoundException ignored) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1347e83c/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestTrashWithSecureEncryptionZones.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestTrashWithSecureEncryptionZones.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestTrashWithSecureEncryptionZones.java
index 314adfb..a8e2a71 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestTrashWithSecureEncryptionZones.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestTrashWithSecureEncryptionZones.java
@@ -225,7 +225,6 @@ public class TestTrashWithSecureEncryptionZones {
 clientConf = new Configuration(conf);
 clientConf.setLong(FS_TRASH_INTERVAL_KEY, 1);
 shell = new FsShell(clientConf);
-System.setProperty("user.name", HDFS_USER_NAME);
   }
 
   @AfterClass


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDFS-12544. SnapshotDiff - support diff generation on any snapshot root descendant directory.

2017-10-31 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 1375c7205 -> 166c75a0e


HDFS-12544. SnapshotDiff - support diff generation on any snapshot root 
descendant directory.

(cherry picked from commit 075dd45a24398dcdcddd60da995f0dc152eee321)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/166c75a0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/166c75a0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/166c75a0

Branch: refs/heads/branch-3.0
Commit: 166c75a0eba403211ed0fef92a4c609cf815eaac
Parents: 1375c72
Author: Manoj Govindassamy <manoj...@apache.org>
Authored: Wed Oct 25 10:54:40 2017 -0700
Committer: Andrew Wang <w...@apache.org>
Committed: Tue Oct 31 12:50:54 2017 -0700

--
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |   7 +
 .../hdfs/server/namenode/INodeDirectory.java|  18 +
 .../snapshot/DirectorySnapshottableFeature.java |  38 +-
 .../namenode/snapshot/SnapshotDiffInfo.java |  14 +-
 .../namenode/snapshot/SnapshotManager.java  |  63 ++-
 .../src/main/resources/hdfs-default.xml |  11 +
 .../snapshot/TestSnapRootDescendantDiff.java|  80 +++
 .../snapshot/TestSnapshotDiffReport.java| 541 +--
 8 files changed, 709 insertions(+), 63 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/166c75a0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 582ae4e..0fc75f1 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -374,6 +374,13 @@ public class DFSConfigKeys extends CommonConfigurationKeys 
{
   public static final String 
DFS_NAMENODE_SNAPSHOT_SKIP_CAPTURE_ACCESSTIME_ONLY_CHANGE = 
"dfs.namenode.snapshot.skip.capture.accesstime-only-change";
   public static final boolean 
DFS_NAMENODE_SNAPSHOT_SKIP_CAPTURE_ACCESSTIME_ONLY_CHANGE_DEFAULT = false;
 
+  public static final String
+  DFS_NAMENODE_SNAPSHOT_DIFF_ALLOW_SNAP_ROOT_DESCENDANT =
+  "dfs.namenode.snapshotdiff.allow.snap-root-descendant";
+  public static final boolean
+  DFS_NAMENODE_SNAPSHOT_DIFF_ALLOW_SNAP_ROOT_DESCENDANT_DEFAULT =
+  true;
+
   // Whether to enable datanode's stale state detection and usage for reads
   public static final String DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_READ_KEY = 
"dfs.namenode.avoid.read.stale.datanode";
   public static final boolean 
DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_READ_DEFAULT = false;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/166c75a0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
index 3b7fa4e..dfb7a0a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
@@ -253,6 +253,24 @@ public class INodeDirectory extends 
INodeWithAdditionalFields
 return getDirectorySnapshottableFeature() != null;
   }
 
+  /**
+   * Check if this directory is a descendant directory
+   * of a snapshot root directory.
+   * @param snapshotRootDir the snapshot root directory
+   * @return true if this directory is a descendant of snapshot root
+   */
+  public boolean isDescendantOfSnapshotRoot(INodeDirectory snapshotRootDir) {
+Preconditions.checkArgument(snapshotRootDir.isSnapshottable());
+INodeDirectory dir = this;
+while(dir != null) {
+  if (dir.equals(snapshotRootDir)) {
+return true;
+  }
+  dir = dir.getParent();
+}
+return false;
+  }
+
   public Snapshot getSnapshot(byte[] snapshotName) {
 return getDirectorySnapshottableFeature().getSnapshot(snapshotName);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/166c75a0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectorySnapshottableFeature.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/Direc

hadoop git commit: HDFS-12573. Divide the total blocks metrics into replicated and erasure coded. Contributed by Takanobu Asanuma.

2017-10-31 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 014184127 -> 1375c7205


HDFS-12573. Divide the total blocks metrics into replicated and erasure coded. 
Contributed by Takanobu Asanuma.

(cherry picked from commit 78af6cdc5359404139665d81447f28d26b7bb43b)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1375c720
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1375c720
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1375c720

Branch: refs/heads/branch-3.0
Commit: 1375c7205feb8b7b1c366fee8645913e6e31c7e9
Parents: 0141841
Author: Manoj Govindassamy <manoj...@apache.org>
Authored: Tue Oct 10 14:23:29 2017 -0700
Committer: Andrew Wang <w...@apache.org>
Committed: Tue Oct 31 12:50:21 2017 -0700

--
 .../server/blockmanagement/BlockManager.java|  10 ++
 .../hdfs/server/blockmanagement/BlocksMap.java  |  40 ++-
 .../hdfs/server/namenode/FSNamesystem.java  |  12 +++
 .../namenode/metrics/ECBlockGroupsMBean.java|   5 +
 .../namenode/metrics/ReplicatedBlocksMBean.java |   5 +
 .../server/namenode/TestNameNodeMXBean.java | 107 ++-
 6 files changed, 177 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1375c720/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 49b385b..6cd67f6 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -257,6 +257,11 @@ public class BlockManager implements BlockStatsMXBean {
   }
 
   /** Used by metrics. */
+  public long getTotalReplicatedBlocks() {
+return blocksMap.getReplicatedBlocks();
+  }
+
+  /** Used by metrics. */
   public long getLowRedundancyECBlockGroups() {
 return neededReconstruction.getLowRedundancyECBlockGroups();
   }
@@ -276,6 +281,11 @@ public class BlockManager implements BlockStatsMXBean {
 return invalidateBlocks.getECBlocks();
   }
 
+  /** Used by metrics. */
+  public long getTotalECBlockGroups() {
+return blocksMap.getECBlockGroups();
+  }
+
   /**
* redundancyRecheckInterval is how often namenode checks for new
* reconstruction work.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1375c720/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java
index f7cde90..6f13da9 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs.server.blockmanagement;
 
 import java.util.Collections;
 import java.util.Iterator;
+import java.util.concurrent.atomic.LongAdder;
 
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.namenode.INodeId;
@@ -37,6 +38,9 @@ class BlocksMap {
   
   private GSet<Block, BlockInfo> blocks;
 
+  private final LongAdder totalReplicatedBlocks = new LongAdder();
+  private final LongAdder totalECBlockGroups = new LongAdder();
+
   BlocksMap(int capacity) {
 // Use 2% of total memory to size the GSet capacity
 this.capacity = capacity;
@@ -65,6 +69,8 @@ class BlocksMap {
   void clear() {
 if (blocks != null) {
   blocks.clear();
+  totalReplicatedBlocks.reset();
+  totalECBlockGroups.reset();
 }
   }
 
@@ -76,6 +82,7 @@ class BlocksMap {
 if (info != b) {
   info = b;
   blocks.put(info);
+  incrementBlockStat(info);
 }
 info.setBlockCollectionId(bc.getId());
 return info;
@@ -88,8 +95,10 @@ class BlocksMap {
*/
   void removeBlock(Block block) {
 BlockInfo blockInfo = blocks.remove(block);
-if (blockInfo == null)
+if (blockInfo == null) {
   return;
+}
+decrementBlockStat(block);
 
 assert blockInfo.getBlockCollectionId() == INodeId.INVALID_INODE_ID;
 final int size = blockInfo.isStriped() ?
@@ -166,6 +175,7 @@ class BlocksMap {
 

hadoop git commit: HDFS-12614. FSPermissionChecker#getINodeAttrs() throws NPE when INodeAttributesProvider configured.

2017-10-31 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 579612c9d -> 014184127


HDFS-12614. FSPermissionChecker#getINodeAttrs() throws NPE when 
INodeAttributesProvider configured.

(cherry picked from commit b406d8e3755d24ce72c443fd893a5672fd56babc)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/01418412
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/01418412
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/01418412

Branch: refs/heads/branch-3.0
Commit: 014184127ede1fb2e0b7b9663e005bcf90aec36b
Parents: 579612c
Author: Manoj Govindassamy <manoj...@apache.org>
Authored: Mon Oct 16 17:42:41 2017 -0700
Committer: Andrew Wang <w...@apache.org>
Committed: Tue Oct 31 12:49:41 2017 -0700

--
 .../server/namenode/FSPermissionChecker.java| 12 +++-
 .../namenode/TestINodeAttributeProvider.java| 60 ++--
 2 files changed, 54 insertions(+), 18 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/01418412/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java
index f745a6c..c854b49 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java
@@ -275,8 +275,16 @@ class FSPermissionChecker implements AccessControlEnforcer 
{
 INodeAttributes inodeAttrs = inode.getSnapshotINode(snapshotId);
 if (getAttributesProvider() != null) {
   String[] elements = new String[pathIdx + 1];
-  for (int i = 0; i < elements.length; i++) {
-elements[i] = DFSUtil.bytes2String(pathByNameArr[i]);
+  /**
+   * {@link INode#getPathComponents(String)} returns a null component
+   * for the root only path "/". Assign an empty string if so.
+   */
+  if (pathByNameArr.length == 1 && pathByNameArr[0] == null) {
+elements[0] = "";
+  } else {
+for (int i = 0; i < elements.length; i++) {
+  elements[i] = DFSUtil.bytes2String(pathByNameArr[i]);
+}
   }
   inodeAttrs = getAttributesProvider().getAttributes(elements, inodeAttrs);
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/01418412/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeAttributeProvider.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeAttributeProvider.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeAttributeProvider.java
index bbc5fa0..9c7dcd3 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeAttributeProvider.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeAttributeProvider.java
@@ -313,31 +313,59 @@ public class TestINodeAttributeProvider {
 testBypassProviderHelper(users, HDFS_PERMISSION, true);
   }
 
-  @Test
-  public void testCustomProvider() throws Exception {
+  private void verifyFileStatus(UserGroupInformation ugi) throws IOException {
 FileSystem fs = FileSystem.get(miniDFS.getConfiguration(0));
-fs.mkdirs(new Path("/user/xxx"));
-FileStatus status = fs.getFileStatus(new Path("/user/xxx"));
-Assert.assertEquals(System.getProperty("user.name"), status.getOwner());
+
+FileStatus status = fs.getFileStatus(new Path("/"));
+LOG.info("Path '/' is owned by: "
++ status.getOwner() + ":" + status.getGroup());
+
+Path userDir = new Path("/user/" + ugi.getShortUserName());
+fs.mkdirs(userDir);
+status = fs.getFileStatus(userDir);
+Assert.assertEquals(ugi.getShortUserName(), status.getOwner());
 Assert.assertEquals("supergroup", status.getGroup());
 Assert.assertEquals(new FsPermission((short) 0755), 
status.getPermission());
-fs.mkdirs(new Path("/user/authz"));
-Path p = new Path("/user/authz");
-status = fs.getFileStatus(p);
+
+Path authzDir = new Path("/user/authz");
+fs.mkdirs(authzDir);
+status = fs.getFileStatus(authzDir);
 Assert.assertEquals("foo", status.getOwner());
 Asser

hadoop git commit: HDFS-12619. Do not catch and throw unchecked exceptions if IBRs fail to process. Contributed by Wei-Chiu Chuang.

2017-10-31 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 143dbf849 -> 579612c9d


HDFS-12619. Do not catch and throw unchecked exceptions if IBRs fail to 
process. Contributed by Wei-Chiu Chuang.

(cherry picked from commit 4ab0c8f96a41c573cc1f1e71c18871d243f952b9)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/579612c9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/579612c9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/579612c9

Branch: refs/heads/branch-3.0
Commit: 579612c9d250578c100de30fbffdf2716466bc41
Parents: 143dbf8
Author: Wei-Chiu Chuang <weic...@apache.org>
Authored: Thu Oct 19 06:17:59 2017 -0700
Committer: Andrew Wang <w...@apache.org>
Committed: Tue Oct 31 12:49:08 2017 -0700

--
 .../hadoop/hdfs/server/blockmanagement/BlockManager.java  | 10 +++---
 1 file changed, 7 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/579612c9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index be6575d..49b385b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -3883,11 +3883,15 @@ public class BlockManager implements BlockStatsMXBean {
   throw new IOException(
   "Got incremental block report from unregistered or dead node");
 }
+
+boolean successful = false;
 try {
   processIncrementalBlockReport(node, srdb);
-} catch (Exception ex) {
-  node.setForceRegistration(true);
-  throw ex;
+  successful = true;
+} finally {
+  if (!successful) {
+node.setForceRegistration(true);
+  }
 }
   }
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDFS-12622. Fix enumerate in HDFSErasureCoding.md. Contributed by Yiqun Lin.

2017-10-31 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 0d9503d6b -> 143dbf849


HDFS-12622. Fix enumerate in HDFSErasureCoding.md. Contributed by Yiqun Lin.

(cherry picked from commit dc63a6a52b7fdf076ab83a774b0378f77c1c0cd3)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/143dbf84
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/143dbf84
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/143dbf84

Branch: refs/heads/branch-3.0
Commit: 143dbf8497d442a25f93612b7d88a2fbb01895de
Parents: 0d9503d
Author: Akira Ajisaka <aajis...@apache.org>
Authored: Wed Oct 11 18:09:53 2017 +0900
Committer: Andrew Wang <w...@apache.org>
Committed: Tue Oct 31 12:48:29 2017 -0700

--
 .../hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md  | 1 +
 1 file changed, 1 insertion(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/143dbf84/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
index 47b15ba..270201a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
@@ -140,6 +140,7 @@ Deployment
 ### Enable Intel ISA-L
 
   HDFS native implementation of default RS codec leverages Intel ISA-L library 
to improve the encoding and decoding calculation. To enable and use Intel 
ISA-L, there are three steps.
+
  1. Build ISA-L library. Please refer to the official site 
"https://github.com/01org/isa-l/" for detail information.
   2. Build Hadoop with ISA-L support. Please refer to "Intel ISA-L build 
options" section in "Build instructions for Hadoop" in (BUILDING.txt) in the 
source code.
   3. Use `-Dbundle.isal` to copy the contents of the `isal.lib` directory into 
the final tar file. Deploy Hadoop with the tar file. Make sure ISA-L is 
available on HDFS clients and DataNodes.


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: Revert "HDFS-12499. dfs.namenode.shared.edits.dir property is currently namenode specific key. Contributed by Bharat Viswanadham."

2017-10-31 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/trunk b922ba739 -> 5f681fa82


Revert "HDFS-12499. dfs.namenode.shared.edits.dir property is currently 
namenode specific key. Contributed by Bharat Viswanadham."

This reverts commit b922ba7393bd97b98e90f50f01b4cc664c44adb9.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5f681fa8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5f681fa8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5f681fa8

Branch: refs/heads/trunk
Commit: 5f681fa8216fb43dff8a3d21bf21e91d6c6f6d9c
Parents: b922ba7
Author: Andrew Wang <w...@apache.org>
Authored: Tue Oct 31 10:46:10 2017 -0700
Committer: Andrew Wang <w...@apache.org>
Committed: Tue Oct 31 10:46:10 2017 -0700

--
 .../java/org/apache/hadoop/hdfs/server/namenode/NameNode.java| 4 ++--
 .../src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java| 2 +-
 2 files changed, 3 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5f681fa8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
index 6125dea..32b873b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
@@ -248,6 +248,7 @@ public class NameNode extends ReconfigurableBase implements
 DFS_NAMENODE_RPC_BIND_HOST_KEY,
 DFS_NAMENODE_NAME_DIR_KEY,
 DFS_NAMENODE_EDITS_DIR_KEY,
+DFS_NAMENODE_SHARED_EDITS_DIR_KEY,
 DFS_NAMENODE_CHECKPOINT_DIR_KEY,
 DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY,
 DFS_NAMENODE_LIFELINE_RPC_ADDRESS_KEY,
@@ -277,8 +278,7 @@ public class NameNode extends ReconfigurableBase implements
* for a specific namenode.
*/
   public static final String[] NAMESERVICE_SPECIFIC_KEYS = {
-  DFS_HA_AUTO_FAILOVER_ENABLED_KEY,
-  DFS_NAMENODE_SHARED_EDITS_DIR_KEY
+DFS_HA_AUTO_FAILOVER_ENABLED_KEY
   };
 
   private String ipcClientRPCBackoffEnable;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5f681fa8/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
index 64d2322..39f76a5 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
@@ -402,7 +402,7 @@ public class TestDFSUtil {
   public void testSomeConfsNNSpecificSomeNSSpecific() {
 final HdfsConfiguration conf = new HdfsConfiguration();
 
-String key = DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY;
+String key = DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY;
 conf.set(key, "global-default");
 conf.set(key + ".ns1", "ns1-override");
 conf.set(key + ".ns1.nn1", "nn1-override");


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: Revert "HDFS-12499. dfs.namenode.shared.edits.dir property is currently namenode specific key. Contributed by Bharat Viswanadham."

2017-10-31 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 e25903006 -> ae08fe518


Revert "HDFS-12499. dfs.namenode.shared.edits.dir property is currently 
namenode specific key. Contributed by Bharat Viswanadham."

This reverts commit f09725537e4aef28c3e192a7aacad206900aa7b5.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ae08fe51
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ae08fe51
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ae08fe51

Branch: refs/heads/branch-3.0
Commit: ae08fe518b832c23a69fe859a62bb9cff32b6496
Parents: e259030
Author: Andrew Wang <w...@apache.org>
Authored: Tue Oct 31 10:46:19 2017 -0700
Committer: Andrew Wang <w...@apache.org>
Committed: Tue Oct 31 10:46:19 2017 -0700

--
 .../java/org/apache/hadoop/hdfs/server/namenode/NameNode.java| 4 ++--
 .../src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java| 2 +-
 2 files changed, 3 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ae08fe51/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
index 6125dea..32b873b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
@@ -248,6 +248,7 @@ public class NameNode extends ReconfigurableBase implements
 DFS_NAMENODE_RPC_BIND_HOST_KEY,
 DFS_NAMENODE_NAME_DIR_KEY,
 DFS_NAMENODE_EDITS_DIR_KEY,
+DFS_NAMENODE_SHARED_EDITS_DIR_KEY,
 DFS_NAMENODE_CHECKPOINT_DIR_KEY,
 DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY,
 DFS_NAMENODE_LIFELINE_RPC_ADDRESS_KEY,
@@ -277,8 +278,7 @@ public class NameNode extends ReconfigurableBase implements
* for a specific namenode.
*/
   public static final String[] NAMESERVICE_SPECIFIC_KEYS = {
-  DFS_HA_AUTO_FAILOVER_ENABLED_KEY,
-  DFS_NAMENODE_SHARED_EDITS_DIR_KEY
+DFS_HA_AUTO_FAILOVER_ENABLED_KEY
   };
 
   private String ipcClientRPCBackoffEnable;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ae08fe51/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
index 64d2322..39f76a5 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
@@ -402,7 +402,7 @@ public class TestDFSUtil {
   public void testSomeConfsNNSpecificSomeNSSpecific() {
 final HdfsConfiguration conf = new HdfsConfiguration();
 
-String key = DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY;
+String key = DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY;
 conf.set(key, "global-default");
 conf.set(key + ".ns1", "ns1-override");
 conf.set(key + ".ns1.nn1", "nn1-override");


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDFS-12497. Re-enable TestDFSStripedOutputStreamWithFailure tests. Contributed by Huafeng Wang.

2017-10-20 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 d375645d5 -> ea149c10c


HDFS-12497. Re-enable TestDFSStripedOutputStreamWithFailure tests. Contributed 
by Huafeng Wang.

(cherry picked from commit 0477eff8be4505ad2730ec16621105b6df9099ae)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ea149c10
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ea149c10
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ea149c10

Branch: refs/heads/branch-3.0
Commit: ea149c10cf33df33f4fdf9c2d4136de41c56d98f
Parents: d375645
Author: Andrew Wang <w...@apache.org>
Authored: Fri Oct 20 13:27:21 2017 -0700
Committer: Andrew Wang <w...@apache.org>
Committed: Fri Oct 20 13:27:29 2017 -0700

--
 .../TestDFSStripedOutputStreamWithFailure.java  | 40 +++-
 ...tputStreamWithFailureWithRandomECPolicy.java | 14 +++
 2 files changed, 29 insertions(+), 25 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ea149c10/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure.java
index e7fa278..afb4d63 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure.java
@@ -26,6 +26,7 @@ import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
+import org.apache.hadoop.hdfs.protocol.AddErasureCodingPolicyResponse;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
@@ -38,6 +39,7 @@ import 
org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.io.erasurecode.CodecUtil;
+import org.apache.hadoop.io.erasurecode.ECSchema;
 import org.apache.hadoop.io.erasurecode.ErasureCodeNative;
 import 
org.apache.hadoop.io.erasurecode.rawcoder.NativeRSRawErasureCoderFactory;
 import org.apache.hadoop.security.token.Token;
@@ -77,30 +79,29 @@ public class TestDFSStripedOutputStreamWithFailure {
 .getLogger().setLevel(Level.ALL);
   }
 
+  private final int cellSize = 64 * 1024; //64k
+  private final int stripesPerBlock = 4;
   private ErasureCodingPolicy ecPolicy;
   private int dataBlocks;
   private int parityBlocks;
-  private int cellSize;
-  private final int stripesPerBlock = 4;
   private int blockSize;
   private int blockGroupSize;
 
   private static final int FLUSH_POS =
   9 * DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_DEFAULT + 1;
 
-  public ErasureCodingPolicy getEcPolicy() {
-return StripedFileTestUtil.getDefaultECPolicy();
+  public ECSchema getEcSchema() {
+return StripedFileTestUtil.getDefaultECPolicy().getSchema();
   }
 
   /*
* Initialize erasure coding policy.
*/
   @Before
-  public void init(){
-ecPolicy = getEcPolicy();
+  public void init() {
+ecPolicy = new ErasureCodingPolicy(getEcSchema(), cellSize);
 dataBlocks = ecPolicy.getNumDataUnits();
 parityBlocks = ecPolicy.getNumParityUnits();
-cellSize = ecPolicy.getCellSize();
 blockSize = cellSize * stripesPerBlock;
 blockGroupSize = blockSize * dataBlocks;
 dnIndexSuite = getDnIndexSuite();
@@ -189,7 +190,7 @@ public class TestDFSStripedOutputStreamWithFailure {
   private List lengths;
 
   Integer getLength(int i) {
-return i >= 0 && i < lengths.size()? lengths.get(i): null;
+return i >= 0 && i < lengths.size() ? lengths.get(i): null;
   }
 
   private static final Random RANDOM = new Random();
@@ -220,6 +221,10 @@ public class TestDFSStripedOutputStreamWithFailure {
 cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build();
 cluster.waitActive();
 dfs = cluster.getFileSystem();
+AddErasureCodingPolicyResponse[] res =
+dfs.addErasureCodingPolicies(new ErasureCodingPolicy[]{ecPolicy});
+ecPolicy = res[0].getPolicy();
+dfs.enableErasureCodingPolicy(ecPolicy.getName());
 DFSTestUtil.enableAllECPolicies(dfs);
 dfs.mkdirs(dir);
 dfs.setErasureCodingPolicy(dir, ecPolicy.g

hadoop git commit: HDFS-12497. Re-enable TestDFSStripedOutputStreamWithFailure tests. Contributed by Huafeng Wang.

2017-10-20 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/trunk 6b7c87c94 -> 0477eff8b


HDFS-12497. Re-enable TestDFSStripedOutputStreamWithFailure tests. Contributed 
by Huafeng Wang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0477eff8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0477eff8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0477eff8

Branch: refs/heads/trunk
Commit: 0477eff8be4505ad2730ec16621105b6df9099ae
Parents: 6b7c87c
Author: Andrew Wang <w...@apache.org>
Authored: Fri Oct 20 13:27:21 2017 -0700
Committer: Andrew Wang <w...@apache.org>
Committed: Fri Oct 20 13:27:21 2017 -0700

--
 .../TestDFSStripedOutputStreamWithFailure.java  | 40 +++-
 ...tputStreamWithFailureWithRandomECPolicy.java | 14 +++
 2 files changed, 29 insertions(+), 25 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0477eff8/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure.java
index e7fa278..afb4d63 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure.java
@@ -26,6 +26,7 @@ import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
+import org.apache.hadoop.hdfs.protocol.AddErasureCodingPolicyResponse;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
@@ -38,6 +39,7 @@ import 
org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.io.erasurecode.CodecUtil;
+import org.apache.hadoop.io.erasurecode.ECSchema;
 import org.apache.hadoop.io.erasurecode.ErasureCodeNative;
 import 
org.apache.hadoop.io.erasurecode.rawcoder.NativeRSRawErasureCoderFactory;
 import org.apache.hadoop.security.token.Token;
@@ -77,30 +79,29 @@ public class TestDFSStripedOutputStreamWithFailure {
 .getLogger().setLevel(Level.ALL);
   }
 
+  private final int cellSize = 64 * 1024; //64k
+  private final int stripesPerBlock = 4;
   private ErasureCodingPolicy ecPolicy;
   private int dataBlocks;
   private int parityBlocks;
-  private int cellSize;
-  private final int stripesPerBlock = 4;
   private int blockSize;
   private int blockGroupSize;
 
   private static final int FLUSH_POS =
   9 * DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_DEFAULT + 1;
 
-  public ErasureCodingPolicy getEcPolicy() {
-return StripedFileTestUtil.getDefaultECPolicy();
+  public ECSchema getEcSchema() {
+return StripedFileTestUtil.getDefaultECPolicy().getSchema();
   }
 
   /*
* Initialize erasure coding policy.
*/
   @Before
-  public void init(){
-ecPolicy = getEcPolicy();
+  public void init() {
+ecPolicy = new ErasureCodingPolicy(getEcSchema(), cellSize);
 dataBlocks = ecPolicy.getNumDataUnits();
 parityBlocks = ecPolicy.getNumParityUnits();
-cellSize = ecPolicy.getCellSize();
 blockSize = cellSize * stripesPerBlock;
 blockGroupSize = blockSize * dataBlocks;
 dnIndexSuite = getDnIndexSuite();
@@ -189,7 +190,7 @@ public class TestDFSStripedOutputStreamWithFailure {
   private List lengths;
 
   Integer getLength(int i) {
-return i >= 0 && i < lengths.size()? lengths.get(i): null;
+return i >= 0 && i < lengths.size() ? lengths.get(i): null;
   }
 
   private static final Random RANDOM = new Random();
@@ -220,6 +221,10 @@ public class TestDFSStripedOutputStreamWithFailure {
 cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build();
 cluster.waitActive();
 dfs = cluster.getFileSystem();
+AddErasureCodingPolicyResponse[] res =
+dfs.addErasureCodingPolicies(new ErasureCodingPolicy[]{ecPolicy});
+ecPolicy = res[0].getPolicy();
+dfs.enableErasureCodingPolicy(ecPolicy.getName());
 DFSTestUtil.enableAllECPolicies(dfs);
 dfs.mkdirs(dir);
 dfs.setErasureCodingPolicy(dir, ecPolicy.getName());
@@ -241,7 +246,7 @@ public class TestDFSStripedOutputStreamWithFailure {
 return conf;
   }

hadoop git commit: HDFS-12547. Extend TestQuotaWithStripedBlocks with a random EC policy. Contributed by Takanobu Asanuma.

2017-10-10 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 c0d56cad5 -> a26da9dcf


HDFS-12547. Extend TestQuotaWithStripedBlocks with a random EC policy. 
Contributed by Takanobu Asanuma.

(cherry picked from commit a297fb08866305860dc17813c3db5701e9515101)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a26da9dc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a26da9dc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a26da9dc

Branch: refs/heads/branch-3.0
Commit: a26da9dcf27eb79405090f4c91ef17124dbc5459
Parents: c0d56ca
Author: Andrew Wang <w...@apache.org>
Authored: Tue Oct 10 17:35:49 2017 -0700
Committer: Andrew Wang <w...@apache.org>
Committed: Tue Oct 10 17:35:56 2017 -0700

--
 .../namenode/TestQuotaWithStripedBlocks.java| 40 ++--
 ...uotaWithStripedBlocksWithRandomECPolicy.java | 50 
 2 files changed, 76 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a26da9dc/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestQuotaWithStripedBlocks.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestQuotaWithStripedBlocks.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestQuotaWithStripedBlocks.java
index 9995393..38b98a4 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestQuotaWithStripedBlocks.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestQuotaWithStripedBlocks.java
@@ -44,27 +44,39 @@ import java.io.IOException;
  * Make sure we correctly update the quota usage with the striped blocks.
  */
 public class TestQuotaWithStripedBlocks {
-  private static final int BLOCK_SIZE = 1024 * 1024;
-  private static final long DISK_QUOTA = BLOCK_SIZE * 10;
-  private final ErasureCodingPolicy ecPolicy =
-  StripedFileTestUtil.getDefaultECPolicy();
-  private final int dataBlocks = ecPolicy.getNumDataUnits();
-  private final int parityBlocsk = ecPolicy.getNumParityUnits();
-  private final int groupSize = dataBlocks + parityBlocsk;
-  private final int cellSize = ecPolicy.getCellSize();
-  private static final Path ecDir = new Path("/ec");
+  private int blockSize;
+  private ErasureCodingPolicy ecPolicy;
+  private int dataBlocks;
+  private int parityBlocsk;
+  private int groupSize;
+  private int cellSize;
+  private Path ecDir;
+  private long diskQuota;
 
   private MiniDFSCluster cluster;
   private FSDirectory dir;
   private DistributedFileSystem dfs;
 
+  public ErasureCodingPolicy getEcPolicy() {
+return StripedFileTestUtil.getDefaultECPolicy();
+  }
+
   @Rule
   public Timeout globalTimeout = new Timeout(30);
 
   @Before
   public void setUp() throws IOException {
+blockSize = 1024 * 1024;
+ecPolicy = getEcPolicy();
+dataBlocks = ecPolicy.getNumDataUnits();
+parityBlocsk = ecPolicy.getNumParityUnits();
+groupSize = dataBlocks + parityBlocsk;
+cellSize = ecPolicy.getCellSize();
+ecDir = new Path("/ec");
+diskQuota = blockSize * (groupSize + 1);
+
 final Configuration conf = new Configuration();
-conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
+conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
 cluster = new MiniDFSCluster.Builder(conf).numDataNodes(groupSize).build();
 cluster.waitActive();
 
@@ -75,8 +87,8 @@ public class TestQuotaWithStripedBlocks {
 dfs.mkdirs(ecDir);
 dfs.getClient()
 .setErasureCodingPolicy(ecDir.toString(), ecPolicy.getName());
-dfs.setQuota(ecDir, Long.MAX_VALUE - 1, DISK_QUOTA);
-dfs.setQuotaByStorageType(ecDir, StorageType.DISK, DISK_QUOTA);
+dfs.setQuota(ecDir, Long.MAX_VALUE - 1, diskQuota);
+dfs.setQuotaByStorageType(ecDir, StorageType.DISK, diskQuota);
 dfs.setStoragePolicy(ecDir, HdfsConstants.HOT_STORAGE_POLICY_NAME);
   }
 
@@ -112,8 +124,8 @@ public class TestQuotaWithStripedBlocks {
   final long diskUsed = dirNode.getDirectoryWithQuotaFeature()
   .getSpaceConsumed().getTypeSpaces().get(StorageType.DISK);
   // When we add a new block we update the quota using the full block size.
-  Assert.assertEquals(BLOCK_SIZE * groupSize, spaceUsed);
-  Assert.assertEquals(BLOCK_SIZE * groupSize, diskUsed);
+  Assert.assertEquals(blockSize * groupSize, spaceUsed);
+  Assert.assertEquals(blockSize * groupSize, diskUsed);
 
   dfs.getClient().getNamenode().complete(file.toString(),
   dfs.getClient().getClientName(), previous, fileNode.getId());

http://git-wip-us.apache.org/re

hadoop git commit: HDFS-12547. Extend TestQuotaWithStripedBlocks with a random EC policy. Contributed by Takanobu Asanuma.

2017-10-10 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/trunk 78af6cdc5 -> a297fb088


HDFS-12547. Extend TestQuotaWithStripedBlocks with a random EC policy. 
Contributed by Takanobu Asanuma.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a297fb08
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a297fb08
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a297fb08

Branch: refs/heads/trunk
Commit: a297fb08866305860dc17813c3db5701e9515101
Parents: 78af6cd
Author: Andrew Wang <w...@apache.org>
Authored: Tue Oct 10 17:35:49 2017 -0700
Committer: Andrew Wang <w...@apache.org>
Committed: Tue Oct 10 17:35:49 2017 -0700

--
 .../namenode/TestQuotaWithStripedBlocks.java| 40 ++--
 ...uotaWithStripedBlocksWithRandomECPolicy.java | 50 
 2 files changed, 76 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a297fb08/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestQuotaWithStripedBlocks.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestQuotaWithStripedBlocks.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestQuotaWithStripedBlocks.java
index 9995393..38b98a4 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestQuotaWithStripedBlocks.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestQuotaWithStripedBlocks.java
@@ -44,27 +44,39 @@ import java.io.IOException;
  * Make sure we correctly update the quota usage with the striped blocks.
  */
 public class TestQuotaWithStripedBlocks {
-  private static final int BLOCK_SIZE = 1024 * 1024;
-  private static final long DISK_QUOTA = BLOCK_SIZE * 10;
-  private final ErasureCodingPolicy ecPolicy =
-  StripedFileTestUtil.getDefaultECPolicy();
-  private final int dataBlocks = ecPolicy.getNumDataUnits();
-  private final int parityBlocsk = ecPolicy.getNumParityUnits();
-  private final int groupSize = dataBlocks + parityBlocsk;
-  private final int cellSize = ecPolicy.getCellSize();
-  private static final Path ecDir = new Path("/ec");
+  private int blockSize;
+  private ErasureCodingPolicy ecPolicy;
+  private int dataBlocks;
+  private int parityBlocsk;
+  private int groupSize;
+  private int cellSize;
+  private Path ecDir;
+  private long diskQuota;
 
   private MiniDFSCluster cluster;
   private FSDirectory dir;
   private DistributedFileSystem dfs;
 
+  public ErasureCodingPolicy getEcPolicy() {
+return StripedFileTestUtil.getDefaultECPolicy();
+  }
+
   @Rule
   public Timeout globalTimeout = new Timeout(30);
 
   @Before
   public void setUp() throws IOException {
+blockSize = 1024 * 1024;
+ecPolicy = getEcPolicy();
+dataBlocks = ecPolicy.getNumDataUnits();
+parityBlocsk = ecPolicy.getNumParityUnits();
+groupSize = dataBlocks + parityBlocsk;
+cellSize = ecPolicy.getCellSize();
+ecDir = new Path("/ec");
+diskQuota = blockSize * (groupSize + 1);
+
 final Configuration conf = new Configuration();
-conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
+conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
 cluster = new MiniDFSCluster.Builder(conf).numDataNodes(groupSize).build();
 cluster.waitActive();
 
@@ -75,8 +87,8 @@ public class TestQuotaWithStripedBlocks {
 dfs.mkdirs(ecDir);
 dfs.getClient()
 .setErasureCodingPolicy(ecDir.toString(), ecPolicy.getName());
-dfs.setQuota(ecDir, Long.MAX_VALUE - 1, DISK_QUOTA);
-dfs.setQuotaByStorageType(ecDir, StorageType.DISK, DISK_QUOTA);
+dfs.setQuota(ecDir, Long.MAX_VALUE - 1, diskQuota);
+dfs.setQuotaByStorageType(ecDir, StorageType.DISK, diskQuota);
 dfs.setStoragePolicy(ecDir, HdfsConstants.HOT_STORAGE_POLICY_NAME);
   }
 
@@ -112,8 +124,8 @@ public class TestQuotaWithStripedBlocks {
   final long diskUsed = dirNode.getDirectoryWithQuotaFeature()
   .getSpaceConsumed().getTypeSpaces().get(StorageType.DISK);
   // When we add a new block we update the quota using the full block size.
-  Assert.assertEquals(BLOCK_SIZE * groupSize, spaceUsed);
-  Assert.assertEquals(BLOCK_SIZE * groupSize, diskUsed);
+  Assert.assertEquals(blockSize * groupSize, spaceUsed);
+  Assert.assertEquals(blockSize * groupSize, diskUsed);
 
   dfs.getClient().getNamenode().complete(file.toString(),
   dfs.getClient().getClientName(), previous, fileNode.getId());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a297fb08/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org

hadoop git commit: HADOOP-14939. Update project release notes with HDFS-10467 for 3.0.0. Contributed by Inigo Goiri.

2017-10-10 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/trunk 41351b05f -> 132cdac0d


HADOOP-14939. Update project release notes with HDFS-10467 for 3.0.0. 
Contributed by Inigo Goiri.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/132cdac0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/132cdac0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/132cdac0

Branch: refs/heads/trunk
Commit: 132cdac0ddb5c38205a96579a23b55689ea5a8e3
Parents: 41351b0
Author: Andrew Wang <w...@apache.org>
Authored: Tue Oct 10 10:16:36 2017 -0700
Committer: Andrew Wang <w...@apache.org>
Committed: Tue Oct 10 10:16:36 2017 -0700

--
 hadoop-project/src/site/markdown/index.md.vm | 15 +++
 1 file changed, 15 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/132cdac0/hadoop-project/src/site/markdown/index.md.vm
--
diff --git a/hadoop-project/src/site/markdown/index.md.vm 
b/hadoop-project/src/site/markdown/index.md.vm
index d9e645b..8e1e06f 100644
--- a/hadoop-project/src/site/markdown/index.md.vm
+++ b/hadoop-project/src/site/markdown/index.md.vm
@@ -213,6 +213,21 @@ metadata.
 
 See [S3Guard](./hadoop-aws/tools/hadoop-aws/s3guard.html) for more details.
 
+HDFS Router-Based Federation
+-
+HDFS Router-Based Federation adds a RPC routing layer that provides a federated
+view of multiple HDFS namespaces. This is similar to the existing
+[ViewFs](./hadoop-project-dist/hadoop-hdfs/ViewFs.html)) and
+[HDFS Federation](./hadoop-project-dist/hadoop-hdfs/Federation.html)
+functionality, except the mount table is managed on the server-side by the
+routing layer rather than on the client. This simplifies access to a federated
+cluster for existing HDFS clients.
+
+See [HDFS-10467](https://issues.apache.org/jira/browse/HADOOP-10467) and the
+HDFS Router-based Federation
+[documentation](./hadoop-project-dist/hadoop-hdfs/HDFSRouterFederation.md) for
+more details.
+
 Getting Started
 ===
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HADOOP-14939. Update project release notes with HDFS-10467 for 3.0.0. Contributed by Inigo Goiri.

2017-10-10 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 f1cfb2a8d -> 61ac7c82f


HADOOP-14939. Update project release notes with HDFS-10467 for 3.0.0. 
Contributed by Inigo Goiri.

(cherry picked from commit 132cdac0ddb5c38205a96579a23b55689ea5a8e3)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/61ac7c82
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/61ac7c82
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/61ac7c82

Branch: refs/heads/branch-3.0
Commit: 61ac7c82f2c1d25db584878cc106f59f1430f334
Parents: f1cfb2a
Author: Andrew Wang <w...@apache.org>
Authored: Tue Oct 10 10:16:36 2017 -0700
Committer: Andrew Wang <w...@apache.org>
Committed: Tue Oct 10 10:16:39 2017 -0700

--
 hadoop-project/src/site/markdown/index.md.vm | 15 +++
 1 file changed, 15 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/61ac7c82/hadoop-project/src/site/markdown/index.md.vm
--
diff --git a/hadoop-project/src/site/markdown/index.md.vm 
b/hadoop-project/src/site/markdown/index.md.vm
index d9e645b..8e1e06f 100644
--- a/hadoop-project/src/site/markdown/index.md.vm
+++ b/hadoop-project/src/site/markdown/index.md.vm
@@ -213,6 +213,21 @@ metadata.
 
 See [S3Guard](./hadoop-aws/tools/hadoop-aws/s3guard.html) for more details.
 
+HDFS Router-Based Federation
+-
+HDFS Router-Based Federation adds a RPC routing layer that provides a federated
+view of multiple HDFS namespaces. This is similar to the existing
+[ViewFs](./hadoop-project-dist/hadoop-hdfs/ViewFs.html)) and
+[HDFS Federation](./hadoop-project-dist/hadoop-hdfs/Federation.html)
+functionality, except the mount table is managed on the server-side by the
+routing layer rather than on the client. This simplifies access to a federated
+cluster for existing HDFS clients.
+
+See [HDFS-10467](https://issues.apache.org/jira/browse/HADOOP-10467) and the
+HDFS Router-based Federation
+[documentation](./hadoop-project-dist/hadoop-hdfs/HDFSRouterFederation.md) for
+more details.
+
 Getting Started
 ===
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: Revert "HDFS-12603. Enable async edit logging by default."

2017-10-09 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/trunk f9ff17f51 -> 82cd85d86


Revert "HDFS-12603. Enable async edit logging by default."

This reverts commit afb42aeabf1317b755ab79e0265bc90920c896ac.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/82cd85d8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/82cd85d8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/82cd85d8

Branch: refs/heads/trunk
Commit: 82cd85d8680912b8d268c875bd51598b20f3313c
Parents: f9ff17f
Author: Andrew Wang <w...@apache.org>
Authored: Mon Oct 9 17:53:56 2017 -0700
Committer: Andrew Wang <w...@apache.org>
Committed: Mon Oct 9 17:53:56 2017 -0700

--
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |  2 +-
 .../hadoop/hdfs/server/namenode/FSEditLog.java  |  7 +--
 .../hdfs/server/namenode/FSEditLogAsync.java|  5 -
 .../src/main/resources/hdfs-default.xml |  2 +-
 .../namenode/ha/TestFailureToReadEdits.java | 22 +++-
 5 files changed, 10 insertions(+), 28 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/82cd85d8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 395b192..1f96763 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -326,7 +326,7 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
 
   public static final String  DFS_NAMENODE_EDITS_ASYNC_LOGGING =
   "dfs.namenode.edits.asynclogging";
-  public static final boolean DFS_NAMENODE_EDITS_ASYNC_LOGGING_DEFAULT = true;
+  public static final boolean DFS_NAMENODE_EDITS_ASYNC_LOGGING_DEFAULT = false;
 
   public static final String  DFS_LIST_LIMIT = "dfs.ls.limit";
   public static final int DFS_LIST_LIMIT_DEFAULT = 1000;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/82cd85d8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
index 7d19b04..a8f5bfa 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
@@ -130,7 +130,7 @@ public class FSEditLog implements LogsPurgeable {
* 
* In a non-HA setup:
* 
-   * The log starts in UNINITIALIZED state upon construction. Once it's
+   * The log starts in UNITIALIZED state upon construction. Once it's
* initialized, it is usually in IN_SEGMENT state, indicating that edits may
* be written. In the middle of a roll, or while saving the namespace, it
* briefly enters the BETWEEN_LOG_SEGMENTS state, indicating that the 
previous
@@ -1837,9 +1837,4 @@ public class FSEditLog implements LogsPurgeable {
 }
 return count;
   }
-
-  @Override
-  public String toString() {
-return super.toString();
-  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/82cd85d8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogAsync.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogAsync.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogAsync.java
index 1f5dc75..c14a310 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogAsync.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogAsync.java
@@ -319,9 +319,4 @@ class FSEditLogAsync extends FSEditLog implements Runnable {
   return "["+getClass().getSimpleName()+" op:"+op+" call:"+call+"]";
 }
   }
-
-  @Override
-  public String toString() {
-return super.toString();
-  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/82cd85d8/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
--
diff --git 
a/hadoop-hdfs-proje

hadoop git commit: Revert "HDFS-12603. Enable async edit logging by default."

2017-10-09 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 fa676ba06 -> f1cfb2a8d


Revert "HDFS-12603. Enable async edit logging by default."

This reverts commit e9f3f49b6dabca540ce48a633c1767fbdb49b6da.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f1cfb2a8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f1cfb2a8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f1cfb2a8

Branch: refs/heads/branch-3.0
Commit: f1cfb2a8dc0cc48d5418bd73db0ce982922b3eda
Parents: fa676ba
Author: Andrew Wang <w...@apache.org>
Authored: Mon Oct 9 17:54:19 2017 -0700
Committer: Andrew Wang <w...@apache.org>
Committed: Mon Oct 9 17:54:19 2017 -0700

--
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |  2 +-
 .../hadoop/hdfs/server/namenode/FSEditLog.java  |  7 +--
 .../hdfs/server/namenode/FSEditLogAsync.java|  5 -
 .../src/main/resources/hdfs-default.xml |  2 +-
 .../namenode/ha/TestFailureToReadEdits.java | 22 +++-
 5 files changed, 10 insertions(+), 28 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f1cfb2a8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index fbc49fa..6d39de6 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -323,7 +323,7 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
 
   public static final String  DFS_NAMENODE_EDITS_ASYNC_LOGGING =
   "dfs.namenode.edits.asynclogging";
-  public static final boolean DFS_NAMENODE_EDITS_ASYNC_LOGGING_DEFAULT = true;
+  public static final boolean DFS_NAMENODE_EDITS_ASYNC_LOGGING_DEFAULT = false;
 
   public static final String  DFS_LIST_LIMIT = "dfs.ls.limit";
   public static final int DFS_LIST_LIMIT_DEFAULT = 1000;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f1cfb2a8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
index 7d19b04..a8f5bfa 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
@@ -130,7 +130,7 @@ public class FSEditLog implements LogsPurgeable {
* 
* In a non-HA setup:
* 
-   * The log starts in UNINITIALIZED state upon construction. Once it's
+   * The log starts in UNITIALIZED state upon construction. Once it's
* initialized, it is usually in IN_SEGMENT state, indicating that edits may
* be written. In the middle of a roll, or while saving the namespace, it
* briefly enters the BETWEEN_LOG_SEGMENTS state, indicating that the 
previous
@@ -1837,9 +1837,4 @@ public class FSEditLog implements LogsPurgeable {
 }
 return count;
   }
-
-  @Override
-  public String toString() {
-return super.toString();
-  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f1cfb2a8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogAsync.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogAsync.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogAsync.java
index 1f5dc75..c14a310 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogAsync.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogAsync.java
@@ -319,9 +319,4 @@ class FSEditLogAsync extends FSEditLog implements Runnable {
   return "["+getClass().getSimpleName()+" op:"+op+" call:"+call+"]";
 }
   }
-
-  @Override
-  public String toString() {
-return super.toString();
-  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f1cfb2a8/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
--
diff --git 
a/hadoo

hadoop git commit: Revert "HDFS-12603. Enable async edit logging by default."

2017-10-09 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 42d3229e6 -> 1ddc49527


Revert "HDFS-12603. Enable async edit logging by default."

This reverts commit 42d3229e6f604d13a0419ac32979500dae306066.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1ddc4952
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1ddc4952
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1ddc4952

Branch: refs/heads/branch-2
Commit: 1ddc49527a7d9b0fd56ee69a7e62e21488f5874d
Parents: 42d3229
Author: Andrew Wang <w...@apache.org>
Authored: Mon Oct 9 13:29:52 2017 -0700
Committer: Andrew Wang <w...@apache.org>
Committed: Mon Oct 9 13:29:52 2017 -0700

--
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |  2 +-
 .../hadoop/hdfs/server/namenode/FSEditLog.java  |  7 +--
 .../hdfs/server/namenode/FSEditLogAsync.java|  5 -
 .../src/main/resources/hdfs-default.xml |  2 +-
 .../namenode/ha/TestFailureToReadEdits.java | 22 +++-
 5 files changed, 10 insertions(+), 28 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1ddc4952/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 6bec228..e3ce0ab 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -286,7 +286,7 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
 
   public static final String  DFS_NAMENODE_EDITS_ASYNC_LOGGING =
   "dfs.namenode.edits.asynclogging";
-  public static final boolean DFS_NAMENODE_EDITS_ASYNC_LOGGING_DEFAULT = true;
+  public static final boolean DFS_NAMENODE_EDITS_ASYNC_LOGGING_DEFAULT = false;
 
   public static final String  DFS_LIST_LIMIT = "dfs.ls.limit";
   public static final int DFS_LIST_LIMIT_DEFAULT = 1000;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1ddc4952/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
index 7c1b9b3..2ab15fd 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
@@ -126,7 +126,7 @@ public class FSEditLog implements LogsPurgeable {
* 
* In a non-HA setup:
* 
-   * The log starts in UNINITIALIZED state upon construction. Once it's
+   * The log starts in UNITIALIZED state upon construction. Once it's
* initialized, it is usually in IN_SEGMENT state, indicating that edits may
* be written. In the middle of a roll, or while saving the namespace, it
* briefly enters the BETWEEN_LOG_SEGMENTS state, indicating that the 
previous
@@ -1746,9 +1746,4 @@ public class FSEditLog implements LogsPurgeable {
 }
 return count;
   }
-
-  @Override
-  public String toString() {
-return super.toString();
-  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1ddc4952/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogAsync.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogAsync.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogAsync.java
index 1f5dc75..c14a310 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogAsync.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogAsync.java
@@ -319,9 +319,4 @@ class FSEditLogAsync extends FSEditLog implements Runnable {
   return "["+getClass().getSimpleName()+" op:"+op+" call:"+call+"]";
 }
   }
-
-  @Override
-  public String toString() {
-return super.toString();
-  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1ddc4952/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
--
diff --git 
a/hadoo

hadoop git commit: HDFS-12541. Extend TestSafeModeWithStripedFile with a random EC policy. Contributed by Takanobu Asanuma.

2017-10-09 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/trunk afb42aeab -> 793820823


HDFS-12541. Extend TestSafeModeWithStripedFile with a random EC policy. 
Contributed by Takanobu Asanuma.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/79382082
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/79382082
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/79382082

Branch: refs/heads/trunk
Commit: 793820823325e390bb671f4cc1b3bc6920bba5de
Parents: afb42ae
Author: Andrew Wang <w...@apache.org>
Authored: Mon Oct 9 13:03:32 2017 -0700
Committer: Andrew Wang <w...@apache.org>
Committed: Mon Oct 9 13:03:32 2017 -0700

--
 .../hdfs/TestSafeModeWithStripedFile.java   | 29 
 ...feModeWithStripedFileWithRandomECPolicy.java | 49 
 2 files changed, 68 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/79382082/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeModeWithStripedFile.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeModeWithStripedFile.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeModeWithStripedFile.java
index 3d3ec9c..a43cc52 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeModeWithStripedFile.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeModeWithStripedFile.java
@@ -42,13 +42,12 @@ import static org.junit.Assert.assertTrue;
 
 public class TestSafeModeWithStripedFile {
 
-  private final ErasureCodingPolicy ecPolicy =
-  StripedFileTestUtil.getDefaultECPolicy();
-  private final short dataBlocks = (short) ecPolicy.getNumDataUnits();
-  private final short parityBlocks = (short) ecPolicy.getNumParityUnits();
-  private final int numDNs = dataBlocks + parityBlocks;
-  private final int cellSize = ecPolicy.getCellSize();
-  private final int blockSize = cellSize * 2;
+  private ErasureCodingPolicy ecPolicy;
+  private short dataBlocks;
+  private short parityBlocks;
+  private int numDNs;
+  private int cellSize;
+  private int blockSize;
 
   private MiniDFSCluster cluster;
   private Configuration conf;
@@ -56,17 +55,27 @@ public class TestSafeModeWithStripedFile {
   @Rule
   public Timeout globalTimeout = new Timeout(30);
 
+  public ErasureCodingPolicy getEcPolicy() {
+return StripedFileTestUtil.getDefaultECPolicy();
+  }
+
   @Before
   public void setup() throws IOException {
+ecPolicy = getEcPolicy();
+dataBlocks = (short) ecPolicy.getNumDataUnits();
+parityBlocks = (short) ecPolicy.getNumParityUnits();
+numDNs = dataBlocks + parityBlocks;
+cellSize = ecPolicy.getCellSize();
+blockSize = cellSize * 2;
+
 conf = new HdfsConfiguration();
 conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
 conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 100);
 cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build();
+cluster.getFileSystem().enableErasureCodingPolicy(getEcPolicy().getName());
 cluster.getFileSystem().getClient().setErasureCodingPolicy("/",
-StripedFileTestUtil.getDefaultECPolicy().getName());
+getEcPolicy().getName());
 cluster.waitActive();
-cluster.getFileSystem().enableErasureCodingPolicy(
-StripedFileTestUtil.getDefaultECPolicy().getName());
   }
 
   @After

http://git-wip-us.apache.org/repos/asf/hadoop/blob/79382082/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeModeWithStripedFileWithRandomECPolicy.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeModeWithStripedFileWithRandomECPolicy.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeModeWithStripedFileWithRandomECPolicy.java
new file mode 100644
index 000..00c29dc
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeModeWithStripedFileWithRandomECPolicy.java
@@ -0,0 +1,49 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable la

hadoop git commit: HDFS-12541. Extend TestSafeModeWithStripedFile with a random EC policy. Contributed by Takanobu Asanuma.

2017-10-09 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 e9f3f49b6 -> 1d11701e5


HDFS-12541. Extend TestSafeModeWithStripedFile with a random EC policy. 
Contributed by Takanobu Asanuma.

(cherry picked from commit 793820823325e390bb671f4cc1b3bc6920bba5de)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1d11701e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1d11701e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1d11701e

Branch: refs/heads/branch-3.0
Commit: 1d11701e554f6f312b1b099628992ebab4053df0
Parents: e9f3f49
Author: Andrew Wang <w...@apache.org>
Authored: Mon Oct 9 13:03:32 2017 -0700
Committer: Andrew Wang <w...@apache.org>
Committed: Mon Oct 9 13:03:39 2017 -0700

--
 .../hdfs/TestSafeModeWithStripedFile.java   | 29 
 ...feModeWithStripedFileWithRandomECPolicy.java | 49 
 2 files changed, 68 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1d11701e/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeModeWithStripedFile.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeModeWithStripedFile.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeModeWithStripedFile.java
index 3d3ec9c..a43cc52 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeModeWithStripedFile.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeModeWithStripedFile.java
@@ -42,13 +42,12 @@ import static org.junit.Assert.assertTrue;
 
 public class TestSafeModeWithStripedFile {
 
-  private final ErasureCodingPolicy ecPolicy =
-  StripedFileTestUtil.getDefaultECPolicy();
-  private final short dataBlocks = (short) ecPolicy.getNumDataUnits();
-  private final short parityBlocks = (short) ecPolicy.getNumParityUnits();
-  private final int numDNs = dataBlocks + parityBlocks;
-  private final int cellSize = ecPolicy.getCellSize();
-  private final int blockSize = cellSize * 2;
+  private ErasureCodingPolicy ecPolicy;
+  private short dataBlocks;
+  private short parityBlocks;
+  private int numDNs;
+  private int cellSize;
+  private int blockSize;
 
   private MiniDFSCluster cluster;
   private Configuration conf;
@@ -56,17 +55,27 @@ public class TestSafeModeWithStripedFile {
   @Rule
   public Timeout globalTimeout = new Timeout(30);
 
+  public ErasureCodingPolicy getEcPolicy() {
+return StripedFileTestUtil.getDefaultECPolicy();
+  }
+
   @Before
   public void setup() throws IOException {
+ecPolicy = getEcPolicy();
+dataBlocks = (short) ecPolicy.getNumDataUnits();
+parityBlocks = (short) ecPolicy.getNumParityUnits();
+numDNs = dataBlocks + parityBlocks;
+cellSize = ecPolicy.getCellSize();
+blockSize = cellSize * 2;
+
 conf = new HdfsConfiguration();
 conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
 conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 100);
 cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build();
+cluster.getFileSystem().enableErasureCodingPolicy(getEcPolicy().getName());
 cluster.getFileSystem().getClient().setErasureCodingPolicy("/",
-StripedFileTestUtil.getDefaultECPolicy().getName());
+getEcPolicy().getName());
 cluster.waitActive();
-cluster.getFileSystem().enableErasureCodingPolicy(
-StripedFileTestUtil.getDefaultECPolicy().getName());
   }
 
   @After

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1d11701e/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeModeWithStripedFileWithRandomECPolicy.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeModeWithStripedFileWithRandomECPolicy.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeModeWithStripedFileWithRandomECPolicy.java
new file mode 100644
index 000..00c29dc
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeModeWithStripedFileWithRandomECPolicy.java
@@ -0,0 +1,49 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://

hadoop git commit: HDFS-12603. Enable async edit logging by default.

2017-10-09 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 4d377c89b -> 42d3229e6


HDFS-12603. Enable async edit logging by default.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/42d3229e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/42d3229e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/42d3229e

Branch: refs/heads/branch-2
Commit: 42d3229e6f604d13a0419ac32979500dae306066
Parents: 4d377c8
Author: Andrew Wang <w...@apache.org>
Authored: Mon Oct 9 11:21:43 2017 -0700
Committer: Andrew Wang <w...@apache.org>
Committed: Mon Oct 9 11:21:43 2017 -0700

--
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |  2 +-
 .../hadoop/hdfs/server/namenode/FSEditLog.java  |  7 ++-
 .../hdfs/server/namenode/FSEditLogAsync.java|  5 +
 .../src/main/resources/hdfs-default.xml |  2 +-
 .../namenode/ha/TestFailureToReadEdits.java | 22 +---
 5 files changed, 28 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/42d3229e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index e3ce0ab..6bec228 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -286,7 +286,7 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
 
   public static final String  DFS_NAMENODE_EDITS_ASYNC_LOGGING =
   "dfs.namenode.edits.asynclogging";
-  public static final boolean DFS_NAMENODE_EDITS_ASYNC_LOGGING_DEFAULT = false;
+  public static final boolean DFS_NAMENODE_EDITS_ASYNC_LOGGING_DEFAULT = true;
 
   public static final String  DFS_LIST_LIMIT = "dfs.ls.limit";
   public static final int DFS_LIST_LIMIT_DEFAULT = 1000;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/42d3229e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
index 2ab15fd..7c1b9b3 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
@@ -126,7 +126,7 @@ public class FSEditLog implements LogsPurgeable {
* 
* In a non-HA setup:
* 
-   * The log starts in UNITIALIZED state upon construction. Once it's
+   * The log starts in UNINITIALIZED state upon construction. Once it's
* initialized, it is usually in IN_SEGMENT state, indicating that edits may
* be written. In the middle of a roll, or while saving the namespace, it
* briefly enters the BETWEEN_LOG_SEGMENTS state, indicating that the 
previous
@@ -1746,4 +1746,9 @@ public class FSEditLog implements LogsPurgeable {
 }
 return count;
   }
+
+  @Override
+  public String toString() {
+return super.toString();
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/42d3229e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogAsync.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogAsync.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogAsync.java
index c14a310..1f5dc75 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogAsync.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogAsync.java
@@ -319,4 +319,9 @@ class FSEditLogAsync extends FSEditLog implements Runnable {
   return "["+getClass().getSimpleName()+" op:"+op+" call:"+call+"]";
 }
   }
+
+  @Override
+  public String toString() {
+return super.toString();
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/42d3229e/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml 
b/hadoop-hdfs-project/ha

hadoop git commit: HDFS-12603. Enable async edit logging by default.

2017-10-09 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 015eb628b -> e9f3f49b6


HDFS-12603. Enable async edit logging by default.

(cherry picked from commit afb42aeabf1317b755ab79e0265bc90920c896ac)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e9f3f49b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e9f3f49b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e9f3f49b

Branch: refs/heads/branch-3.0
Commit: e9f3f49b6dabca540ce48a633c1767fbdb49b6da
Parents: 015eb62
Author: Andrew Wang <w...@apache.org>
Authored: Mon Oct 9 11:20:00 2017 -0700
Committer: Andrew Wang <w...@apache.org>
Committed: Mon Oct 9 11:20:04 2017 -0700

--
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |  2 +-
 .../hadoop/hdfs/server/namenode/FSEditLog.java  |  7 ++-
 .../hdfs/server/namenode/FSEditLogAsync.java|  5 +
 .../src/main/resources/hdfs-default.xml |  2 +-
 .../namenode/ha/TestFailureToReadEdits.java | 22 +---
 5 files changed, 28 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e9f3f49b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 6d39de6..fbc49fa 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -323,7 +323,7 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
 
   public static final String  DFS_NAMENODE_EDITS_ASYNC_LOGGING =
   "dfs.namenode.edits.asynclogging";
-  public static final boolean DFS_NAMENODE_EDITS_ASYNC_LOGGING_DEFAULT = false;
+  public static final boolean DFS_NAMENODE_EDITS_ASYNC_LOGGING_DEFAULT = true;
 
   public static final String  DFS_LIST_LIMIT = "dfs.ls.limit";
   public static final int DFS_LIST_LIMIT_DEFAULT = 1000;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e9f3f49b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
index a8f5bfa..7d19b04 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
@@ -130,7 +130,7 @@ public class FSEditLog implements LogsPurgeable {
* 
* In a non-HA setup:
* 
-   * The log starts in UNITIALIZED state upon construction. Once it's
+   * The log starts in UNINITIALIZED state upon construction. Once it's
* initialized, it is usually in IN_SEGMENT state, indicating that edits may
* be written. In the middle of a roll, or while saving the namespace, it
* briefly enters the BETWEEN_LOG_SEGMENTS state, indicating that the 
previous
@@ -1837,4 +1837,9 @@ public class FSEditLog implements LogsPurgeable {
 }
 return count;
   }
+
+  @Override
+  public String toString() {
+return super.toString();
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e9f3f49b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogAsync.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogAsync.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogAsync.java
index c14a310..1f5dc75 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogAsync.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogAsync.java
@@ -319,4 +319,9 @@ class FSEditLogAsync extends FSEditLog implements Runnable {
   return "["+getClass().getSimpleName()+" op:"+op+" call:"+call+"]";
 }
   }
+
+  @Override
+  public String toString() {
+return super.toString();
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e9f3f49b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
--
diff --git 
a/hadoop-hdfs-pro

hadoop git commit: HDFS-12603. Enable async edit logging by default.

2017-10-09 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/trunk 09ad848b6 -> afb42aeab


HDFS-12603. Enable async edit logging by default.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/afb42aea
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/afb42aea
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/afb42aea

Branch: refs/heads/trunk
Commit: afb42aeabf1317b755ab79e0265bc90920c896ac
Parents: 09ad848
Author: Andrew Wang <w...@apache.org>
Authored: Mon Oct 9 11:20:00 2017 -0700
Committer: Andrew Wang <w...@apache.org>
Committed: Mon Oct 9 11:20:00 2017 -0700

--
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |  2 +-
 .../hadoop/hdfs/server/namenode/FSEditLog.java  |  7 ++-
 .../hdfs/server/namenode/FSEditLogAsync.java|  5 +
 .../src/main/resources/hdfs-default.xml |  2 +-
 .../namenode/ha/TestFailureToReadEdits.java | 22 +---
 5 files changed, 28 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/afb42aea/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 1f96763..395b192 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -326,7 +326,7 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
 
   public static final String  DFS_NAMENODE_EDITS_ASYNC_LOGGING =
   "dfs.namenode.edits.asynclogging";
-  public static final boolean DFS_NAMENODE_EDITS_ASYNC_LOGGING_DEFAULT = false;
+  public static final boolean DFS_NAMENODE_EDITS_ASYNC_LOGGING_DEFAULT = true;
 
   public static final String  DFS_LIST_LIMIT = "dfs.ls.limit";
   public static final int DFS_LIST_LIMIT_DEFAULT = 1000;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/afb42aea/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
index a8f5bfa..7d19b04 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
@@ -130,7 +130,7 @@ public class FSEditLog implements LogsPurgeable {
* 
* In a non-HA setup:
* 
-   * The log starts in UNITIALIZED state upon construction. Once it's
+   * The log starts in UNINITIALIZED state upon construction. Once it's
* initialized, it is usually in IN_SEGMENT state, indicating that edits may
* be written. In the middle of a roll, or while saving the namespace, it
* briefly enters the BETWEEN_LOG_SEGMENTS state, indicating that the 
previous
@@ -1837,4 +1837,9 @@ public class FSEditLog implements LogsPurgeable {
 }
 return count;
   }
+
+  @Override
+  public String toString() {
+return super.toString();
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/afb42aea/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogAsync.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogAsync.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogAsync.java
index c14a310..1f5dc75 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogAsync.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogAsync.java
@@ -319,4 +319,9 @@ class FSEditLogAsync extends FSEditLog implements Runnable {
   return "["+getClass().getSimpleName()+" op:"+op+" call:"+call+"]";
 }
   }
+
+  @Override
+  public String toString() {
+return super.toString();
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/afb42aea/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml 
b/hadoop-hdfs-project/ha

hadoop git commit: HDFS-12567. BlockPlacementPolicyRackFaultTolerant fails with racks with very few nodes.

2017-10-05 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/trunk c071aad5d -> 644c2f692


HDFS-12567. BlockPlacementPolicyRackFaultTolerant fails with racks with very 
few nodes.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/644c2f69
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/644c2f69
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/644c2f69

Branch: refs/heads/trunk
Commit: 644c2f6924f341f51d809c91dccfff88fc82f6f0
Parents: c071aad
Author: Andrew Wang <w...@apache.org>
Authored: Thu Oct 5 16:58:43 2017 -0700
Committer: Andrew Wang <w...@apache.org>
Committed: Thu Oct 5 16:58:43 2017 -0700

--
 .../blockmanagement/BlockPlacementPolicy.java   |   2 +-
 .../BlockPlacementPolicyRackFaultTolerant.java  |  49 +++--
 .../hdfs/TestErasureCodingMultipleRacks.java| 107 +++
 3 files changed, 146 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/644c2f69/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java
index 732a2dc..23e3e40 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java
@@ -43,7 +43,7 @@ import org.slf4j.LoggerFactory;
  */
 @InterfaceAudience.Private
 public abstract class BlockPlacementPolicy {
-  static final Logger LOG = LoggerFactory.getLogger(
+  public static final Logger LOG = LoggerFactory.getLogger(
   BlockPlacementPolicy.class);
 
   @InterfaceAudience.Private

http://git-wip-us.apache.org/repos/asf/hadoop/blob/644c2f69/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyRackFaultTolerant.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyRackFaultTolerant.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyRackFaultTolerant.java
index c0d981c..1eac3ea 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyRackFaultTolerant.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyRackFaultTolerant.java
@@ -46,9 +46,12 @@ public class BlockPlacementPolicyRackFaultTolerant extends 
BlockPlacementPolicyD
 if (numOfRacks == 1 || totalNumOfReplicas <= 1) {
   return new int[] {numOfReplicas, totalNumOfReplicas};
 }
-if(totalNumOfReplicas<numOfRacks){
+// If more racks than replicas, put one replica per rack.
+if (totalNumOfReplicas < numOfRacks) {
   return new int[] {numOfReplicas, 1};
 }
+// If more replicas than racks, evenly spread the replicas.
+// This calculation rounds up.
 int maxNodesPerRack = (totalNumOfReplicas - 1) / numOfRacks + 1;
 return new int[] {numOfReplicas, maxNodesPerRack};
   }
@@ -109,18 +112,42 @@ public class BlockPlacementPolicyRackFaultTolerant 
extends BlockPlacementPolicyD
 numOfReplicas = Math.min(totalReplicaExpected - results.size(),
 (maxNodesPerRack -1) * numOfRacks - (results.size() - excess));
 
-// Fill each rack exactly (maxNodesPerRack-1) replicas.
-writer = chooseOnce(numOfReplicas, writer, new HashSet<>(excludedNodes),
-blocksize, maxNodesPerRack -1, results, avoidStaleNodes, storageTypes);
+try {
+  // Try to spread the replicas as evenly as possible across racks.
+  // This is done by first placing with (maxNodesPerRack-1), then spreading
+  // the remainder by calling again with maxNodesPerRack.
+  writer = chooseOnce(numOfReplicas, writer, new HashSet<>(excludedNodes),
+  blocksize, maxNodesPerRack - 1, results, avoidStaleNodes,
+  storageTypes);
 
-for (DatanodeStorageInfo resultStorage : results) {
-  addToExcludedNodes(resultStorage.getDatanodeDescriptor(), excludedNodes);
-}
+  // Exclude the chosen nodes
+  for (DatanodeStorageInfo resultStorage : results) {
+addToExcludedNodes(resultStorage.getDatanodeDescriptor(),
+excludedNodes);
+  }
+  LOG.trace("Chosen nodes: {}&quo

hadoop git commit: HDFS-12567. BlockPlacementPolicyRackFaultTolerant fails with racks with very few nodes.

2017-10-05 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 c021c8a19 -> e15927200


HDFS-12567. BlockPlacementPolicyRackFaultTolerant fails with racks with very 
few nodes.

(cherry picked from commit 644c2f6924f341f51d809c91dccfff88fc82f6f0)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e1592720
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e1592720
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e1592720

Branch: refs/heads/branch-3.0
Commit: e1592720032d725bb04bbbd8715bf0188d8faca5
Parents: c021c8a
Author: Andrew Wang <w...@apache.org>
Authored: Thu Oct 5 16:58:43 2017 -0700
Committer: Andrew Wang <w...@apache.org>
Committed: Thu Oct 5 16:58:46 2017 -0700

--
 .../blockmanagement/BlockPlacementPolicy.java   |   2 +-
 .../BlockPlacementPolicyRackFaultTolerant.java  |  49 +++--
 .../hdfs/TestErasureCodingMultipleRacks.java| 107 +++
 3 files changed, 146 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e1592720/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java
index 732a2dc..23e3e40 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java
@@ -43,7 +43,7 @@ import org.slf4j.LoggerFactory;
  */
 @InterfaceAudience.Private
 public abstract class BlockPlacementPolicy {
-  static final Logger LOG = LoggerFactory.getLogger(
+  public static final Logger LOG = LoggerFactory.getLogger(
   BlockPlacementPolicy.class);
 
   @InterfaceAudience.Private

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e1592720/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyRackFaultTolerant.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyRackFaultTolerant.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyRackFaultTolerant.java
index c0d981c..1eac3ea 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyRackFaultTolerant.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyRackFaultTolerant.java
@@ -46,9 +46,12 @@ public class BlockPlacementPolicyRackFaultTolerant extends 
BlockPlacementPolicyD
 if (numOfRacks == 1 || totalNumOfReplicas <= 1) {
   return new int[] {numOfReplicas, totalNumOfReplicas};
 }
-if(totalNumOfReplicas<numOfRacks){
+// If more racks than replicas, put one replica per rack.
+if (totalNumOfReplicas < numOfRacks) {
   return new int[] {numOfReplicas, 1};
 }
+// If more replicas than racks, evenly spread the replicas.
+// This calculation rounds up.
 int maxNodesPerRack = (totalNumOfReplicas - 1) / numOfRacks + 1;
 return new int[] {numOfReplicas, maxNodesPerRack};
   }
@@ -109,18 +112,42 @@ public class BlockPlacementPolicyRackFaultTolerant 
extends BlockPlacementPolicyD
 numOfReplicas = Math.min(totalReplicaExpected - results.size(),
 (maxNodesPerRack -1) * numOfRacks - (results.size() - excess));
 
-// Fill each rack exactly (maxNodesPerRack-1) replicas.
-writer = chooseOnce(numOfReplicas, writer, new HashSet<>(excludedNodes),
-blocksize, maxNodesPerRack -1, results, avoidStaleNodes, storageTypes);
+try {
+  // Try to spread the replicas as evenly as possible across racks.
+  // This is done by first placing with (maxNodesPerRack-1), then spreading
+  // the remainder by calling again with maxNodesPerRack.
+  writer = chooseOnce(numOfReplicas, writer, new HashSet<>(excludedNodes),
+  blocksize, maxNodesPerRack - 1, results, avoidStaleNodes,
+  storageTypes);
 
-for (DatanodeStorageInfo resultStorage : results) {
-  addToExcludedNodes(resultStorage.getDatanodeDescriptor(), excludedNodes);
-}
+  // Exclude the chosen nodes
+  for (DatanodeStorageInfo resultStorage : results) {
+addToExcludedNodes(resultStorage.getDatanodeDesc

svn commit: r1811152 - in /hadoop/common/site/main: author/src/documentation/content/xdocs/ publish/

2017-10-04 Thread wang
Author: wang
Date: Wed Oct  4 22:03:44 2017
New Revision: 1811152

URL: http://svn.apache.org/viewvc?rev=1811152&view=rev
Log:
Update the releases page links for 3.0.0-beta1

Modified:
hadoop/common/site/main/author/src/documentation/content/xdocs/releases.xml
hadoop/common/site/main/publish/bylaws.pdf
hadoop/common/site/main/publish/committer_criteria.pdf
hadoop/common/site/main/publish/index.pdf
hadoop/common/site/main/publish/issue_tracking.pdf
hadoop/common/site/main/publish/linkmap.pdf
hadoop/common/site/main/publish/mailing_lists.pdf
hadoop/common/site/main/publish/privacy_policy.pdf
hadoop/common/site/main/publish/releases.html
hadoop/common/site/main/publish/releases.pdf
hadoop/common/site/main/publish/version_control.pdf
hadoop/common/site/main/publish/versioning.pdf
hadoop/common/site/main/publish/who.pdf

Modified: 
hadoop/common/site/main/author/src/documentation/content/xdocs/releases.xml
URL: 
http://svn.apache.org/viewvc/hadoop/common/site/main/author/src/documentation/content/xdocs/releases.xml?rev=1811152&r1=1811151&r2=1811152&view=diff
==
--- hadoop/common/site/main/author/src/documentation/content/xdocs/releases.xml 
(original)
+++ hadoop/common/site/main/author/src/documentation/content/xdocs/releases.xml 
Wed Oct  4 22:03:44 2017
@@ -31,7 +31,7 @@


  3.0.0-beta1
- 07 July, 2017
+ 03 October, 2017
  http://www.apache.org/dyn/closer.cgi/hadoop/common/hadoop-3.0.0-beta1/hadoop-3.0.0-beta1-src.tar.gz;>source
  https://dist.apache.org/repos/dist/release/hadoop/common/hadoop-3.0.0-beta1/hadoop-3.0.0-beta1-src.tar.gz.asc;>signature
  https://dist.apache.org/repos/dist/release/hadoop/common/hadoop-3.0.0-beta1/hadoop-3.0.0-beta1-src.tar.gz.mds;>checksum
 file

Modified: hadoop/common/site/main/publish/bylaws.pdf
URL: 
http://svn.apache.org/viewvc/hadoop/common/site/main/publish/bylaws.pdf?rev=1811152=1811151=1811152=diff
==
Binary files - no diff available.

Modified: hadoop/common/site/main/publish/committer_criteria.pdf
URL: 
http://svn.apache.org/viewvc/hadoop/common/site/main/publish/committer_criteria.pdf?rev=1811152=1811151=1811152=diff
==
Binary files - no diff available.

Modified: hadoop/common/site/main/publish/index.pdf
URL: 
http://svn.apache.org/viewvc/hadoop/common/site/main/publish/index.pdf?rev=1811152=1811151=1811152=diff
==
Binary files - no diff available.

Modified: hadoop/common/site/main/publish/issue_tracking.pdf
URL: 
http://svn.apache.org/viewvc/hadoop/common/site/main/publish/issue_tracking.pdf?rev=1811152=1811151=1811152=diff
==
Binary files - no diff available.

Modified: hadoop/common/site/main/publish/linkmap.pdf
URL: 
http://svn.apache.org/viewvc/hadoop/common/site/main/publish/linkmap.pdf?rev=1811152=1811151=1811152=diff
==
Binary files - no diff available.

Modified: hadoop/common/site/main/publish/mailing_lists.pdf
URL: 
http://svn.apache.org/viewvc/hadoop/common/site/main/publish/mailing_lists.pdf?rev=1811152=1811151=1811152=diff
==
Binary files - no diff available.

Modified: hadoop/common/site/main/publish/privacy_policy.pdf
URL: 
http://svn.apache.org/viewvc/hadoop/common/site/main/publish/privacy_policy.pdf?rev=1811152=1811151=1811152=diff
==
Binary files - no diff available.

Modified: hadoop/common/site/main/publish/releases.html
URL: 
http://svn.apache.org/viewvc/hadoop/common/site/main/publish/releases.html?rev=1811152&r1=1811151&r2=1811152&view=diff
==
--- hadoop/common/site/main/publish/releases.html (original)
+++ hadoop/common/site/main/publish/releases.html Wed Oct  4 22:03:44 2017
@@ -282,7 +282,7 @@ document.write("Last Published: " + docu
 
  
 3.0.0-beta1
- 07 July, 2017
+ 03 October, 2017
  http://www.apache.org/dyn/closer.cgi/hadoop/common/hadoop-3.0.0-beta1/hadoop-3.0.0-beta1-src.tar.gz;>source
  https://dist.apache.org/repos/dist/release/hadoop/common/hadoop-3.0.0-beta1/hadoop-3.0.0-beta1-src.tar.gz.asc;>signature
  https://dist.apache.org/repos/dist/release/hadoop/common/hadoop-3.0.0-beta1/hadoop-3.0.0-beta1-src.tar.gz.mds;>checksum
 file

Modified: hadoop/common/site/main/publish/releases.pdf
URL: 
http://svn.apache.org/viewvc/hadoop/common/site/main/publish/releases.pdf?rev=

hadoop git commit: HADOOP-14928. Update site release notes for 3.0.0 GA.

2017-10-04 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/trunk 2df1b2ac0 -> cae1c734f


HADOOP-14928. Update site release notes for 3.0.0 GA.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cae1c734
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cae1c734
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cae1c734

Branch: refs/heads/trunk
Commit: cae1c734febc67fdecb754e386bf7df4241832db
Parents: 2df1b2a
Author: Andrew Wang <w...@apache.org>
Authored: Wed Oct 4 14:59:40 2017 -0700
Committer: Andrew Wang <w...@apache.org>
Committed: Wed Oct 4 14:59:40 2017 -0700

--
 hadoop-project/src/site/markdown/index.md.vm | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cae1c734/hadoop-project/src/site/markdown/index.md.vm
--
diff --git a/hadoop-project/src/site/markdown/index.md.vm 
b/hadoop-project/src/site/markdown/index.md.vm
index 1526f59..d9e645b 100644
--- a/hadoop-project/src/site/markdown/index.md.vm
+++ b/hadoop-project/src/site/markdown/index.md.vm
@@ -18,9 +18,8 @@ Apache Hadoop ${project.version}
 Apache Hadoop ${project.version} incorporates a number of significant
 enhancements over the previous major release line (hadoop-2.x).
 
-This is an alpha release to facilitate testing and the collection of
-feedback from downstream application developers and users. There are
-no guarantees regarding API stability or quality.
+This release is generally available (GA), meaning that it represents a point of
+API stability and quality that we consider production-ready.
 
 Overview
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HADOOP-14928. Update site release notes for 3.0.0 GA.

2017-10-04 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 fe0d6f5b1 -> c8c2c8928


HADOOP-14928. Update site release notes for 3.0.0 GA.

(cherry picked from commit cae1c734febc67fdecb754e386bf7df4241832db)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c8c2c892
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c8c2c892
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c8c2c892

Branch: refs/heads/branch-3.0
Commit: c8c2c892823b012c28d69811660224b9ae1d86a5
Parents: fe0d6f5
Author: Andrew Wang <w...@apache.org>
Authored: Wed Oct 4 14:59:40 2017 -0700
Committer: Andrew Wang <w...@apache.org>
Committed: Wed Oct 4 14:59:48 2017 -0700

--
 hadoop-project/src/site/markdown/index.md.vm | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c8c2c892/hadoop-project/src/site/markdown/index.md.vm
--
diff --git a/hadoop-project/src/site/markdown/index.md.vm 
b/hadoop-project/src/site/markdown/index.md.vm
index 1526f59..d9e645b 100644
--- a/hadoop-project/src/site/markdown/index.md.vm
+++ b/hadoop-project/src/site/markdown/index.md.vm
@@ -18,9 +18,8 @@ Apache Hadoop ${project.version}
 Apache Hadoop ${project.version} incorporates a number of significant
 enhancements over the previous major release line (hadoop-2.x).
 
-This is an alpha release to facilitate testing and the collection of
-feedback from downstream application developers and users. There are
-no guarantees regarding API stability or quality.
+This release is generally available (GA), meaning that it represents a point of
+API stability and quality that we consider production-ready.
 
 Overview
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



svn commit: r1811150 - in /hadoop/common/site/main: author/src/documentation/content/xdocs/ publish/

2017-10-04 Thread wang
Author: wang
Date: Wed Oct  4 20:51:19 2017
New Revision: 1811150

URL: http://svn.apache.org/viewvc?rev=1811150&view=rev
Log:
Update the releases page links for 3.0.0-beta1

Modified:
hadoop/common/site/main/author/src/documentation/content/xdocs/releases.xml
hadoop/common/site/main/publish/bylaws.html
hadoop/common/site/main/publish/bylaws.pdf
hadoop/common/site/main/publish/committer_criteria.html
hadoop/common/site/main/publish/committer_criteria.pdf
hadoop/common/site/main/publish/index.html
hadoop/common/site/main/publish/index.pdf
hadoop/common/site/main/publish/issue_tracking.html
hadoop/common/site/main/publish/issue_tracking.pdf
hadoop/common/site/main/publish/linkmap.html
hadoop/common/site/main/publish/linkmap.pdf
hadoop/common/site/main/publish/mailing_lists.html
hadoop/common/site/main/publish/mailing_lists.pdf
hadoop/common/site/main/publish/privacy_policy.html
hadoop/common/site/main/publish/privacy_policy.pdf
hadoop/common/site/main/publish/releases.html
hadoop/common/site/main/publish/releases.pdf
hadoop/common/site/main/publish/version_control.html
hadoop/common/site/main/publish/version_control.pdf
hadoop/common/site/main/publish/versioning.html
hadoop/common/site/main/publish/versioning.pdf
hadoop/common/site/main/publish/who.html
hadoop/common/site/main/publish/who.pdf

Modified: 
hadoop/common/site/main/author/src/documentation/content/xdocs/releases.xml
URL: 
http://svn.apache.org/viewvc/hadoop/common/site/main/author/src/documentation/content/xdocs/releases.xml?rev=1811150&r1=1811149&r2=1811150&view=diff
==
--- hadoop/common/site/main/author/src/documentation/content/xdocs/releases.xml 
(original)
+++ hadoop/common/site/main/author/src/documentation/content/xdocs/releases.xml 
Wed Oct  4 20:51:19 2017
@@ -30,18 +30,18 @@
  SHA-256


- 3.0.0-alpha4
+ 3.0.0-beta1
  07 July, 2017
- http://www.apache.org/dyn/closer.cgi/hadoop/common/hadoop-3.0.0-alpha4/hadoop-3.0.0-alpha4-src.tar.gz;>source
- https://dist.apache.org/repos/dist/release/hadoop/common/hadoop-3.0.0-alpha4/hadoop-3.0.0-alpha4-src.tar.gz.asc;>signature
- https://dist.apache.org/repos/dist/release/hadoop/common/hadoop-3.0.0-alpha4/hadoop-3.0.0-alpha4-src.tar.gz.mds;>checksum
 file
+ http://www.apache.org/dyn/closer.cgi/hadoop/common/hadoop-3.0.0-beta1/hadoop-3.0.0-beta1-src.tar.gz;>source
+ https://dist.apache.org/repos/dist/release/hadoop/common/hadoop-3.0.0-beta1/hadoop-3.0.0-beta1-src.tar.gz.asc;>signature
+ https://dist.apache.org/repos/dist/release/hadoop/common/hadoop-3.0.0-beta1/hadoop-3.0.0-beta1-src.tar.gz.mds;>checksum
 file


  
  
- http://www.apache.org/dyn/closer.cgi/hadoop/common/hadoop-3.0.0-alpha4/hadoop-3.0.0-alpha4.tar.gz;>binary
- https://dist.apache.org/repos/dist/release/hadoop/common/hadoop-3.0.0-alpha4/hadoop-3.0.0-alpha4.tar.gz.asc;>signature
- https://dist.apache.org/repos/dist/release/hadoop/common/hadoop-3.0.0-alpha4/hadoop-3.0.0-alpha4.tar.gz.mds;>checksum
 file
+ http://www.apache.org/dyn/closer.cgi/hadoop/common/hadoop-3.0.0-beta1/hadoop-3.0.0-beta1.tar.gz;>binary
+ https://dist.apache.org/repos/dist/release/hadoop/common/hadoop-3.0.0-beta1/hadoop-3.0.0-beta1.tar.gz.asc;>signature
+ https://dist.apache.org/repos/dist/release/hadoop/common/hadoop-3.0.0-beta1/hadoop-3.0.0-beta1.tar.gz.mds;>checksum
 file


  2.8.1

Modified: hadoop/common/site/main/publish/bylaws.html
URL: 
http://svn.apache.org/viewvc/hadoop/common/site/main/publish/bylaws.html?rev=1811150&r1=1811149&r2=1811150&view=diff
==
--- hadoop/common/site/main/publish/bylaws.html (original)
+++ hadoop/common/site/main/publish/bylaws.html Wed Oct  4 20:51:19 2017
@@ -178,7 +178,7 @@ document.write("Last Published: " + docu
 http://hadoop.apache.org/docs/stable/;>Stable
 
 
-http://hadoop.apache.org/docs/r3.0.0-alpha4;>Release 3.0.0-alpha4
+http://hadoop.apache.org/docs/r3.0.0-beta1;>Release 3.0.0-beta1
 
 
 http://hadoop.apache.org/docs/r2.8.1/;>Release 2.8.1

Modified: hadoop/common/site/main/publish/bylaws.pdf
URL: 
http://svn.apache.org/viewvc/hadoop/common/site/main/publish/bylaws.pdf?rev=1811150&r1=1811149&r2=1811150&view=diff
==
Binary files - no diff available.

Modified: hadoop/common/site/main/publish/committer_criteria.html
URL: 
http://svn.apache.org/viewvc/hadoop/common/site/main/publish/committer_criteria.html?rev=1811150&r1=1811149&r2=1811150&view=diff
==
--- hadoop/common/site/main/publish/committer_criteria.html (original)
+++ h

svn commit: r1811149 - in /hadoop/common/site/main: author/src/documentation/content/xdocs/ publish/ publish/docs/ publish/docs/r3.0.0-beta1/

2017-10-04 Thread wang
Author: wang
Date: Wed Oct  4 20:32:34 2017
New Revision: 1811149

URL: http://svn.apache.org/viewvc?rev=1811149&view=rev
Log:
Update website for 3.0.0-beta1

Modified:
hadoop/common/site/main/author/src/documentation/content/xdocs/index.xml
hadoop/common/site/main/author/src/documentation/content/xdocs/releases.xml
hadoop/common/site/main/author/src/documentation/content/xdocs/site.xml
hadoop/common/site/main/publish/bylaws.pdf
hadoop/common/site/main/publish/committer_criteria.pdf
hadoop/common/site/main/publish/docs/current3
hadoop/common/site/main/publish/docs/r3.0.0-beta1/index.html
hadoop/common/site/main/publish/index.html
hadoop/common/site/main/publish/index.pdf
hadoop/common/site/main/publish/issue_tracking.pdf
hadoop/common/site/main/publish/linkmap.pdf
hadoop/common/site/main/publish/mailing_lists.pdf
hadoop/common/site/main/publish/privacy_policy.pdf
hadoop/common/site/main/publish/releases.html
hadoop/common/site/main/publish/releases.pdf
hadoop/common/site/main/publish/version_control.pdf
hadoop/common/site/main/publish/versioning.pdf
hadoop/common/site/main/publish/who.pdf

Modified: 
hadoop/common/site/main/author/src/documentation/content/xdocs/index.xml
URL: 
http://svn.apache.org/viewvc/hadoop/common/site/main/author/src/documentation/content/xdocs/index.xml?rev=1811149&r1=1811148&r2=1811149&view=diff
==
--- hadoop/common/site/main/author/src/documentation/content/xdocs/index.xml 
(original)
+++ hadoop/common/site/main/author/src/documentation/content/xdocs/index.xml 
Wed Oct  4 20:32:34 2017
@@ -139,6 +139,20 @@
 News
 
   
+03 October 2017: Release 3.0.0-beta1 available
+
+  This is the first beta release in the 3.0.0 release line. It 
consists of 576 bug fixes, improvements, and other enhancements since 
3.0.0-alpha4. This is planned to be the final alpha release, with the next 
release being 3.0.0 GA.
+
+
+  Please note that beta releases are API stable but come with no 
guarantees of quality, and are not intended for production use.
+
+
+  Users are encouraged to read the http://hadoop.apache.org/docs/r3.0.0-beta1/index.html;>overview of major 
changes coming in 3.0.0.
+  The beta1 http://hadoop.apache.org/docs/r3.0.0-beta1/hadoop-project-dist/hadoop-common/release/3.0.0-beta1/RELEASENOTES.3.0.0-beta1.html;>release
 notes and http://hadoop.apache.org/docs/r3.0.0-beta1/hadoop-project-dist/hadoop-common/release/3.0.0-beta1/CHANGES.3.0.0-beta1.html;>changelog
 detail the changes since 3.0.0-alpha4.
+
+  
+
+  
 04 August, 2017: Release 2.7.4 available 
  This is the next release of Apache Hadoop 2.7 line. 
  Please see the

Modified: 
hadoop/common/site/main/author/src/documentation/content/xdocs/releases.xml
URL: 
http://svn.apache.org/viewvc/hadoop/common/site/main/author/src/documentation/content/xdocs/releases.xml?rev=1811149&r1=1811148&r2=1811149&view=diff
==
--- hadoop/common/site/main/author/src/documentation/content/xdocs/releases.xml 
(original)
+++ hadoop/common/site/main/author/src/documentation/content/xdocs/releases.xml 
Wed Oct  4 20:32:34 2017
@@ -130,6 +130,20 @@
   Release Notes
 
   
+03 October 2017: Release 3.0.0-beta1 available
+
+  This is the first beta release in the 3.0.0 release line. It 
consists of 576 bug fixes, improvements, and other enhancements since 
3.0.0-alpha4. This is planned to be the final beta release, with the next 
release being 3.0.0 GA.
+
+
+  Please note that beta releases are API stable but come with no 
guarantees of quality, and are not intended for production use.
+
+
+  Users are encouraged to read the http://hadoop.apache.org/docs/r3.0.0-beta1/index.html;>overview of major 
changes coming in 3.0.0.
+  The beta1 http://hadoop.apache.org/docs/r3.0.0-beta1/hadoop-project-dist/hadoop-common/release/3.0.0-beta1/RELEASENOTES.3.0.0-beta1.html;>release
 notes and http://hadoop.apache.org/docs/r3.0.0-beta1/hadoop-project-dist/hadoop-common/release/3.0.0-beta1/CHANGES.3.0.0-beta1.html;>changelog
 detail the changes since 3.0.0-alpha4.
+
+  
+
+  
 04 August, 2017: Release 2.7.4 available 
  This is the next release of Apache Hadoop 2.7 line. 
  Please see the

Modified: 
hadoop/common/site/main/author/src/documentation/content/xdocs/site.xml
URL: 
http://svn.apache.org/viewvc/hadoop/common/site/main/author/src/documentation/content/xdocs/site.xml?rev=1811149&r1=1811148&r2=1811149&view=diff
==
--- hadoop/common/site/main/author/src/documentation/content/xdocs/site.xml 
(original)
+++ hadoop/common/si

svn commit: r1811147 - in /hadoop/common/site/main/publish/docs/r3.0.0-beta1: ./ api/ api/org/ api/org/apache/ api/org/apache/hadoop/ api/org/apache/hadoop/class-use/ api/org/apache/hadoop/classificat

2017-10-04 Thread wang
Author: wang
Date: Wed Oct  4 20:12:10 2017
New Revision: 1811147

URL: http://svn.apache.org/viewvc?rev=1811147&view=rev
Log:
Add docs for 3.0.0-beta1.


[This commit notification would consist of 4275 parts, 
which exceeds the limit of 50 ones, so it was shortened to the summary.]

-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[3/3] hadoop git commit: Add 3.0.0-beta1 CHANGES and RELEASENOTES.

2017-10-03 Thread wang
Add 3.0.0-beta1 CHANGES and RELEASENOTES.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b34b3ff9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b34b3ff9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b34b3ff9

Branch: refs/heads/trunk
Commit: b34b3ff94baf1d343d60d3f3924e3d28dace13d2
Parents: 79e37dc
Author: Andrew Wang <w...@apache.org>
Authored: Tue Oct 3 17:14:32 2017 -0700
Committer: Andrew Wang <w...@apache.org>
Committed: Tue Oct 3 17:14:40 2017 -0700

--
 .../release/3.0.0-beta1/CHANGES.3.0.0-beta1.md  | 646 +++
 .../3.0.0-beta1/RELEASENOTES.3.0.0-beta1.md | 377 +++
 2 files changed, 1023 insertions(+)
--



-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[1/3] hadoop git commit: Add 3.0.0-beta1 CHANGES and RELEASENOTES.

2017-10-03 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/trunk 79e37dc6d -> b34b3ff94


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b34b3ff9/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.0-beta1/RELEASENOTES.3.0.0-beta1.md
--
diff --git 
a/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.0-beta1/RELEASENOTES.3.0.0-beta1.md
 
b/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.0-beta1/RELEASENOTES.3.0.0-beta1.md
new file mode 100644
index 000..c799933
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.0-beta1/RELEASENOTES.3.0.0-beta1.md
@@ -0,0 +1,377 @@
+
+
+# Apache Hadoop  3.0.0-beta1 Release Notes
+
+These release notes cover new developer and user-facing incompatibilities, 
important issues, features, and major improvements.
+
+
+---
+
+* [HADOOP-14535](https://issues.apache.org/jira/browse/HADOOP-14535) | *Major* 
| **wasb: implement high-performance random access and seek of block blobs**
+
+Random access and seek improvements for the wasb:// (Azure) file system.
+
+
+---
+
+* [YARN-6798](https://issues.apache.org/jira/browse/YARN-6798) | *Major* | 
**Fix NM startup failure with old state store due to version mismatch**
+
+
+
+This fixes the LevelDB state store for the NodeManager.  As of this patch, the 
state store versions now correspond to the following table.
+
+* Previous Patch: YARN-5049
+  * LevelDB Key: queued
+  * Hadoop Versions: 2.9.0, 3.0.0-alpha1
+  * Corresponding LevelDB Version: 1.2
+* Previous Patch: YARN-6127
+  * LevelDB Key: AMRMProxy/NextMasterKey
+  * Hadoop Versions: 2.9.0, 3.0.0-alpha4
+  * Corresponding LevelDB Version: 1.1
+
+
+---
+
+* [HADOOP-14539](https://issues.apache.org/jira/browse/HADOOP-14539) | *Major* 
| **Move commons logging APIs over to slf4j in hadoop-common**
+
+In Hadoop common, fatal log level is changed to error because slf4j API does 
not support fatal log level.
+
+
+---
+
+* [HADOOP-14518](https://issues.apache.org/jira/browse/HADOOP-14518) | *Minor* 
| **Customize User-Agent header sent in HTTP/HTTPS requests by WASB.**
+
+WASB now includes the current Apache Hadoop version in the User-Agent string 
passed to Azure Blob service. Users also may include optional additional 
information to identify their application. See the documentation of 
configuration property fs.wasb.user.agent.id for further details.
+
+
+---
+
+* [HADOOP-11875](https://issues.apache.org/jira/browse/HADOOP-11875) | *Major* 
| **[JDK9] Add a second copy of Hamlet without \_ as a one-character 
identifier**
+
+Added org.apache.hadoop.yarn.webapp.hamlet2 package without \_ as a 
one-character identifier. Please use this package instead of 
org.apache.hadoop.yarn.webapp.hamlet.
+
+
+---
+
+* [HDFS-12206](https://issues.apache.org/jira/browse/HDFS-12206) | *Major* | 
**Rename the split EC / replicated block metrics**
+
+The metrics and MBeans introduced in HDFS-10999 have been renamed for brevity 
and clarity.
+
+
+---
+
+* [HADOOP-13595](https://issues.apache.org/jira/browse/HADOOP-13595) | 
*Blocker* | **Rework hadoop\_usage to be broken up by clients/daemons/etc.**
+
+This patch changes how usage output is generated to now require a sub-command 
type.  This allows users to see who the intended audience for  a command is or 
it is a daemon.
+
+
+---
+
+* [HDFS-6984](https://issues.apache.org/jira/browse/HDFS-6984) | *Major* | 
**Serialize FileStatus via protobuf**
+
+FileStatus and FsPermission Writable serialization is deprecated and its 
implementation (incompatibly) replaced with protocol buffers. The 
FsPermissionProto record moved from hdfs.proto to acl.proto. HdfsFileStatus is 
now a subtype of FileStatus. FsPermissionExtension with its associated flags 
for ACLs, encryption, and erasure coding has been deprecated; users should 
query these attributes on the FileStatus object directly. The FsPermission 
instance in AclStatus no longer retains or reports these extended attributes 
(likely unused).
+
+
+---
+
+* [HADOOP-14722](https://issues.apache.org/jira/browse/HADOOP-14722) | *Major* 
| **Azure: BlockBlobInputStream position incorrect after seek**
+
+Bug fix to Azure Filesystem related to HADOOP-14535.
+
+
+---
+
+* [YARN-6961](https://issues.apache.org/jira/browse/YARN-6961) | *Minor* | 
**Remove commons-logging dependency from 
hadoop-yarn-server-applicationhistoryservice module**
+
+commons-logging dependency was removed from 
hadoop-yarn-server-applicationhistoryservice. If you rely on the transitive 
commons-logging dependency, please define the dependency explicitly.
+
+
+---
+
+* [HADOOP-14680](https://issues.apache.org/jira/browse/HADOOP-14680) | *Minor* 
| **Azure: IndexOutOfBoundsException in BlockBlobInputStream**
+
+Bug fix to Azure Filesystem related to HADOOP-14535
+
+
+---
+
+* [HDFS-10326](https://issues.apache.org/jira/browse/HDFS-10326) | *Major* | 
**Disable setting tcp socket send/receive buffers 

[2/3] hadoop git commit: Add 3.0.0-beta1 CHANGES and RELEASENOTES.

2017-10-03 Thread wang
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b34b3ff9/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.0-beta1/CHANGES.3.0.0-beta1.md
--
diff --git 
a/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.0-beta1/CHANGES.3.0.0-beta1.md
 
b/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.0-beta1/CHANGES.3.0.0-beta1.md
new file mode 100644
index 000..3ddf2ee
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.0-beta1/CHANGES.3.0.0-beta1.md
@@ -0,0 +1,646 @@
+
+
+# Apache Hadoop Changelog
+
+## Release 3.0.0-beta1 - 2017-09-28
+
+### INCOMPATIBLE CHANGES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|: |: | :--- |: |: |: |
+| [HADOOP-14539](https://issues.apache.org/jira/browse/HADOOP-14539) | Move 
commons logging APIs over to slf4j in hadoop-common |  Major | . | Akira 
Ajisaka | Wenxin He |
+| [HDFS-12206](https://issues.apache.org/jira/browse/HDFS-12206) | Rename the 
split EC / replicated block metrics |  Major | metrics | Andrew Wang | Andrew 
Wang |
+| [HADOOP-13595](https://issues.apache.org/jira/browse/HADOOP-13595) | Rework 
hadoop\_usage to be broken up by clients/daemons/etc. |  Blocker | scripts | 
Allen Wittenauer | Allen Wittenauer |
+| [HDFS-6984](https://issues.apache.org/jira/browse/HDFS-6984) | Serialize 
FileStatus via protobuf |  Major | . | Colin P. McCabe | Chris Douglas |
+| [YARN-6961](https://issues.apache.org/jira/browse/YARN-6961) | Remove 
commons-logging dependency from hadoop-yarn-server-applicationhistoryservice 
module |  Minor | build | Akira Ajisaka | Yeliang Cang |
+| [HDFS-11957](https://issues.apache.org/jira/browse/HDFS-11957) | Enable 
POSIX ACL inheritance by default |  Major | security | John Zhuge | John Zhuge |
+| [MAPREDUCE-6870](https://issues.apache.org/jira/browse/MAPREDUCE-6870) | Add 
configuration for MR job to finish when all reducers are complete (even with 
unfinished mappers) |  Major | . | Zhe Zhang | Peter Bacsko |
+| [HADOOP-14260](https://issues.apache.org/jira/browse/HADOOP-14260) | 
Configuration.dumpConfiguration should redact sensitive information |  Major | 
conf, security | Vihang Karajgaonkar | John Zhuge |
+| [HADOOP-14726](https://issues.apache.org/jira/browse/HADOOP-14726) | Mark 
FileStatus::isDir as final |  Minor | fs | Chris Douglas | Chris Douglas |
+| [HDFS-12303](https://issues.apache.org/jira/browse/HDFS-12303) | Change 
default EC cell size to 1MB for better performance |  Blocker | . | Wei Zhou | 
Wei Zhou |
+| [HDFS-12258](https://issues.apache.org/jira/browse/HDFS-12258) | ec 
-listPolicies should list all policies in system, no matter it's enabled or 
disabled |  Major | . | SammiChen | Wei Zhou |
+| [MAPREDUCE-6892](https://issues.apache.org/jira/browse/MAPREDUCE-6892) | 
Issues with the count of failed/killed tasks in the jhist file |  Major | 
client, jobhistoryserver | Peter Bacsko | Peter Bacsko |
+| [HADOOP-14414](https://issues.apache.org/jira/browse/HADOOP-14414) | Calling 
maven-site-plugin directly for docs profile is unnecessary |  Minor | . | 
Andras Bokor | Andras Bokor |
+| [HDFS-12218](https://issues.apache.org/jira/browse/HDFS-12218) | Rename 
split EC / replicated block metrics in BlockManager |  Blocker | 
erasure-coding, metrics | Andrew Wang | Andrew Wang |
+| [HADOOP-14847](https://issues.apache.org/jira/browse/HADOOP-14847) | Remove 
Guava Supplier and change to java Supplier in AMRMClient and AMRMClientAysnc |  
Blocker | . | Bharat Viswanadham | Bharat Viswanadham |
+| [HDFS-12414](https://issues.apache.org/jira/browse/HDFS-12414) | Ensure to 
use CLI command to enable/disable erasure coding policy |  Major | . | 
SammiChen | SammiChen |
+| [HDFS-12438](https://issues.apache.org/jira/browse/HDFS-12438) | Rename 
dfs.datanode.ec.reconstruction.stripedblock.threads.size to 
dfs.datanode.ec.reconstruction.threads |  Major | . | Andrew Wang | Andrew Wang 
|
+| [HADOOP-14738](https://issues.apache.org/jira/browse/HADOOP-14738) | Remove 
S3N and obsolete bits of S3A; rework docs |  Blocker | fs/s3 | Steve Loughran | 
Steve Loughran |
+| [HDFS-7859](https://issues.apache.org/jira/browse/HDFS-7859) | Erasure 
Coding: Persist erasure coding policies in NameNode |  Major | . | Kai Zheng | 
SammiChen |
+| [HDFS-12395](https://issues.apache.org/jira/browse/HDFS-12395) | Support 
erasure coding policy operations in namenode edit log |  Major | erasure-coding 
| SammiChen | SammiChen |
+| [HADOOP-14670](https://issues.apache.org/jira/browse/HADOOP-14670) | 
Increase minimum cmake version for all platforms |  Major | build | Allen 
Wittenauer | Allen Wittenauer |
+| [HDFS-12447](https://issues.apache.org/jira/browse/HDFS-12447) | Rename 
AddECPolicyResponse to AddErasureCodingPolicyResponse |  Major | . | SammiChen 
| SammiChen |
+| [HDFS-7337](https://issues.apache.org/jira/browse/HDFS-7337) | Configurable

[hadoop] Git Push Summary

2017-10-03 Thread wang
Repository: hadoop
Updated Tags:  refs/tags/rel/release-3.0.0-beta1 [created] 276a57f20

-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HADOOP-14909. Fix the word of erasure encoding in the top page. Contributed by Takanobu Asanuma.

2017-09-29 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 337506190 -> 575763fb7


HADOOP-14909. Fix the word of erasure encoding in the top page. Contributed by 
Takanobu Asanuma.

(cherry picked from commit 373d0a51955cabff77e934a28ba2de308207374a)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/575763fb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/575763fb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/575763fb

Branch: refs/heads/branch-3.0
Commit: 575763fb7fcf1d6adbb0cafd6d6acf27bffabef5
Parents: 3375061
Author: Andrew Wang <w...@apache.org>
Authored: Fri Sep 29 13:34:36 2017 -0700
Committer: Andrew Wang <w...@apache.org>
Committed: Fri Sep 29 13:34:39 2017 -0700

--
 hadoop-project/src/site/markdown/index.md.vm | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/575763fb/hadoop-project/src/site/markdown/index.md.vm
--
diff --git a/hadoop-project/src/site/markdown/index.md.vm 
b/hadoop-project/src/site/markdown/index.md.vm
index d9443d6..1526f59 100644
--- a/hadoop-project/src/site/markdown/index.md.vm
+++ b/hadoop-project/src/site/markdown/index.md.vm
@@ -34,7 +34,7 @@ Minimum required Java version increased from Java 7 to Java 8
 All Hadoop JARs are now compiled targeting a runtime version of Java 8.
 Users still using Java 7 or below must upgrade to Java 8.
 
-Support for erasure encoding in HDFS
+Support for erasure coding in HDFS
 --
 
 Erasure coding is a method for durably storing data with significant space


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HADOOP-14909. Fix the word of erasure encoding in the top page. Contributed by Takanobu Asanuma.

2017-09-29 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/trunk 8aca46e53 -> 373d0a519


HADOOP-14909. Fix the word of erasure encoding in the top page. Contributed by 
Takanobu Asanuma.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/373d0a51
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/373d0a51
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/373d0a51

Branch: refs/heads/trunk
Commit: 373d0a51955cabff77e934a28ba2de308207374a
Parents: 8aca46e
Author: Andrew Wang <w...@apache.org>
Authored: Fri Sep 29 13:34:36 2017 -0700
Committer: Andrew Wang <w...@apache.org>
Committed: Fri Sep 29 13:34:36 2017 -0700

--
 hadoop-project/src/site/markdown/index.md.vm | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/373d0a51/hadoop-project/src/site/markdown/index.md.vm
--
diff --git a/hadoop-project/src/site/markdown/index.md.vm 
b/hadoop-project/src/site/markdown/index.md.vm
index d9443d6..1526f59 100644
--- a/hadoop-project/src/site/markdown/index.md.vm
+++ b/hadoop-project/src/site/markdown/index.md.vm
@@ -34,7 +34,7 @@ Minimum required Java version increased from Java 7 to Java 8
 All Hadoop JARs are now compiled targeting a runtime version of Java 8.
 Users still using Java 7 or below must upgrade to Java 8.
 
-Support for erasure encoding in HDFS
+Support for erasure coding in HDFS
 --
 
 Erasure coding is a method for durably storing data with significant space


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] Git Push Summary

2017-09-28 Thread wang
Repository: hadoop
Updated Tags:  refs/tags/release-3.0.0-beta1-RC0 [created] 228787ae5

-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: Updating maven version to 3.0.0-beta1 for release

2017-09-28 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0.0-beta1 2223393ad -> 1002c582d


Updating maven version to 3.0.0-beta1 for release


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1002c582
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1002c582
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1002c582

Branch: refs/heads/branch-3.0.0-beta1
Commit: 1002c582d86ae8689c497c3d31b73f1ab92d5e29
Parents: 2223393
Author: Andrew Wang <w...@apache.org>
Authored: Thu Sep 28 11:32:31 2017 -0700
Committer: Andrew Wang <w...@apache.org>
Committed: Thu Sep 28 11:32:31 2017 -0700

--
 hadoop-assemblies/pom.xml| 4 ++--
 hadoop-build-tools/pom.xml   | 2 +-
 hadoop-client-modules/hadoop-client-api/pom.xml  | 4 ++--
 hadoop-client-modules/hadoop-client-check-invariants/pom.xml | 4 ++--
 .../hadoop-client-check-test-invariants/pom.xml  | 4 ++--
 hadoop-client-modules/hadoop-client-integration-tests/pom.xml| 4 ++--
 hadoop-client-modules/hadoop-client-minicluster/pom.xml  | 4 ++--
 hadoop-client-modules/hadoop-client-runtime/pom.xml  | 4 ++--
 hadoop-client-modules/hadoop-client/pom.xml  | 4 ++--
 hadoop-client-modules/pom.xml| 2 +-
 hadoop-cloud-storage-project/hadoop-cloud-storage/pom.xml| 4 ++--
 hadoop-cloud-storage-project/pom.xml | 4 ++--
 hadoop-common-project/hadoop-annotations/pom.xml | 4 ++--
 hadoop-common-project/hadoop-auth-examples/pom.xml   | 4 ++--
 hadoop-common-project/hadoop-auth/pom.xml| 4 ++--
 hadoop-common-project/hadoop-common/pom.xml  | 4 ++--
 hadoop-common-project/hadoop-kms/pom.xml | 4 ++--
 hadoop-common-project/hadoop-minikdc/pom.xml | 4 ++--
 hadoop-common-project/hadoop-nfs/pom.xml | 4 ++--
 hadoop-common-project/pom.xml| 4 ++--
 hadoop-dist/pom.xml  | 4 ++--
 hadoop-hdfs-project/hadoop-hdfs-client/pom.xml   | 4 ++--
 hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml   | 4 ++--
 hadoop-hdfs-project/hadoop-hdfs-native-client/pom.xml| 4 ++--
 hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml  | 4 ++--
 hadoop-hdfs-project/hadoop-hdfs/pom.xml  | 4 ++--
 hadoop-hdfs-project/pom.xml  | 4 ++--
 .../hadoop-mapreduce-client/hadoop-mapreduce-client-app/pom.xml  | 4 ++--
 .../hadoop-mapreduce-client-common/pom.xml   | 4 ++--
 .../hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml | 4 ++--
 .../hadoop-mapreduce-client-hs-plugins/pom.xml   | 4 ++--
 .../hadoop-mapreduce-client/hadoop-mapreduce-client-hs/pom.xml   | 4 ++--
 .../hadoop-mapreduce-client-jobclient/pom.xml| 4 ++--
 .../hadoop-mapreduce-client-nativetask/pom.xml   | 4 ++--
 .../hadoop-mapreduce-client-shuffle/pom.xml  | 4 ++--
 hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml | 4 ++--
 hadoop-mapreduce-project/hadoop-mapreduce-examples/pom.xml   | 4 ++--
 hadoop-mapreduce-project/pom.xml | 4 ++--
 hadoop-maven-plugins/pom.xml | 2 +-
 hadoop-minicluster/pom.xml   | 4 ++--
 hadoop-project-dist/pom.xml  | 4 ++--
 hadoop-project/pom.xml   | 4 ++--
 hadoop-tools/hadoop-aliyun/pom.xml   | 2 +-
 hadoop-tools/hadoop-archive-logs/pom.xml | 4 ++--
 hadoop-tools/hadoop-archives/pom.xml | 4 ++--
 hadoop-tools/hadoop-aws/pom.xml  | 4 ++--
 hadoop-tools/hadoop-azure-datalake/pom.xml   | 2 +-
 hadoop-tools/hadoop-azure/pom.xml| 2 +-
 hadoop-tools/hadoop-datajoin/pom.xml | 4 ++--
 hadoop-tools/hadoop-distcp/pom.xml   | 4 ++--
 hadoop-tools/hadoop-extras/pom.xml   | 4 ++--
 hadoop-tools/hadoop-gridmix/pom.xml  | 4 ++--
 hadoop-tools/hadoop-kafka/pom.xml| 4 ++--
 hadoop-tools/hadoop-openstack/pom.xml| 4 ++--
 hadoop-tools/hadoop-pipes/pom.xml| 4 ++--
 hadoop-tools/hadoop-rumen/pom.xml| 4 ++--
 hadoop-tools/had

hadoop git commit: Preparing for 3.0.0 GA development

2017-09-28 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 59453dad8 -> 5a4f37019


Preparing for 3.0.0 GA development


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5a4f3701
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5a4f3701
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5a4f3701

Branch: refs/heads/branch-3.0
Commit: 5a4f37019ae911f25dc98dbd74899d99f31ff1c5
Parents: 59453da
Author: Andrew Wang <w...@apache.org>
Authored: Thu Sep 28 11:31:54 2017 -0700
Committer: Andrew Wang <w...@apache.org>
Committed: Thu Sep 28 11:32:01 2017 -0700

--
 hadoop-assemblies/pom.xml| 4 ++--
 hadoop-build-tools/pom.xml   | 2 +-
 hadoop-client-modules/hadoop-client-api/pom.xml  | 4 ++--
 hadoop-client-modules/hadoop-client-check-invariants/pom.xml | 4 ++--
 .../hadoop-client-check-test-invariants/pom.xml  | 4 ++--
 hadoop-client-modules/hadoop-client-integration-tests/pom.xml| 4 ++--
 hadoop-client-modules/hadoop-client-minicluster/pom.xml  | 4 ++--
 hadoop-client-modules/hadoop-client-runtime/pom.xml  | 4 ++--
 hadoop-client-modules/hadoop-client/pom.xml  | 4 ++--
 hadoop-client-modules/pom.xml| 2 +-
 hadoop-cloud-storage-project/hadoop-cloud-storage/pom.xml| 4 ++--
 hadoop-cloud-storage-project/pom.xml | 4 ++--
 hadoop-common-project/hadoop-annotations/pom.xml | 4 ++--
 hadoop-common-project/hadoop-auth-examples/pom.xml   | 4 ++--
 hadoop-common-project/hadoop-auth/pom.xml| 4 ++--
 hadoop-common-project/hadoop-common/pom.xml  | 4 ++--
 hadoop-common-project/hadoop-kms/pom.xml | 4 ++--
 hadoop-common-project/hadoop-minikdc/pom.xml | 4 ++--
 hadoop-common-project/hadoop-nfs/pom.xml | 4 ++--
 hadoop-common-project/pom.xml| 4 ++--
 hadoop-dist/pom.xml  | 4 ++--
 hadoop-hdfs-project/hadoop-hdfs-client/pom.xml   | 4 ++--
 hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml   | 4 ++--
 hadoop-hdfs-project/hadoop-hdfs-native-client/pom.xml| 4 ++--
 hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml  | 4 ++--
 hadoop-hdfs-project/hadoop-hdfs/pom.xml  | 4 ++--
 hadoop-hdfs-project/pom.xml  | 4 ++--
 .../hadoop-mapreduce-client/hadoop-mapreduce-client-app/pom.xml  | 4 ++--
 .../hadoop-mapreduce-client-common/pom.xml   | 4 ++--
 .../hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml | 4 ++--
 .../hadoop-mapreduce-client-hs-plugins/pom.xml   | 4 ++--
 .../hadoop-mapreduce-client/hadoop-mapreduce-client-hs/pom.xml   | 4 ++--
 .../hadoop-mapreduce-client-jobclient/pom.xml| 4 ++--
 .../hadoop-mapreduce-client-nativetask/pom.xml   | 4 ++--
 .../hadoop-mapreduce-client-shuffle/pom.xml  | 4 ++--
 hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml | 4 ++--
 hadoop-mapreduce-project/hadoop-mapreduce-examples/pom.xml   | 4 ++--
 hadoop-mapreduce-project/pom.xml | 4 ++--
 hadoop-maven-plugins/pom.xml | 2 +-
 hadoop-minicluster/pom.xml   | 4 ++--
 hadoop-project-dist/pom.xml  | 4 ++--
 hadoop-project/pom.xml   | 4 ++--
 hadoop-tools/hadoop-aliyun/pom.xml   | 2 +-
 hadoop-tools/hadoop-archive-logs/pom.xml | 4 ++--
 hadoop-tools/hadoop-archives/pom.xml | 4 ++--
 hadoop-tools/hadoop-aws/pom.xml  | 4 ++--
 hadoop-tools/hadoop-azure-datalake/pom.xml   | 2 +-
 hadoop-tools/hadoop-azure/pom.xml| 2 +-
 hadoop-tools/hadoop-datajoin/pom.xml | 4 ++--
 hadoop-tools/hadoop-distcp/pom.xml   | 4 ++--
 hadoop-tools/hadoop-extras/pom.xml   | 4 ++--
 hadoop-tools/hadoop-gridmix/pom.xml  | 4 ++--
 hadoop-tools/hadoop-kafka/pom.xml| 4 ++--
 hadoop-tools/hadoop-openstack/pom.xml| 4 ++--
 hadoop-tools/hadoop-pipes/pom.xml| 4 ++--
 hadoop-tools/hadoop-rumen/pom.xml| 4 ++--
 hadoop-tools/hadoop-sls/pom.xml  | 4 ++-

[hadoop] Git Push Summary

2017-09-28 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0.0-beta1 [created] 2223393ad

-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HADOOP-14851 LambdaTestUtils.eventually() doesn't spin on Assertion failures. Contributed by Steve Loughran

2017-09-28 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 46031d84f -> 2223393ad


HADOOP-14851 LambdaTestUtils.eventually() doesn't spin on Assertion failures.  
Contributed by Steve Loughran

(cherry picked from commit 180e814b081d3707c95641171d649b547db41a04)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2223393a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2223393a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2223393a

Branch: refs/heads/branch-3.0
Commit: 2223393ad1d5ffdd62da79e1546de79c6259dc12
Parents: 46031d8
Author: Aaron Fabbri <fab...@cloudera.com>
Authored: Fri Sep 8 19:26:27 2017 -0700
Committer: Andrew Wang <w...@apache.org>
Committed: Thu Sep 28 10:25:17 2017 -0700

--
 .../org/apache/hadoop/test/LambdaTestUtils.java |  68 +++---
 .../apache/hadoop/test/TestLambdaTestUtils.java | 127 +--
 2 files changed, 163 insertions(+), 32 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2223393a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/LambdaTestUtils.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/LambdaTestUtils.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/LambdaTestUtils.java
index 00cfa44..3ea9ab8 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/LambdaTestUtils.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/LambdaTestUtils.java
@@ -70,7 +70,7 @@ public final class LambdaTestUtils {
  * @throws Exception if the handler wishes to raise an exception
  * that way.
  */
-Exception evaluate(int timeoutMillis, Exception caught) throws Exception;
+Throwable evaluate(int timeoutMillis, Throwable caught) throws Throwable;
   }
 
   /**
@@ -116,7 +116,7 @@ public final class LambdaTestUtils {
 Preconditions.checkNotNull(timeoutHandler);
 
 long endTime = Time.now() + timeoutMillis;
-Exception ex = null;
+Throwable ex = null;
 boolean running = true;
 int iterations = 0;
 while (running) {
@@ -128,9 +128,11 @@ public final class LambdaTestUtils {
 // the probe failed but did not raise an exception. Reset any
 // exception raised by a previous probe failure.
 ex = null;
-  } catch (InterruptedException | FailFastException e) {
+  } catch (InterruptedException
+  | FailFastException
+  | VirtualMachineError e) {
 throw e;
-  } catch (Exception e) {
+  } catch (Throwable e) {
 LOG.debug("eventually() iteration {}", iterations, e);
 ex = e;
   }
@@ -145,15 +147,20 @@ public final class LambdaTestUtils {
   }
 }
 // timeout
-Exception evaluate = timeoutHandler.evaluate(timeoutMillis, ex);
-if (evaluate == null) {
-  // bad timeout handler logic; fall back to GenerateTimeout so the
-  // underlying problem isn't lost.
-  LOG.error("timeout handler {} did not throw an exception ",
-  timeoutHandler);
-  evaluate = new GenerateTimeout().evaluate(timeoutMillis, ex);
+Throwable evaluate;
+try {
+  evaluate = timeoutHandler.evaluate(timeoutMillis, ex);
+  if (evaluate == null) {
+// bad timeout handler logic; fall back to GenerateTimeout so the
+// underlying problem isn't lost.
+LOG.error("timeout handler {} did not throw an exception ",
+timeoutHandler);
+evaluate = new GenerateTimeout().evaluate(timeoutMillis, ex);
+  }
+} catch (Throwable throwable) {
+  evaluate = throwable;
 }
-throw evaluate;
+return raise(evaluate);
   }
 
   /**
@@ -217,6 +224,7 @@ public final class LambdaTestUtils {
* @throws Exception the last exception thrown before timeout was triggered
* @throws FailFastException if raised -without any retry attempt.
* @throws InterruptedException if interrupted during the sleep operation.
+   * @throws OutOfMemoryError you've run out of memory.
*/
   public static  T eventually(int timeoutMillis,
   Callable eval,
@@ -224,7 +232,7 @@ public final class LambdaTestUtils {
 Preconditions.checkArgument(timeoutMillis >= 0,
 "timeoutMillis must be >= 0");
 long endTime = Time.now() + timeoutMillis;
-Exception ex;
+Throwable ex;
 boolean running;
 int sleeptime;
 int iterations = 0;
@@ -232,10 +240,12 @@ public final class LambdaTestUtils {
   iterations++;
   try {
 return eval.call();
-  } catch (InterruptedException | FailFastException e) {
+  } catch (InterruptedException
+   

hadoop git commit: HDFS-12404. Rename hdfs config authorization.provider.bypass.users to attributes.provider.bypass.users.

2017-09-28 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 d9dd85673 -> 46031d84f


HDFS-12404. Rename hdfs config authorization.provider.bypass.users to 
attributes.provider.bypass.users.

(cherry picked from commit 3b3be355b35d08a78d9dcd647650812a2d28207b)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/46031d84
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/46031d84
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/46031d84

Branch: refs/heads/branch-3.0
Commit: 46031d84fb071ff1bfda9e41a82e551cf0a198c2
Parents: d9dd856
Author: Manoj Govindassamy <manoj...@apache.org>
Authored: Thu Sep 7 17:20:42 2017 -0700
Committer: Andrew Wang <w...@apache.org>
Committed: Thu Sep 28 10:22:25 2017 -0700

--
 .../hadoop-hdfs/src/main/resources/hdfs-default.xml  | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/46031d84/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index d778d45..43e87c9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -4158,11 +4158,11 @@
 
 
 
-  dfs.namenode.authorization.provider.bypass.users
+  dfs.namenode.inode.attributes.provider.bypass.users
   
   
 A list of user principals (in secure cluster) or user names (in insecure
-cluster) for whom the external attribute provider will be bypassed for all
+cluster) for whom the external attributes provider will be bypassed for all
 operations. This means file attributes stored in HDFS instead of the
 external provider will be used for permission checking and be returned when
 requested.


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDFS-12409. Add metrics of execution time of different stages in EC recovery task. (Lei (Eddy) Xu)

2017-09-28 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 58eacdbb4 -> d9dd85673


HDFS-12409. Add metrics of execution time of different stages in EC recovery 
task. (Lei (Eddy) Xu)

(cherry picked from commit 73aed34dffa5e79f6f819137b69054c1dee2d4dd)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d9dd8567
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d9dd8567
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d9dd8567

Branch: refs/heads/branch-3.0
Commit: d9dd85673962ba22c57c307efe8ad3fca3a1604a
Parents: 58eacdb
Author: Lei Xu <l...@apache.org>
Authored: Wed Sep 13 17:10:16 2017 -0700
Committer: Andrew Wang <w...@apache.org>
Committed: Thu Sep 28 10:21:54 2017 -0700

--
 .../erasurecode/StripedBlockReconstructor.java| 11 +++
 .../server/datanode/metrics/DataNodeMetrics.java  | 18 ++
 .../TestDataNodeErasureCodingMetrics.java |  7 +++
 3 files changed, 36 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d9dd8567/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedBlockReconstructor.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedBlockReconstructor.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedBlockReconstructor.java
index bac013a..34e58ae 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedBlockReconstructor.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedBlockReconstructor.java
@@ -22,6 +22,7 @@ import java.nio.ByteBuffer;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeMetrics;
+import org.apache.hadoop.util.Time;
 
 /**
  * StripedBlockReconstructor reconstruct one or more missed striped block in
@@ -83,18 +84,28 @@ class StripedBlockReconstructor extends StripedReconstructor
   final int toReconstructLen =
   (int) Math.min(getStripedReader().getBufferSize(), remaining);
 
+  long start = Time.monotonicNow();
   // step1: read from minimum source DNs required for reconstruction.
   // The returned success list is the source DNs we do real read from
   getStripedReader().readMinimumSources(toReconstructLen);
+  long readEnd = Time.monotonicNow();
 
   // step2: decode to reconstruct targets
   reconstructTargets(toReconstructLen);
+  long decodeEnd = Time.monotonicNow();
 
   // step3: transfer data
   if (stripedWriter.transferData2Targets() == 0) {
 String error = "Transfer failed for all targets.";
 throw new IOException(error);
   }
+  long writeEnd = Time.monotonicNow();
+
+  // Only the succeed reconstructions are recorded.
+  final DataNodeMetrics metrics = getDatanode().getMetrics();
+  metrics.incrECReconstructionReadTime(readEnd - start);
+  metrics.incrECReconstructionDecodingTime(decodeEnd - readEnd);
+  metrics.incrECReconstructionWriteTime(writeEnd - decodeEnd);
 
   updatePositionInBlock(toReconstructLen);
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d9dd8567/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java
index a8a6919..58a2f65 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java
@@ -151,6 +151,12 @@ public class DataNodeMetrics {
   MutableCounterLong ecReconstructionBytesWritten;
   @Metric("Bytes remote read by erasure coding worker")
   MutableCounterLong ecReconstructionRemoteBytesRead;
+  @Metric("Milliseconds spent on read by erasure coding worker")
+  private MutableCounterLong ecReconstructionReadTimeMillis;
+  @Metric("Milliseconds spent on decoding by erasure coding worker")
+  private MutableCounterLong ecReconstructionDecodingTimeMillis;
+  @Metric("Milliseconds spent on write by erasure coding worker")
+  private MutableCounterLong ecReconstructionWriteTimeMillis;
 
   final Met

hadoop git commit: HDFS-12412. Change ErasureCodingWorker.stripedReadPool to cached thread pool. (Lei (Eddy) Xu)

2017-09-28 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 7f371da52 -> 58eacdbb4


HDFS-12412. Change ErasureCodingWorker.stripedReadPool to cached thread pool. 
(Lei (Eddy) Xu)

(cherry picked from commit 123342cd0759ff88801d4f5ab10987f6e3f344b0)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/58eacdbb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/58eacdbb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/58eacdbb

Branch: refs/heads/branch-3.0
Commit: 58eacdbb42471dbad21330bbb96433d0f95f85d5
Parents: 7f371da
Author: Lei Xu <l...@apache.org>
Authored: Tue Sep 12 18:12:07 2017 -0700
Committer: Andrew Wang <w...@apache.org>
Committed: Thu Sep 28 10:21:24 2017 -0700

--
 .../java/org/apache/hadoop/hdfs/DFSConfigKeys.java|  2 --
 .../datanode/erasurecode/ErasureCodingWorker.java | 14 +++---
 .../hadoop-hdfs/src/main/resources/hdfs-default.xml   |  9 -
 .../src/site/markdown/HDFSErasureCoding.md|  1 -
 4 files changed, 7 insertions(+), 19 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/58eacdbb/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 512ca20..b056e29 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -569,8 +569,6 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   "dfs.namenode.ec.system.default.policy";
   public static final String  DFS_NAMENODE_EC_SYSTEM_DEFAULT_POLICY_DEFAULT =
   "RS-6-3-1024k";
-  public static final String  
DFS_DN_EC_RECONSTRUCTION_STRIPED_READ_THREADS_KEY = 
"dfs.datanode.ec.reconstruction.stripedread.threads";
-  public static final int 
DFS_DN_EC_RECONSTRUCTION_STRIPED_READ_THREADS_DEFAULT = 20;
   public static final String  
DFS_DN_EC_RECONSTRUCTION_STRIPED_READ_BUFFER_SIZE_KEY = 
"dfs.datanode.ec.reconstruction.stripedread.buffer.size";
   public static final int 
DFS_DN_EC_RECONSTRUCTION_STRIPED_READ_BUFFER_SIZE_DEFAULT = 64 * 1024;
   public static final String  
DFS_DN_EC_RECONSTRUCTION_STRIPED_READ_TIMEOUT_MILLIS_KEY = 
"dfs.datanode.ec.reconstruction.stripedread.timeout.millis";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/58eacdbb/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/ErasureCodingWorker.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/ErasureCodingWorker.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/ErasureCodingWorker.java
index 70c5378..63498bc 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/ErasureCodingWorker.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/ErasureCodingWorker.java
@@ -55,19 +55,19 @@ public final class ErasureCodingWorker {
 this.datanode = datanode;
 this.conf = conf;
 
-initializeStripedReadThreadPool(conf.getInt(
-DFSConfigKeys.DFS_DN_EC_RECONSTRUCTION_STRIPED_READ_THREADS_KEY,
-DFSConfigKeys.DFS_DN_EC_RECONSTRUCTION_STRIPED_READ_THREADS_DEFAULT));
+initializeStripedReadThreadPool();
 initializeStripedBlkReconstructionThreadPool(conf.getInt(
 DFSConfigKeys.DFS_DN_EC_RECONSTRUCTION_THREADS_KEY,
 DFSConfigKeys.DFS_DN_EC_RECONSTRUCTION_THREADS_DEFAULT));
   }
 
-  private void initializeStripedReadThreadPool(int num) {
-LOG.debug("Using striped reads; pool threads={}", num);
+  private void initializeStripedReadThreadPool() {
+LOG.debug("Using striped reads");
 
-stripedReadPool = new ThreadPoolExecutor(1, num, 60, TimeUnit.SECONDS,
-new SynchronousQueue(),
+// Essentially, this is a cachedThreadPool.
+stripedReadPool = new ThreadPoolExecutor(0, Integer.MAX_VALUE,
+60, TimeUnit.SECONDS,
+new SynchronousQueue<>(),
 new Daemon.DaemonFactory() {
   private final AtomicInteger threadIndex = new AtomicInteger(0);
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/58eacdbb/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
-

hadoop git commit: HDFS-12496. Make QuorumJournalManager timeout properties configurable. Contributed by Ajay Kumar.

2017-09-28 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 f8df655f3 -> 14a05ee4c


HDFS-12496. Make QuorumJournalManager timeout properties configurable. 
Contributed by Ajay Kumar.

(cherry picked from commit b9e423fa8d30ea89244f6ec018a8064cc87d94a9)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/14a05ee4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/14a05ee4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/14a05ee4

Branch: refs/heads/branch-3.0
Commit: 14a05ee4c1e97eedf1949ed3ba5f1683034262bd
Parents: f8df655
Author: Arpit Agarwal <a...@apache.org>
Authored: Thu Sep 21 08:44:43 2017 -0700
Committer: Andrew Wang <w...@apache.org>
Committed: Thu Sep 28 10:19:11 2017 -0700

--
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |  4 ++
 .../qjournal/client/QuorumJournalManager.java   | 39 +---
 .../src/main/resources/hdfs-default.xml | 11 ++
 3 files changed, 33 insertions(+), 21 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/14a05ee4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 319654c..512ca20 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -727,6 +727,10 @@ public class DFSConfigKeys extends CommonConfigurationKeys 
{
   "dfs.edit.log.transfer.bandwidthPerSec";
   public static final long DFS_EDIT_LOG_TRANSFER_RATE_DEFAULT = 0; //no 
throttling
 
+  public static final String DFS_QJM_OPERATIONS_TIMEOUT =
+  "dfs.qjm.operations.timeout";
+  public static final long DFS_QJM_OPERATIONS_TIMEOUT_DEFAULT = 6;
+
   // Datanode File IO Stats
   public static final String DFS_DATANODE_ENABLE_FILEIO_FAULT_INJECTION_KEY =
   "dfs.datanode.enable.fileio.fault.injection";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/14a05ee4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java
index 97c0050..f66e2c0 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java
@@ -27,6 +27,7 @@ import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
 import java.util.PriorityQueue;
+import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeoutException;
 
 import org.apache.commons.logging.Log;
@@ -76,18 +77,10 @@ public class QuorumJournalManager implements JournalManager 
{
   private final int newEpochTimeoutMs;
   private final int writeTxnsTimeoutMs;
 
-  // Since these don't occur during normal operation, we can
-  // use rather lengthy timeouts, and don't need to make them
-  // configurable.
-  private static final int FORMAT_TIMEOUT_MS= 6;
-  private static final int HASDATA_TIMEOUT_MS   = 6;
-  private static final int CAN_ROLL_BACK_TIMEOUT_MS = 6;
-  private static final int FINALIZE_TIMEOUT_MS  = 6;
-  private static final int PRE_UPGRADE_TIMEOUT_MS   = 6;
-  private static final int ROLL_BACK_TIMEOUT_MS = 6;
-  private static final int DISCARD_SEGMENTS_TIMEOUT_MS  = 6;
-  private static final int UPGRADE_TIMEOUT_MS   = 6;
-  private static final int GET_JOURNAL_CTIME_TIMEOUT_MS = 6;
+  // This timeout is used for calls that don't occur during normal operation
+  // e.g. format, upgrade operations and a few others. So we can use rather
+  // lengthy timeouts by default.
+  private final int timeoutMs;
   
   private final Configuration conf;
   private final URI uri;
@@ -141,6 +134,10 @@ public class QuorumJournalManager implements 
JournalManager {
 this.writeTxnsTimeoutMs = conf.getInt(
 DFSConfigKeys.DFS_QJOURNAL_WRITE_TXNS_TIMEOUT_KEY,
 DFSConfigKeys.DFS_QJOURNAL_WRITE_TXNS_TIMEOUT_DEFAULT);
+this.timeoutMs = (int) conf.getTimeDuration(DFSConfigKeys
+.DFS_QJM_OPERATIONS_TIMEOUT,
+DFSConfigKeys

hadoop git commit: HDFS-12470. DiskBalancer: Some tests create plan files under system directory. Contributed by Hanisha Koneru.

2017-09-28 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 14a05ee4c -> 7f371da52


HDFS-12470. DiskBalancer: Some tests create plan files under system directory. 
Contributed by Hanisha Koneru.

(cherry picked from commit a2dcba18531c6fa4b76325f5132773f12ddfc6d5)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7f371da5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7f371da5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7f371da5

Branch: refs/heads/branch-3.0
Commit: 7f371da52ab6c130d5d6a579e8007dcbaee8
Parents: 14a05ee
Author: Arpit Agarwal <a...@apache.org>
Authored: Mon Sep 18 09:53:24 2017 -0700
Committer: Andrew Wang <w...@apache.org>
Committed: Thu Sep 28 10:19:49 2017 -0700

--
 .../server/diskbalancer/command/TestDiskBalancerCommand.java| 5 -
 1 file changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7f371da5/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java
index b0b0b0c..1cebae0 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java
@@ -476,9 +476,12 @@ public class TestDiskBalancerCommand {
   public void testPlanJsonNode() throws Exception {
 final String planArg = String.format("-%s %s", PLAN,
 "a87654a9-54c7-4693-8dd9-c9c7021dc340");
+final Path testPath = new Path(
+PathUtils.getTestPath(getClass()),
+GenericTestUtils.getMethodName());
 final String cmdLine = String
 .format(
-"hdfs diskbalancer %s", planArg);
+"hdfs diskbalancer -out %s %s", testPath, planArg);
 runCommand(cmdLine);
   }
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDFS-12530. Processor argument in Offline Image Viewer should be case insensitive. Contributed by Hanisha Koneru.

2017-09-28 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 7b49de5e3 -> f8df655f3


HDFS-12530. Processor argument in Offline Image Viewer should be case 
insensitive. Contributed by Hanisha Koneru.

(cherry picked from commit 08fca508e66e8eddc5d8fd1608ec0c9cd54fc990)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f8df655f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f8df655f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f8df655f

Branch: refs/heads/branch-3.0
Commit: f8df655f315d517a62e75962718da8bb30c3c0ec
Parents: 7b49de5
Author: Arpit Agarwal <a...@apache.org>
Authored: Fri Sep 22 09:47:57 2017 -0700
Committer: Andrew Wang <w...@apache.org>
Committed: Thu Sep 28 10:18:22 2017 -0700

--
 .../tools/offlineImageViewer/OfflineImageViewerPB.java   | 11 ++-
 1 file changed, 6 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f8df655f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageViewerPB.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageViewerPB.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageViewerPB.java
index c1141f3..0f2ac81 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageViewerPB.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageViewerPB.java
@@ -33,6 +33,7 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.util.StringUtils;
 
 /**
  * OfflineImageViewerPB to dump the contents of an Hadoop image file to XML or
@@ -174,8 +175,8 @@ public class OfflineImageViewerPB {
 Configuration conf = new Configuration();
 try (PrintStream out = outputFile.equals("-") ?
 System.out : new PrintStream(outputFile, "UTF-8")) {
-  switch (processor) {
-  case "FileDistribution":
+  switch (StringUtils.toUpperCase(processor)) {
+  case "FILEDISTRIBUTION":
 long maxSize = Long.parseLong(cmd.getOptionValue("maxSize", "0"));
 int step = Integer.parseInt(cmd.getOptionValue("step", "0"));
 boolean formatOutput = cmd.hasOption("format");
@@ -186,7 +187,7 @@ public class OfflineImageViewerPB {
 new PBImageXmlWriter(conf, out).visit(new RandomAccessFile(inputFile,
 "r"));
 break;
-  case "ReverseXML":
+  case "REVERSEXML":
 try {
   OfflineImageReconstructor.run(inputFile, outputFile);
 } catch (Exception e) {
@@ -196,14 +197,14 @@ public class OfflineImageViewerPB {
   System.exit(1);
 }
 break;
-  case "Web":
+  case "WEB":
 String addr = cmd.getOptionValue("addr", "localhost:5978");
 try (WebImageViewer viewer =
 new WebImageViewer(NetUtils.createSocketAddr(addr))) {
   viewer.start(inputFile);
 }
 break;
-  case "Delimited":
+  case "DELIMITED":
 try (PBImageDelimitedTextWriter writer =
 new PBImageDelimitedTextWriter(out, delimiter, tempPath)) {
   writer.visit(new RandomAccessFile(inputFile, "r"));


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: YARN-6622. Document Docker work as experimental (Contributed by Varun Vasudev)

2017-09-28 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 2ab2a9438 -> 7b49de5e3


YARN-6622. Document Docker work as experimental (Contributed by Varun Vasudev)

(cherry picked from commit 6651cbcc72d92caf86b744fa76fba513b36b12c7)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7b49de5e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7b49de5e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7b49de5e

Branch: refs/heads/branch-3.0
Commit: 7b49de5e300f2a51dee33e52026207870fc34ffb
Parents: 2ab2a94
Author: Daniel Templeton <templ...@apache.org>
Authored: Mon Sep 11 16:14:31 2017 -0700
Committer: Andrew Wang <w...@apache.org>
Committed: Thu Sep 28 10:16:45 2017 -0700

--
 .../hadoop-yarn-site/src/site/markdown/DockerContainers.md | 6 ++
 1 file changed, 6 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b49de5e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/DockerContainers.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/DockerContainers.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/DockerContainers.md
index bf94169..23f4134 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/DockerContainers.md
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/DockerContainers.md
@@ -17,6 +17,12 @@ Launching Applications Using Docker Containers
 
 
 
+Notice
+--
+This feature is experimental and is not complete. Enabling this feature and
+running Docker containers in your cluster has security implications.
+Please do a security analysis before enabling this feature.
+
 Overview
 
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: Revert "HADOOP-14655. Update httpcore version to 4.4.6. (rchiang)"

2017-09-22 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 b58cc1617 -> 0704a5b5c


Revert "HADOOP-14655. Update httpcore version to 4.4.6. (rchiang)"

This reverts commit 0b69a22f8688ae98e3c506b0bd1d26f97ccb391e.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0704a5b5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0704a5b5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0704a5b5

Branch: refs/heads/branch-3.0
Commit: 0704a5b5cb3e4962d5ed0bab16ae39df44ae16ed
Parents: b58cc16
Author: Andrew Wang <w...@apache.org>
Authored: Fri Sep 22 14:43:58 2017 -0700
Committer: Andrew Wang <w...@apache.org>
Committed: Fri Sep 22 14:43:58 2017 -0700

--
 hadoop-project/pom.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0704a5b5/hadoop-project/pom.xml
--
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index 3f2ef25..1c74c02 100755
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -578,7 +578,7 @@
   
 org.apache.httpcomponents
 httpcore
-4.4.6
+4.4.4
   
   
 commons-codec


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: Revert "HADOOP-14655. Update httpcore version to 4.4.6. (rchiang)"

2017-09-22 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/trunk 4002bf0a9 -> 8d29bf57c


Revert "HADOOP-14655. Update httpcore version to 4.4.6. (rchiang)"

This reverts commit 088a18b44d87c6994b571696edbf87c95528dcaa.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8d29bf57
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8d29bf57
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8d29bf57

Branch: refs/heads/trunk
Commit: 8d29bf57ca97a94e6f6ee663bcaa5b7bc390f850
Parents: 4002bf0
Author: Andrew Wang <w...@apache.org>
Authored: Fri Sep 22 14:43:44 2017 -0700
Committer: Andrew Wang <w...@apache.org>
Committed: Fri Sep 22 14:43:44 2017 -0700

--
 hadoop-project/pom.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8d29bf57/hadoop-project/pom.xml
--
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index a698126..7f657ca 100755
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -578,7 +578,7 @@
   
 org.apache.httpcomponents
 httpcore
-4.4.6
+4.4.4
   
   
 commons-codec


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: Revert "HADOOP-14771. hadoop-client does not include hadoop-yarn-client. (Ajay Kumar via Haibo Chen)" HADOOP-14879 Build failure due to failing hadoop-client-check-invariants This r

2017-09-20 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 835717210 -> 4767d1bb1


Revert "HADOOP-14771. hadoop-client does not include hadoop-yarn-client. (Ajay 
Kumar via Haibo Chen)"
HADOOP-14879 Build failure due to failing hadoop-client-check-invariants
This reverts commit 1ee25278c891e95ba2ab142e5b78aebd752ea163.

(cherry picked from commit aa6e8d2dff533c3d0c86776567c860548723c21c)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4767d1bb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4767d1bb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4767d1bb

Branch: refs/heads/branch-3.0
Commit: 4767d1bb1fca46e572d81db79bea83d5289879bc
Parents: 8357172
Author: Steve Loughran <ste...@apache.org>
Authored: Tue Sep 19 11:53:11 2017 +0100
Committer: Andrew Wang <w...@apache.org>
Committed: Wed Sep 20 21:49:34 2017 -0700

--
 hadoop-client-modules/hadoop-client/pom.xml | 31 
 1 file changed, 31 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4767d1bb/hadoop-client-modules/hadoop-client/pom.xml
--
diff --git a/hadoop-client-modules/hadoop-client/pom.xml 
b/hadoop-client-modules/hadoop-client/pom.xml
index 8505d50..c465d07 100644
--- a/hadoop-client-modules/hadoop-client/pom.xml
+++ b/hadoop-client-modules/hadoop-client/pom.xml
@@ -179,37 +179,6 @@
 
 
   org.apache.hadoop
-  hadoop-yarn-client
-  compile
-  
-
-
-  org.apache.hadoop
-  hadoop-yarn-api
-
-
-  org.apache.hadoop
-  hadoop-annotations
-
-
-  com.google.guava
-  guava
-
-
-  commons-cli
-  commons-cli
-
-
-  log4j
-  log4j
-
-  
-
-
-
-  org.apache.hadoop
   hadoop-mapreduce-client-core
   compile
   


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDFS-12447. Rename AddECPolicyResponse to AddErasureCodingPolicyResponse. Contributed by SammiChen.

2017-09-20 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 0006ee681 -> 1792093ba


HDFS-12447. Rename AddECPolicyResponse to AddErasureCodingPolicyResponse. 
Contributed by SammiChen.

(cherry picked from commit a12f09ba3c4a3aa4c4558090c5e1b7bcaebe3b94)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1792093b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1792093b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1792093b

Branch: refs/heads/branch-3.0
Commit: 1792093bae273bf2e07b9ddb3628265aee9c747e
Parents: 0006ee6
Author: Andrew Wang <w...@apache.org>
Authored: Wed Sep 20 11:51:17 2017 -0700
Committer: Andrew Wang <w...@apache.org>
Committed: Wed Sep 20 11:51:21 2017 -0700

--
 .../java/org/apache/hadoop/hdfs/DFSClient.java  | 10 +--
 .../hadoop/hdfs/DistributedFileSystem.java  |  4 +-
 .../apache/hadoop/hdfs/client/HdfsAdmin.java|  4 +-
 .../hdfs/protocol/AddECPolicyResponse.java  | 68 
 .../AddErasureCodingPolicyResponse.java | 68 
 .../hadoop/hdfs/protocol/ClientProtocol.java|  2 +-
 .../ClientNamenodeProtocolTranslatorPB.java | 11 ++--
 .../hadoop/hdfs/protocolPB/PBHelperClient.java  | 22 ---
 .../src/main/proto/erasurecoding.proto  |  2 +-
 .../src/main/proto/hdfs.proto   |  2 +-
 ...tNamenodeProtocolServerSideTranslatorPB.java | 13 ++--
 .../hdfs/server/namenode/FSNamesystem.java  | 15 +++--
 .../hdfs/server/namenode/NameNodeRpcServer.java |  9 +--
 .../org/apache/hadoop/hdfs/tools/ECAdmin.java   |  7 +-
 .../org/apache/hadoop/hdfs/DFSTestUtil.java |  4 +-
 .../hadoop/hdfs/TestErasureCodingPolicies.java  |  7 +-
 .../hadoop/hdfs/protocolPB/TestPBHelper.java| 25 +++
 .../server/namenode/TestFSEditLogLoader.java|  4 +-
 .../hdfs/server/namenode/TestFSImage.java   |  5 +-
 19 files changed, 147 insertions(+), 135 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1792093b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 7e8e95b..8d51a9c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -102,7 +102,7 @@ import org.apache.hadoop.hdfs.client.impl.DfsClientConf;
 import org.apache.hadoop.hdfs.client.impl.LeaseRenewer;
 import org.apache.hadoop.hdfs.net.Peer;
 import org.apache.hadoop.hdfs.protocol.AclException;
-import org.apache.hadoop.hdfs.protocol.AddECPolicyResponse;
+import org.apache.hadoop.hdfs.protocol.AddErasureCodingPolicyResponse;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
@@ -2807,13 +2807,14 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
 }
   }
 
-  public AddECPolicyResponse[] addErasureCodingPolicies(
+  public AddErasureCodingPolicyResponse[] addErasureCodingPolicies(
   ErasureCodingPolicy[] policies) throws IOException {
 checkOpen();
 try (TraceScope ignored = tracer.newScope("addErasureCodingPolicies")) {
   return namenode.addErasureCodingPolicies(policies);
 } catch (RemoteException re) {
-  throw re.unwrapRemoteException(AccessControlException.class);
+  throw re.unwrapRemoteException(AccessControlException.class,
+  SafeModeException.class);
 }
   }
 
@@ -2823,7 +2824,8 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
 try (TraceScope ignored = tracer.newScope("removeErasureCodingPolicy")) {
   namenode.removeErasureCodingPolicy(ecPolicyName);
 } catch (RemoteException re) {
-  throw re.unwrapRemoteException(AccessControlException.class);
+  throw re.unwrapRemoteException(AccessControlException.class,
+  SafeModeException.class);
 }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1792093b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
index f6331cf..c9f4490 100644
--- 
a/hadoop-hdfs-project/h

hadoop git commit: HDFS-12447. Rename AddECPolicyResponse to AddErasureCodingPolicyResponse. Contributed by SammiChen.

2017-09-20 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/trunk ce943eb17 -> a12f09ba3


HDFS-12447. Rename AddECPolicyResponse to AddErasureCodingPolicyResponse. 
Contributed by SammiChen.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a12f09ba
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a12f09ba
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a12f09ba

Branch: refs/heads/trunk
Commit: a12f09ba3c4a3aa4c4558090c5e1b7bcaebe3b94
Parents: ce943eb
Author: Andrew Wang <w...@apache.org>
Authored: Wed Sep 20 11:51:17 2017 -0700
Committer: Andrew Wang <w...@apache.org>
Committed: Wed Sep 20 11:51:17 2017 -0700

--
 .../java/org/apache/hadoop/hdfs/DFSClient.java  | 10 +--
 .../hadoop/hdfs/DistributedFileSystem.java  |  4 +-
 .../apache/hadoop/hdfs/client/HdfsAdmin.java|  4 +-
 .../hdfs/protocol/AddECPolicyResponse.java  | 68 
 .../AddErasureCodingPolicyResponse.java | 68 
 .../hadoop/hdfs/protocol/ClientProtocol.java|  2 +-
 .../ClientNamenodeProtocolTranslatorPB.java | 11 ++--
 .../hadoop/hdfs/protocolPB/PBHelperClient.java  | 22 ---
 .../src/main/proto/erasurecoding.proto  |  2 +-
 .../src/main/proto/hdfs.proto   |  2 +-
 ...tNamenodeProtocolServerSideTranslatorPB.java | 13 ++--
 .../hdfs/server/namenode/FSNamesystem.java  | 15 +++--
 .../hdfs/server/namenode/NameNodeRpcServer.java |  9 +--
 .../org/apache/hadoop/hdfs/tools/ECAdmin.java   |  7 +-
 .../org/apache/hadoop/hdfs/DFSTestUtil.java |  4 +-
 .../hadoop/hdfs/TestErasureCodingPolicies.java  |  7 +-
 .../hadoop/hdfs/protocolPB/TestPBHelper.java| 25 +++
 .../server/namenode/TestFSEditLogLoader.java|  4 +-
 .../hdfs/server/namenode/TestFSImage.java   |  5 +-
 19 files changed, 147 insertions(+), 135 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a12f09ba/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 7e8e95b..8d51a9c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -102,7 +102,7 @@ import org.apache.hadoop.hdfs.client.impl.DfsClientConf;
 import org.apache.hadoop.hdfs.client.impl.LeaseRenewer;
 import org.apache.hadoop.hdfs.net.Peer;
 import org.apache.hadoop.hdfs.protocol.AclException;
-import org.apache.hadoop.hdfs.protocol.AddECPolicyResponse;
+import org.apache.hadoop.hdfs.protocol.AddErasureCodingPolicyResponse;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
@@ -2807,13 +2807,14 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
 }
   }
 
-  public AddECPolicyResponse[] addErasureCodingPolicies(
+  public AddErasureCodingPolicyResponse[] addErasureCodingPolicies(
   ErasureCodingPolicy[] policies) throws IOException {
 checkOpen();
 try (TraceScope ignored = tracer.newScope("addErasureCodingPolicies")) {
   return namenode.addErasureCodingPolicies(policies);
 } catch (RemoteException re) {
-  throw re.unwrapRemoteException(AccessControlException.class);
+  throw re.unwrapRemoteException(AccessControlException.class,
+  SafeModeException.class);
 }
   }
 
@@ -2823,7 +2824,8 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
 try (TraceScope ignored = tracer.newScope("removeErasureCodingPolicy")) {
   namenode.removeErasureCodingPolicy(ecPolicyName);
 } catch (RemoteException re) {
-  throw re.unwrapRemoteException(AccessControlException.class);
+  throw re.unwrapRemoteException(AccessControlException.class,
+  SafeModeException.class);
 }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a12f09ba/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
index f6331cf..c9f4490 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
+++

hadoop git commit: HDFS-12437. Fix test setup in TestLeaseRecoveryStriped.

2017-09-19 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 5c8331c23 -> f4082bbb2


HDFS-12437. Fix test setup in TestLeaseRecoveryStriped.

(cherry picked from commit 12d9d7bc509bca82b8f40301e3dc5ca764be45eb)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f4082bbb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f4082bbb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f4082bbb

Branch: refs/heads/branch-3.0
Commit: f4082bbb2338fa51c77d3704720803968cbb0d18
Parents: 5c8331c
Author: Andrew Wang <w...@apache.org>
Authored: Tue Sep 19 16:42:20 2017 -0700
Committer: Andrew Wang <w...@apache.org>
Committed: Tue Sep 19 16:42:23 2017 -0700

--
 .../hadoop/hdfs/TestLeaseRecoveryStriped.java   | 156 ++-
 1 file changed, 113 insertions(+), 43 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f4082bbb/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecoveryStriped.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecoveryStriped.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecoveryStriped.java
index 2846dbf..36ac8b3 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecoveryStriped.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecoveryStriped.java
@@ -19,8 +19,7 @@ package org.apache.hadoop.hdfs;
 
 import com.google.common.base.Preconditions;
 import com.google.common.base.Supplier;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.apache.commons.lang.builder.ToStringBuilder;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
@@ -28,6 +27,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
+import org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.util.StripedBlockUtil;
 import org.apache.hadoop.io.IOUtils;
@@ -40,34 +40,41 @@ import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
 import org.mockito.internal.util.reflection.Whitebox;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.io.DataOutputStream;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.HashSet;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Random;
+import java.util.Set;
 import java.util.concurrent.ThreadLocalRandom;
 import java.util.concurrent.TimeoutException;
 
 public class TestLeaseRecoveryStriped {
-  public static final Log LOG = LogFactory
-  .getLog(TestLeaseRecoveryStriped.class);
+  public static final Logger LOG = LoggerFactory
+  .getLogger(TestLeaseRecoveryStriped.class);
 
   private final ErasureCodingPolicy ecPolicy =
   StripedFileTestUtil.getDefaultECPolicy();
   private final int dataBlocks = ecPolicy.getNumDataUnits();
   private final int parityBlocks = ecPolicy.getNumParityUnits();
   private final int cellSize = ecPolicy.getCellSize();
-  private final int stripSize = dataBlocks * cellSize;
-  private final int stripesPerBlock = 15;
+  private final int stripeSize = dataBlocks * cellSize;
+  private final int stripesPerBlock = 4;
   private final int blockSize = cellSize * stripesPerBlock;
   private final int blockGroupSize = blockSize * dataBlocks;
   private static final int bytesPerChecksum = 512;
 
   static {
 GenericTestUtils.setLogLevel(DataNode.LOG, Level.ALL);
+GenericTestUtils.setLogLevel(DFSStripedOutputStream.LOG, Level.DEBUG);
+GenericTestUtils.setLogLevel(BlockRecoveryWorker.LOG, Level.DEBUG);
+GenericTestUtils.setLogLevel(DataStreamer.LOG, Level.DEBUG);
   }
 
   static private final String fakeUsername = "fakeUser1";
@@ -83,7 +90,7 @@ public class TestLeaseRecoveryStriped {
   public void setup() throws IOException {
 conf = new HdfsConfiguration();
 conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
-conf.setLong(HdfsClientConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 6000L);
+conf.setLong(HdfsClientConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 6L);
 conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY,
 false);
 conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
@@ -104,78 +111,118 @@ public class TestLeaseRecoveryStriped {
 }
   }
 
-  private int[]

  1   2   3   4   5   6   7   8   9   10   >