hbase git commit: HBASE-19392 TestReplicaWithCluster#testReplicaGetWithPrimaryAndMetaDown failure in master Signed-off-by: Huaxiang Sun

2017-11-30 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/branch-2 df3668818 -> 752be198e


HBASE-19392 TestReplicaWithCluster#testReplicaGetWithPrimaryAndMetaDown failure in master
Signed-off-by: Huaxiang Sun 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/752be198
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/752be198
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/752be198

Branch: refs/heads/branch-2
Commit: 752be198e7f51a2771d49b74044879dc0f762fa2
Parents: df36688
Author: Michael Stack 
Authored: Thu Nov 30 22:44:57 2017 -0800
Committer: Michael Stack 
Committed: Thu Nov 30 22:46:00 2017 -0800

--
 .../apache/hadoop/hbase/client/TestReplicaWithCluster.java | 7 ++-----
 1 file changed, 2 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/752be198/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicaWithCluster.java
--
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicaWithCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicaWithCluster.java
index 82355c8..22d0e8e 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicaWithCluster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicaWithCluster.java
@@ -47,7 +47,6 @@ import org.apache.hadoop.hbase.Waiter;
 
 import org.apache.hadoop.hbase.client.replication.ReplicationAdmin;
 import org.apache.hadoop.hbase.coprocessor.CoreCoprocessor;
-import org.apache.hadoop.hbase.coprocessor.HasRegionServerServices;
 import org.apache.hadoop.hbase.coprocessor.ObserverContext;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
@@ -193,8 +192,7 @@ public class TestReplicaWithCluster {
         if (!e.getEnvironment().getRegion().getRegionInfo().isMetaRegion() && (replicaId == 0)) {
           LOG.info("Get, throw Region Server Stopped Exception for region " + e.getEnvironment()
               .getRegion().getRegionInfo());
-          throw new RegionServerStoppedException("Server " +
-              ((HasRegionServerServices)e.getEnvironment()).getRegionServerServices().getServerName()
+          throw new RegionServerStoppedException("Server " + e.getEnvironment().getServerName()
               + " not running");
         }
       } else {
@@ -224,8 +222,7 @@ public class TestReplicaWithCluster {
           LOG.info("Scan, throw Region Server Stopped Exception for replica " + e.getEnvironment()
               .getRegion().getRegionInfo());
 
-          throw new RegionServerStoppedException("Server " +
-              ((HasRegionServerServices)e.getEnvironment()).getRegionServerServices().getServerName()
+          throw new RegionServerStoppedException("Server " + e.getEnvironment().getServerName()
               + " not running");
         } else {
           LOG.info("Scan, We're replica region " + replicaId);



hbase git commit: HBASE-19392 TestReplicaWithCluster#testReplicaGetWithPrimaryAndMetaDown failure in master Signed-off-by: Huaxiang Sun

2017-11-30 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/master 49a9fe488 -> 2e8bd0036


HBASE-19392 TestReplicaWithCluster#testReplicaGetWithPrimaryAndMetaDown failure in master
Signed-off-by: Huaxiang Sun 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/2e8bd003
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/2e8bd003
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/2e8bd003

Branch: refs/heads/master
Commit: 2e8bd0036dbdf3a99786e5531495d8d4cb51b86c
Parents: 49a9fe4
Author: Michael Stack 
Authored: Thu Nov 30 22:44:57 2017 -0800
Committer: Michael Stack 
Committed: Thu Nov 30 22:45:39 2017 -0800

--
 .../apache/hadoop/hbase/client/TestReplicaWithCluster.java | 7 ++-----
 1 file changed, 2 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/2e8bd003/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicaWithCluster.java
--
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicaWithCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicaWithCluster.java
index 82355c8..22d0e8e 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicaWithCluster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicaWithCluster.java
@@ -47,7 +47,6 @@ import org.apache.hadoop.hbase.Waiter;
 
 import org.apache.hadoop.hbase.client.replication.ReplicationAdmin;
 import org.apache.hadoop.hbase.coprocessor.CoreCoprocessor;
-import org.apache.hadoop.hbase.coprocessor.HasRegionServerServices;
 import org.apache.hadoop.hbase.coprocessor.ObserverContext;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
@@ -193,8 +192,7 @@ public class TestReplicaWithCluster {
         if (!e.getEnvironment().getRegion().getRegionInfo().isMetaRegion() && (replicaId == 0)) {
           LOG.info("Get, throw Region Server Stopped Exception for region " + e.getEnvironment()
               .getRegion().getRegionInfo());
-          throw new RegionServerStoppedException("Server " +
-              ((HasRegionServerServices)e.getEnvironment()).getRegionServerServices().getServerName()
+          throw new RegionServerStoppedException("Server " + e.getEnvironment().getServerName()
               + " not running");
         }
       } else {
@@ -224,8 +222,7 @@ public class TestReplicaWithCluster {
           LOG.info("Scan, throw Region Server Stopped Exception for replica " + e.getEnvironment()
               .getRegion().getRegionInfo());
 
-          throw new RegionServerStoppedException("Server " +
-              ((HasRegionServerServices)e.getEnvironment()).getRegionServerServices().getServerName()
+          throw new RegionServerStoppedException("Server " + e.getEnvironment().getServerName()
               + " not running");
         } else {
           LOG.info("Scan, We're replica region " + replicaId);



hbase git commit: HBASE-19385 [1.3] TestReplicator failed 1.3 nightly

2017-11-30 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/branch-1.4 6c490625a -> 7968da0f9


HBASE-19385 [1.3] TestReplicator failed 1.3 nightly


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/7968da0f
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/7968da0f
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/7968da0f

Branch: refs/heads/branch-1.4
Commit: 7968da0f9e3b51980392bc8122e6284b0844e58e
Parents: 6c49062
Author: Michael Stack 
Authored: Wed Nov 29 23:06:31 2017 -0800
Committer: Michael Stack 
Committed: Thu Nov 30 22:32:17 2017 -0800

--
 .../replication/regionserver/TestReplicator.java | 19 +++++++++++++------
 1 file changed, 13 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/7968da0f/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicator.java
--
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicator.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicator.java
index dbe7031..4b5d331 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicator.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicator.java
@@ -22,6 +22,7 @@ import static org.junit.Assert.assertEquals;
 import java.io.IOException;
 import java.util.List;
 import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -97,6 +98,7 @@ public class TestReplicator extends TestReplicationBase {
   Waiter.waitFor(conf1, 6, new Waiter.ExplainingPredicate<Exception>() {
 @Override
 public boolean evaluate() throws Exception {
+  LOG.info("Count=" + ReplicationEndpointForTest.getBatchCount());
   return ReplicationEndpointForTest.getBatchCount() >= NUM_ROWS;
 }
 
@@ -178,7 +180,7 @@ public class TestReplicator extends TestReplicationBase {
 
   public static class ReplicationEndpointForTest extends HBaseInterClusterReplicationEndpoint {
 
-private static int batchCount;
+private static AtomicInteger batchCount = new AtomicInteger(0);
 private static int entriesCount;
 private static final Object latch = new Object();
 private static AtomicBoolean useLatch = new AtomicBoolean(false);
@@ -197,17 +199,20 @@ public class TestReplicator extends TestReplicationBase {
 public static void await() throws InterruptedException {
   if (useLatch.get()) {
 LOG.info("Waiting on latch");
-latch.wait();
+synchronized(latch) {
+  latch.wait();
+}
 LOG.info("Waited on latch, now proceeding");
   }
 }
 
 public static int getBatchCount() {
-  return batchCount;
+  return batchCount.get();
 }
 
 public static void setBatchCount(int i) {
-  batchCount = i;
+  LOG.info("SetBatchCount=" + i + ", old=" + getBatchCount());
+  batchCount.set(i);
 }
 
 public static int getEntriesCount() {
@@ -215,6 +220,7 @@ public class TestReplicator extends TestReplicationBase {
 }
 
 public static void setEntriesCount(int i) {
+  LOG.info("SetEntriesCount=" + i);
   entriesCount = i;
 }
 
@@ -240,8 +246,9 @@ public class TestReplicator extends TestReplicationBase {
       super.replicateEntries(rrs, entries, replicationClusterId, baseNamespaceDir,
 hfileArchiveDir);
   entriesCount += entries.size();
-  batchCount++;
-  LOG.info("Completed replicating batch " + 
System.identityHashCode(entries));
+  int count = batchCount.incrementAndGet();
+  LOG.info("Completed replicating batch " + 
System.identityHashCode(entries) +
+  " count=" + count);
 } catch (IOException e) {
   LOG.info("Failed to replicate batch " + 
System.identityHashCode(entries), e);
   throw e;
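
Two standard concurrency repairs are at work in this patch: Object.wait() must be invoked while holding the monitor of the object being waited on (a bare latch.wait() throws IllegalMonitorStateException), and a counter updated from several replication threads should be an AtomicInteger rather than a plain int. A self-contained sketch of both patterns; all names here are illustrative, not taken from the patch:

import java.util.concurrent.atomic.AtomicInteger;

public class LatchAndCounterDemo {
  private static final Object latch = new Object();
  private static final AtomicInteger batchCount = new AtomicInteger(0);

  // Correct: wait() runs while synchronized on the latch, so this thread owns
  // the monitor. Without the synchronized block the JVM throws
  // IllegalMonitorStateException at runtime, which is what the patch fixes.
  public static void await() throws InterruptedException {
    synchronized (latch) {
      latch.wait();
    }
  }

  public static void wakeAll() {
    synchronized (latch) {
      latch.notifyAll();
    }
  }

  public static void main(String[] args) throws Exception {
    Thread waiter = new Thread(() -> {
      try {
        await();
        System.out.println("proceeding, count=" + batchCount.get());
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
      }
    });
    waiter.start();
    Thread.sleep(100); // demo-only pause so the waiter blocks first

    // incrementAndGet is atomic; a plain batchCount++ could lose updates when
    // several batches complete concurrently, which is why the count flaked.
    int count = batchCount.incrementAndGet();
    System.out.println("completed batch, count=" + count);
    wakeAll();
    waiter.join();
  }
}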



svn commit: r23352 - in /dev/hbase: hbase-1.1.13/ hbase-1.1.13RC0/

2017-11-30 Thread ndimiduk
Author: ndimiduk
Date: Fri Dec  1 06:17:35 2017
New Revision: 23352

Log:
correcting directory name for HBase 1.1.13RC0

Added:
dev/hbase/hbase-1.1.13RC0/
  - copied from r23351, dev/hbase/hbase-1.1.13/
Removed:
dev/hbase/hbase-1.1.13/



[hbase] Git Push Summary

2017-11-30 Thread ndimiduk
Repository: hbase
Updated Tags:  refs/tags/1.1.13RC0 [created] 16a04e662


svn commit: r23351 - /dev/hbase/hbase-1.1.13/

2017-11-30 Thread ndimiduk
Author: ndimiduk
Date: Fri Dec  1 06:09:44 2017
New Revision: 23351

Log:
HBase-1.1.13 RC0 artifacts

Added:
dev/hbase/hbase-1.1.13/
dev/hbase/hbase-1.1.13/hbase-1.1.13-bin.tar.gz   (with props)
dev/hbase/hbase-1.1.13/hbase-1.1.13-bin.tar.gz.asc
dev/hbase/hbase-1.1.13/hbase-1.1.13-bin.tar.gz.md5
dev/hbase/hbase-1.1.13/hbase-1.1.13-bin.tar.gz.mds
dev/hbase/hbase-1.1.13/hbase-1.1.13-bin.tar.gz.sha
dev/hbase/hbase-1.1.13/hbase-1.1.13-src.tar.gz   (with props)
dev/hbase/hbase-1.1.13/hbase-1.1.13-src.tar.gz.asc
dev/hbase/hbase-1.1.13/hbase-1.1.13-src.tar.gz.md5
dev/hbase/hbase-1.1.13/hbase-1.1.13-src.tar.gz.mds
dev/hbase/hbase-1.1.13/hbase-1.1.13-src.tar.gz.sha

Added: dev/hbase/hbase-1.1.13/hbase-1.1.13-bin.tar.gz
==
Binary file - no diff available.

Propchange: dev/hbase/hbase-1.1.13/hbase-1.1.13-bin.tar.gz
--
svn:mime-type = application/octet-stream

Added: dev/hbase/hbase-1.1.13/hbase-1.1.13-bin.tar.gz.asc
==
--- dev/hbase/hbase-1.1.13/hbase-1.1.13-bin.tar.gz.asc (added)
+++ dev/hbase/hbase-1.1.13/hbase-1.1.13-bin.tar.gz.asc Fri Dec  1 06:09:44 2017
@@ -0,0 +1,16 @@
+-----BEGIN PGP SIGNATURE-----
+
+iQIzBAABCgAdFiEEbvbOx0uJuSk7TZzQrZA5Bxw0ib0FAlog8AMACgkQrZA5Bxw0
+ib3erhAAgd1zIGjP+8zbEtobiGv+l7VTYLIC/C0OeiyLzvwGUykVkF4Agn58Gn9B
+RJef5bm1eCoHyf+hx+ltmFS4d+fSqZTxSd2fHb8B7ZedJa7A96GuIDNdWy8yXGSA
++oISD7qDTLNtVRL6TBxrnRCObWQQ16n7fnQc6/TlIk4zRIUE10qLYangbSnlQBmA
+ZxeoUsv/FRt25GBd8OrSD7HtemGdyNxBKMZx0lrhi94xRa+tHFeFKCu5K6ous1Wv
+EUDWVHKXRf2Pw+rXu/nBLqlkPsQM9/oMf5gzA6rIGOqu8GuEswfck76YiPlubHFW
+5HgTFgM3TZ4aZqBpfYx/cmLDIA7IITFRplqxPc6r6ytAsICzbOefXdTSkS97MMXk
+mb5tdmWJGtnBozuzIzEnq5wyJqlEp7Oc6Shp58wiuKQS6Ttx8jmAa5yXZ15cwRU4
+omFczO1i4CTJG4WuD3WnPuKWPt3StRyNlRqicnvd/b3zXpXEgc0tjyyFPK8DN+Vo
+4NB0nz0g7JPCd0TOTsBViYTqkhpMPiPYGnW3b0JctY8PhO9EX1gomtFT0Q2B2ks8
+n4UKFlxMjfY6t18x3+GaaDGPTvmiWjObGaNstcCdAdUJ3MDun9Ax9ZTITeQNTuEy
+vVAAjDwkK9gIOv0JHtDqoWdp2qnvCmm9MyjtWGxbfq6go3c6wZA=
+=5Wq+
+-----END PGP SIGNATURE-----

Added: dev/hbase/hbase-1.1.13/hbase-1.1.13-bin.tar.gz.md5
==
--- dev/hbase/hbase-1.1.13/hbase-1.1.13-bin.tar.gz.md5 (added)
+++ dev/hbase/hbase-1.1.13/hbase-1.1.13-bin.tar.gz.md5 Fri Dec  1 06:09:44 2017
@@ -0,0 +1 @@
+hbase-1.1.13-bin.tar.gz: 33 B2 35 17 20 7A F6 9F  67 55 C1 2D 83 F1 EC DF

Added: dev/hbase/hbase-1.1.13/hbase-1.1.13-bin.tar.gz.mds
==
--- dev/hbase/hbase-1.1.13/hbase-1.1.13-bin.tar.gz.mds (added)
+++ dev/hbase/hbase-1.1.13/hbase-1.1.13-bin.tar.gz.mds Fri Dec  1 06:09:44 2017
@@ -0,0 +1,17 @@
+hbase-1.1.13-bin.tar.gz:MD5 = 33 B2 35 17 20 7A F6 9F  67 55 C1 2D 83 F1 EC
+  DF
+hbase-1.1.13-bin.tar.gz:   SHA1 = 9950 1D0E 81F3 1C1B 5475  356F 412E 6700 712C
+  C9EA
+hbase-1.1.13-bin.tar.gz: RMD160 = 2AA5 B5CC FA87 DE96 1778  36BD BA17 B9BC 1246
+  CD3C
+hbase-1.1.13-bin.tar.gz: SHA224 = 7423C788 87C8ECB2 9B76A201 360E6DA2 AACF12BD
+  7D3265CA CCA0BA98
+hbase-1.1.13-bin.tar.gz: SHA256 = 9291DBA3 4611FF7B A0EAA4F2 AD8CD757 564A5724
+  5A34FEA1 A4432C4F F01AFE71
+hbase-1.1.13-bin.tar.gz: SHA384 = 0CECC6F8 5BC99D3B 5E23BAA3 78C99E85 EB1AD4D6
+  736F1155 0C8A660B 960C3FFE 4CA4F99C 95C18706
+  B1194177 496F1D6A
+hbase-1.1.13-bin.tar.gz: SHA512 = 6E803C24 9ABA6363 6B1C3A62 8C5D4E7B 4A783104
+  DAC32FEC F6D34D96 B362B28F BE8D1AA0 F5343040
+  30296E84 B48CACB9 2E872D72 7DADF580 00525105
+  A61DD9F4

Added: dev/hbase/hbase-1.1.13/hbase-1.1.13-bin.tar.gz.sha
==
--- dev/hbase/hbase-1.1.13/hbase-1.1.13-bin.tar.gz.sha (added)
+++ dev/hbase/hbase-1.1.13/hbase-1.1.13-bin.tar.gz.sha Fri Dec  1 06:09:44 2017
@@ -0,0 +1,3 @@
+hbase-1.1.13-bin.tar.gz: 6E803C24 9ABA6363 6B1C3A62 8C5D4E7B 4A783104 DAC32FEC
+ F6D34D96 B362B28F BE8D1AA0 F5343040 30296E84 B48CACB9
+ 2E872D72 7DADF580 00525105 A61DD9F4

Added: dev/hbase/hbase-1.1.13/hbase-1.1.13-src.tar.gz
==
Binary file - no diff available.

Propchange: dev/hbase/hbase-1.1.13/hbase-1.1.13-src.tar.gz
--
svn:mime-type = application/octet-stream

Added: dev/hbase/hbase-1.1.13/hbase-1.1.13-src.tar.gz.asc
==
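
The .sha and .mds files above publish SHA-512 (and other) digests for the release candidate tarballs. A minimal sketch of checking one locally with the JDK's MessageDigest; the artifact path is an assumption standing in for wherever the tarball was downloaded:

import java.io.InputStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.security.MessageDigest;

public class VerifySha512 {
  public static void main(String[] args) throws Exception {
    Path artifact = Path.of("hbase-1.1.13-bin.tar.gz"); // local download
    MessageDigest md = MessageDigest.getInstance("SHA-512");
    try (InputStream in = Files.newInputStream(artifact)) {
      byte[] buf = new byte[8192];
      for (int n; (n = in.read(buf)) != -1; ) {
        md.update(buf, 0, n);
      }
    }
    StringBuilder hex = new StringBuilder();
    for (byte b : md.digest()) {
      hex.append(String.format("%02X", b));
    }
    // Compare with the grouped hex in hbase-1.1.13-bin.tar.gz.sha above,
    // with spaces and line breaks removed.
    System.out.println(hex);
  }
}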

[5/6] hbase git commit: updating docs from master

2017-11-30 Thread ndimiduk
updating docs from master


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/2e9a55be
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/2e9a55be
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/2e9a55be

Branch: refs/heads/branch-1.1
Commit: 2e9a55befc308b4892ea5a083412e4f36178ed1a
Parents: b6ff374
Author: Nick Dimiduk 
Authored: Thu Nov 30 19:53:20 2017 -0800
Committer: Nick Dimiduk 
Committed: Thu Nov 30 19:53:20 2017 -0800

--
 .../asciidoc/_chapters/appendix_acl_matrix.adoc |   1 +
 .../appendix_contributing_to_documentation.adoc |  42 +-
 src/main/asciidoc/_chapters/architecture.adoc   | 269 --
 src/main/asciidoc/_chapters/asf.adoc|   4 +-
 src/main/asciidoc/_chapters/backup_restore.adoc | 912 +++
 src/main/asciidoc/_chapters/community.adoc  |   6 +-
 src/main/asciidoc/_chapters/compression.adoc|  10 +-
 src/main/asciidoc/_chapters/configuration.adoc  |  40 +-
 src/main/asciidoc/_chapters/cp.adoc |  12 +-
 src/main/asciidoc/_chapters/datamodel.adoc  |  34 +-
 src/main/asciidoc/_chapters/developer.adoc  | 351 ---
 src/main/asciidoc/_chapters/external_apis.adoc  |  27 +-
 src/main/asciidoc/_chapters/faq.adoc|   4 +-
 .../asciidoc/_chapters/getting_started.adoc |   6 +-
 src/main/asciidoc/_chapters/hbase-default.adoc  |  52 +-
 src/main/asciidoc/_chapters/hbase_apis.adoc |   2 +-
 src/main/asciidoc/_chapters/mapreduce.adoc  | 112 ++-
 src/main/asciidoc/_chapters/ops_mgt.adoc| 159 +++-
 src/main/asciidoc/_chapters/other_info.adoc |  14 +-
 src/main/asciidoc/_chapters/performance.adoc|  41 +-
 src/main/asciidoc/_chapters/preface.adoc|   4 +-
 src/main/asciidoc/_chapters/protobuf.adoc   |   2 +-
 src/main/asciidoc/_chapters/rpc.adoc|   2 +-
 src/main/asciidoc/_chapters/schema_design.adoc  |  41 +-
 src/main/asciidoc/_chapters/security.adoc   |  12 +-
 src/main/asciidoc/_chapters/spark.adoc  |   4 +-
 src/main/asciidoc/_chapters/sql.adoc|   4 +-
 .../_chapters/thrift_filter_language.adoc   |   2 +-
 src/main/asciidoc/_chapters/tracing.adoc|   8 +-
 .../asciidoc/_chapters/troubleshooting.adoc |  19 +-
 src/main/asciidoc/_chapters/unit_testing.adoc   |   8 +-
 src/main/asciidoc/_chapters/upgrading.adoc  | 166 +++-
 src/main/asciidoc/_chapters/zookeeper.adoc  |   8 +-
 src/main/asciidoc/book.adoc |   9 +-
 34 files changed, 1855 insertions(+), 532 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/2e9a55be/src/main/asciidoc/_chapters/appendix_acl_matrix.adoc
--
diff --git a/src/main/asciidoc/_chapters/appendix_acl_matrix.adoc b/src/main/asciidoc/_chapters/appendix_acl_matrix.adoc
index 1d7c748..0c99b1f 100644
--- a/src/main/asciidoc/_chapters/appendix_acl_matrix.adoc
+++ b/src/main/asciidoc/_chapters/appendix_acl_matrix.adoc
@@ -123,6 +123,7 @@ In case the table goes out of date, the unit tests which check for accuracy of p
 || getReplicationPeerConfig | superuser\|global(A)
 || updateReplicationPeerConfig | superuser\|global(A)
 || listReplicationPeers | superuser\|global(A)
+|| getClusterStatus | superuser\|global(A)
 | Region | openRegion | superuser\|global(A)
 || closeRegion | superuser\|global(A)
 || flush | superuser\|global(A)\|global\(C)\|TableOwner\|table(A)\|table\(C)

http://git-wip-us.apache.org/repos/asf/hbase/blob/2e9a55be/src/main/asciidoc/_chapters/appendix_contributing_to_documentation.adoc
--
diff --git a/src/main/asciidoc/_chapters/appendix_contributing_to_documentation.adoc b/src/main/asciidoc/_chapters/appendix_contributing_to_documentation.adoc
index 0337182..a603c16 100644
--- a/src/main/asciidoc/_chapters/appendix_contributing_to_documentation.adoc
+++ b/src/main/asciidoc/_chapters/appendix_contributing_to_documentation.adoc
@@ -35,9 +35,9 @@ including the documentation.
 
 In HBase, documentation includes the following areas, and probably some others:
 
-* The link:http://hbase.apache.org/book.html[HBase Reference
+* The link:https://hbase.apache.org/book.html[HBase Reference
   Guide] (this book)
-* The link:http://hbase.apache.org/[HBase website]
+* The link:https://hbase.apache.org/[HBase website]
 * API documentation
 * Command-line utility output and help text
 * Web UI strings, explicit help text, context-sensitive strings, and others
@@ -119,14 +119,14 @@ JIRA and add a version number to the name of the new patch.
 
 === Editing the HBase Website
 
-The source for the HBase website is in the HBase source, in the _src/main/site/_ directory.
+The source for the HBase we

[4/6] hbase git commit: updating docs from master

2017-11-30 Thread ndimiduk
http://git-wip-us.apache.org/repos/asf/hbase/blob/2e9a55be/src/main/asciidoc/_chapters/backup_restore.adoc
--
diff --git a/src/main/asciidoc/_chapters/backup_restore.adoc b/src/main/asciidoc/_chapters/backup_restore.adoc
new file mode 100644
index 0000000..a9dbcf5
--- /dev/null
+++ b/src/main/asciidoc/_chapters/backup_restore.adoc
@@ -0,0 +1,912 @@
+
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+[[casestudies]]
+= Backup and Restore
+:doctype: book
+:numbered:
+:toc: left
+:icons: font
+:experimental:
+
+[[br.overview]]
+== Overview
+
+Backup and restore is a standard operation provided by many databases. An effective backup and restore
+strategy helps ensure that users can recover data in case of unexpected failures. The HBase backup and restore
+feature helps ensure that enterprises using HBase as a canonical data repository can recover from catastrophic
+failures. Another important feature is the ability to restore the database to a particular
+point-in-time, commonly referred to as a snapshot.
+
+The HBase backup and restore feature provides the ability to create full backups and incremental backups on
+tables in an HBase cluster. The full backup is the foundation on which incremental backups are applied
+to build iterative snapshots. Incremental backups can be run on a schedule to capture changes over time,
+for example by using a Cron task. Incremental backups are more cost-effective than full backups because they only capture
+the changes since the last backup and they also enable administrators to restore the database to any prior incremental backup. Furthermore, the
+utilities also enable table-level data backup-and-recovery if you do not want to restore the entire dataset
+of the backup.
+
+The backup and restore feature supplements the HBase Replication feature. While HBase replication is ideal for
+creating "hot" copies of the data (where the replicated data is immediately available for query), the backup and
+restore feature is ideal for creating "cold" copies of data (where a manual step must be taken to restore the system).
+Previously, users only had the ability to create full backups via the ExportSnapshot functionality. The incremental
+backup implementation is the novel improvement over the previous "art" provided by ExportSnapshot.
+
+[[br.terminology]]
+== Terminology
+
+The backup and restore feature introduces new terminology which can be used to understand how control flows through the
+system.
+
+* _A backup_: A logical unit of data and metadata which can restore a table to its state at a specific point in time.
+* _Full backup_: a type of backup which wholly encapsulates the contents of the table at a point in time.
+* _Incremental backup_: a type of backup which contains the changes in a table since a full backup.
+* _Backup set_: A user-defined name which references one or more tables over which a backup can be executed.
+* _Backup ID_: A unique name which identifies one backup from the rest, e.g. `backupId_1467823988425`
+
+[[br.planning]]
+== Planning
+
+There are some common strategies which can be used to implement backup and restore in your environment. The following section
+shows how these strategies are implemented and identifies potential tradeoffs with each.
+
+WARNING: These backup and restore tools have not been tested on Transparent Data Encryption (TDE) enabled HDFS clusters.
+This is related to the open issue link:https://issues.apache.org/jira/browse/HBASE-16178[HBASE-16178].
+
+[[br.intracluster.backup]]
+=== Backup within a cluster
+
+This strategy stores the backups on the same cluster as where the backup was taken. This approach is only appropriate for testing
+as it does not provide any additional safety on top of what the software itself already provides.
+
+.Intra-Cluster Backup
+image::backup-intra-cluster.png[]
+
+[[br.dedicated.cluster.backup]]
+=== Backup using a dedicated cluster
+
+This strategy provides greater fault tolerance and provides a path towards disaster recovery. In this setting, you will
+store the backup on a separate HDFS cluster by supplying the backup destinat

[3/6] hbase git commit: updating docs from master

2017-11-30 Thread ndimiduk
http://git-wip-us.apache.org/repos/asf/hbase/blob/2e9a55be/src/main/asciidoc/_chapters/developer.adoc
--
diff --git a/src/main/asciidoc/_chapters/developer.adoc b/src/main/asciidoc/_chapters/developer.adoc
index 6a546fb..0ada9a6 100644
--- a/src/main/asciidoc/_chapters/developer.adoc
+++ b/src/main/asciidoc/_chapters/developer.adoc
@@ -46,7 +46,7 @@ As Apache HBase is an Apache Software Foundation project, see <>
 === Mailing Lists
 
 Sign up for the dev-list and the user-list.
-See the link:http://hbase.apache.org/mail-lists.html[mailing lists] page.
+See the link:https://hbase.apache.org/mail-lists.html[mailing lists] page.
 Posing questions - and helping to answer other people's questions - is encouraged! There are varying levels of experience on both lists so patience and politeness are encouraged (and please stay on topic.)
 
 [[slack]]
 FreeNode offers a web-based client, but most people prefer a native client, and
 
 === Jira
 
-Check for existing issues in link:https://issues.apache.org/jira/browse/HBASE[Jira].
+Check for existing issues in link:https://issues.apache.org/jira/projects/HBASE/issues[Jira].
 If it's either a new feature request, enhancement, or a bug, file a ticket.
 
 We track multiple types of work in JIRA:
@@ -173,8 +173,8 @@ GIT is our repository of record for all but the Apache HBase website.
 We used to be on SVN.
 We migrated.
 See link:https://issues.apache.org/jira/browse/INFRA-7768[Migrate Apache HBase SVN Repos to Git].
-See link:http://hbase.apache.org/source-repository.html[Source Code
-Management] page for contributor and committer links or search for HBase on the link:http://git.apache.org/[Apache Git] page.
+See link:https://hbase.apache.org/source-repository.html[Source Code
+Management] page for contributor and committer links or search for HBase on the link:https://git.apache.org/[Apache Git] page.
 
 == IDEs
 
@@ -479,8 +479,7 @@ mvn -DskipTests package assembly:single deploy
 
 If you see `Unable to find resource 'VM_global_library.vm'`, ignore it.
 It's not an error.
-It is link:http://jira.codehaus.org/browse/MSITE-286[officially
-ugly] though.
+It is link:https://issues.apache.org/jira/browse/MSITE-286[officially ugly] though.
 
 [[releasing]]
 == Releasing Apache HBase
@@ -540,35 +539,30 @@ For the build to sign them for you, you a properly configured _settings.xml_ in
 
 [[maven.release]]
 === Making a Release Candidate
-
-NOTE: These instructions are for building HBase 1.y.z
-
-.Point Releases
-If you are making a point release (for example to quickly address a critical incompatibility or security problem) off of a release branch instead of a development branch, the tagging instructions are slightly different.
-I'll prefix those special steps with _Point Release Only_.
+Only committers may make releases of hbase artifacts.
 
 .Before You Begin
-Before you make a release candidate, do a practice run by deploying a snapshot.
-Before you start, check to be sure recent builds have been passing for the branch from where you are going to take your release.
-You should also have tried recent branch tips out on a cluster under load, perhaps by running the `hbase-it` integration test suite for a few hours to 'burn in' the near-candidate bits.
-
-.Point Release Only
+Make sure your environment is properly set up. Maven and Git are the main tooling
+used in the below. You'll need a properly configured _settings.xml_ file in your
+local _~/.m2_ maven repository with logins for apache repos (See <>).
+You will also need to have a published signing key. Browse the Hadoop
+link:http://wiki.apache.org/hadoop/HowToRelease[How To Release] wiki page on
+how to release. It is a model for most of the instructions below. It often has more
+detail on particular steps, for example, on adding your code signing key to the
+project KEYS file up in Apache or on how to update JIRA in preparation for release.
+
+Before you make a release candidate, do a practice run by deploying a SNAPSHOT.
+Check to be sure recent builds have been passing for the branch from where you
+are going to take your release. You should also have tried recent branch tips
+out on a cluster under load, perhaps by running the `hbase-it` integration test
+suite for a few hours to 'burn in' the near-candidate bits.
+
+
+.Specifying the Heap Space for Maven
 [NOTE]
 
-At this point you should tag the previous release branch (ex: 0.96.1) with the new point release tag (e.g.
-0.96.1.1 tag). Any commits with changes for the point release should go against the new tag.
-
-
-The Hadoop link:http://wiki.apache.org/hadoop/HowToRelease[How To
-Release] wiki page is used as a model for most of the instructions below.
-Although it now stale, it may have more detail on particular sections, so
-it 

[1/6] hbase git commit: bump version to 1.1.13

2017-11-30 Thread ndimiduk
Repository: hbase
Updated Branches:
  refs/heads/branch-1.1 926021447 -> c64bf8a9f


bump version to 1.1.13


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/b6ff3743
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/b6ff3743
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/b6ff3743

Branch: refs/heads/branch-1.1
Commit: b6ff374346da142bdde46dd89498e47ba28dd465
Parents: 9260214
Author: Nick Dimiduk 
Authored: Thu Nov 30 19:34:00 2017 -0800
Committer: Nick Dimiduk 
Committed: Thu Nov 30 19:34:00 2017 -0800

--
 hbase-annotations/pom.xml  | 2 +-
 hbase-assembly/pom.xml | 2 +-
 hbase-checkstyle/pom.xml   | 4 ++--
 hbase-client/pom.xml   | 2 +-
 hbase-common/pom.xml   | 2 +-
 hbase-examples/pom.xml | 2 +-
 hbase-hadoop-compat/pom.xml| 2 +-
 hbase-hadoop2-compat/pom.xml   | 2 +-
 hbase-it/pom.xml   | 2 +-
 hbase-prefix-tree/pom.xml  | 2 +-
 hbase-procedure/pom.xml| 2 +-
 hbase-protocol/pom.xml | 2 +-
 hbase-resource-bundle/pom.xml  | 2 +-
 hbase-rest/pom.xml | 2 +-
 hbase-server/pom.xml   | 2 +-
 hbase-shaded/hbase-shaded-check-invariants/pom.xml | 2 +-
 hbase-shaded/hbase-shaded-client/pom.xml   | 2 +-
 hbase-shaded/hbase-shaded-server/pom.xml   | 2 +-
 hbase-shaded/pom.xml   | 2 +-
 hbase-shell/pom.xml| 2 +-
 hbase-testing-util/pom.xml | 2 +-
 hbase-thrift/pom.xml   | 2 +-
 pom.xml| 2 +-
 23 files changed, 24 insertions(+), 24 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/b6ff3743/hbase-annotations/pom.xml
--
diff --git a/hbase-annotations/pom.xml b/hbase-annotations/pom.xml
index 0aa992a..75c9edb 100644
--- a/hbase-annotations/pom.xml
+++ b/hbase-annotations/pom.xml
@@ -23,7 +23,7 @@
   <parent>
     <artifactId>hbase</artifactId>
     <groupId>org.apache.hbase</groupId>
-    <version>1.1.13-SNAPSHOT</version>
+    <version>1.1.13</version>
     <relativePath>..</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/b6ff3743/hbase-assembly/pom.xml
--
diff --git a/hbase-assembly/pom.xml b/hbase-assembly/pom.xml
index 024fcaf..09e1c1b 100644
--- a/hbase-assembly/pom.xml
+++ b/hbase-assembly/pom.xml
@@ -23,7 +23,7 @@
   <parent>
     <artifactId>hbase</artifactId>
     <groupId>org.apache.hbase</groupId>
-    <version>1.1.13-SNAPSHOT</version>
+    <version>1.1.13</version>
     <relativePath>..</relativePath>
   </parent>
   <artifactId>hbase-assembly</artifactId>

http://git-wip-us.apache.org/repos/asf/hbase/blob/b6ff3743/hbase-checkstyle/pom.xml
--
diff --git a/hbase-checkstyle/pom.xml b/hbase-checkstyle/pom.xml
index 87b2308..db01952 100644
--- a/hbase-checkstyle/pom.xml
+++ b/hbase-checkstyle/pom.xml
@@ -22,14 +22,14 @@
   <modelVersion>4.0.0</modelVersion>
   <groupId>org.apache.hbase</groupId>
   <artifactId>hbase-checkstyle</artifactId>
-  <version>1.1.13-SNAPSHOT</version>
+  <version>1.1.13</version>
   <name>Apache HBase - Checkstyle</name>
   <description>Module to hold Checkstyle properties for HBase.</description>
 
   <parent>
     <artifactId>hbase</artifactId>
     <groupId>org.apache.hbase</groupId>
-    <version>1.1.13-SNAPSHOT</version>
+    <version>1.1.13</version>
     <relativePath>..</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/b6ff3743/hbase-client/pom.xml
--
diff --git a/hbase-client/pom.xml b/hbase-client/pom.xml
index 06606cf..47a2e5e 100644
--- a/hbase-client/pom.xml
+++ b/hbase-client/pom.xml
@@ -23,7 +23,7 @@
   <parent>
     <artifactId>hbase</artifactId>
     <groupId>org.apache.hbase</groupId>
-    <version>1.1.13-SNAPSHOT</version>
+    <version>1.1.13</version>
     <relativePath>..</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/b6ff3743/hbase-common/pom.xml
--
diff --git a/hbase-common/pom.xml b/hbase-common/pom.xml
index 7b7d211..786f50b 100644
--- a/hbase-common/pom.xml
+++ b/hbase-common/pom.xml
@@ -23,7 +23,7 @@
   <parent>
     <artifactId>hbase</artifactId>
     <groupId>org.apache.hbase</groupId>
-    <version>1.1.13-SNAPSHOT</version>
+    <version>1.1.13</version>
     <relativePath>..</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/b6ff3743/hbase-examples/pom.xml
--
diff --git a/hbase-examples/pom.xml b/hbase-examples/pom.xml
index af814d8..fecbd68 100644
--- a/hbase-examples/pom.xml
+++ b/hbase-examples/pom.xml
@@ -23,7 +23,7 @@
   <parent>
     <artifactId>hbase</artifactId>
     <groupId>org.apache.hbase</groupId>
-    <version>1.1.13-SNAPSHOT</version>
+    <version>1.1.13</version>
     <relativePath>..</relativePath>
   </parent>
   <artifactId>hbase-examples</artifactId>

http://git-wip-us.apache.org/repos/asf/hbase/blob/b6ff3743/hbase-hadoop-compat/pom.xml
--
diff --git a/hbase-hadoop-compat/pom.xml b/hbase-hadoop-compat/pom.xml
ind

[6/6] hbase git commit: update changes for 1.1.13

2017-11-30 Thread ndimiduk
update changes for 1.1.13


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/c64bf8a9
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/c64bf8a9
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/c64bf8a9

Branch: refs/heads/branch-1.1
Commit: c64bf8a9f35352cd504f2b8f4b02f9148cf45ab6
Parents: 2e9a55b
Author: Nick Dimiduk 
Authored: Thu Nov 30 20:41:12 2017 -0800
Committer: Nick Dimiduk 
Committed: Thu Nov 30 20:41:12 2017 -0800

--
 CHANGES.txt | 91 
 1 file changed, 91 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/c64bf8a9/CHANGES.txt
--
diff --git a/CHANGES.txt b/CHANGES.txt
index d3f9013..bd11e47 100644
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@ -3,6 +3,97 @@ HBase Change Log
 
 
 
+
+Release Notes - HBase - Version 1.1.13 11/30/2017
+
+** Sub-task
+* [HBASE-18867] - maven enforcer plugin needs update to work with jdk9
+* [HBASE-18957] - add test that confirms 2 FamilyFilters in a FilterList using MUST_PASS_ONE operator will return results that match either of the FamilyFilters and revert as needed to make it pass.
+* [HBASE-18980] - Address issues found by error-prone in hbase-hadoop2-compat
+* [HBASE-19070] - temporarily make the mvnsite nightly test non-voting.
+
+
+
+
+
+
+
+** Bug
+* [HBASE-14745] - Shade the last few dependencies in hbase-shaded-client
+* [HBASE-18125] - HBase shell disregards spaces at the end of a split key in a split file
+* [HBASE-18438] - Precommit doesn't warn about unused imports
+* [HBASE-18505] - Our build/yetus personality will run tests on individual modules and then on all (i.e. 'root'). Should do one or other
+* [HBASE-18577] - shaded client includes several non-relocated third party dependencies
+* [HBASE-18665] - ReversedScannerCallable invokes getRegionLocations incorrectly
+* [HBASE-18679] - YARN may null Counters object and cause an NPE in ITBLL
+* [HBASE-18818] - TestConnectionImplemenation fails
+* [HBASE-18934] - precommit on branch-1 isn't supposed to run against hadoop 3
+* [HBASE-18940] - branch-2 (and probably others) fail check of generated source artifact
+* [HBASE-18998] - processor.getRowsToLock() always assumes there is some row being locked
+* [HBASE-19020] - TestXmlParsing exception checking relies on a particular xml implementation without declaring it.
+* [HBASE-19030] - nightly runs should attempt to log test results after archiving
+* [HBASE-19038] - precommit mvn install should run from root on patch
+* [HBASE-19039] - refactor shadedjars test to only run on java changes.
+* [HBASE-19055] - Backport HBASE-19042 to other active branches
+* [HBASE-19058] - The wget isn't installed in building docker image
+* [HBASE-19060] - "Hadoop check" test is running all the time instead of just when changes to java
+* [HBASE-19061] - enforcer NPE on hbase-shaded-invariants
+* [HBASE-19066] - Correct the directory of openjdk-8 for jenkins
+* [HBASE-19124] - Move HBase-Nightly source artifact creation test from JenkinsFile to a script in dev-support
+* [HBASE-19137] - Nightly test should make junit reports optional rather than attempt archive after reporting.
+* [HBASE-19184] - clean up nightly source artifact test to match expectations from switch to git-archive
+* [HBASE-19223] - Remove references to Date Tiered compaction from branch-1.2 and branch-1.1 ref guide
+* [HBASE-19229] - Nightly script to check source artifact should not do a destructive git operation without opt-in
+* [HBASE-19249] - test for "hbase antipatterns" should check _count_ of occurance rather than text of
+* [HBASE-19393] - HTTP 413 FULL head while accessing HBase UI using SSL.
+
+
+
+
+
+
+
+
+
+
+** Improvement
+* [HBASE-18631] - Allow configuration of ChaosMonkey properties via hbase-site
+* [HBASE-18675] - Making {max,min}SessionTimeout configurable for MiniZooKeeperCluster
+* [HBASE-19052] - FixedFileTrailer should recognize CellComparatorImpl class in branch-1.x
+* [HBASE-19140] - hbase-cleanup.sh uses deprecated call to remove files in hdfs
+* [HBASE-19227] - Nightly jobs should archive JVM dumpstream files
+* [HBASE-19228] - nightly job should gather machine stats.
+
+
+
+** New Feature
+* [HBASE-19189] - Ad-hoc test job for running a subset of tests lots of times
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+** Task
+* [HBASE-16459] - Remove unused hbase shell --format option
+* [HBASE-18833] - Ensure precommit personality is up to date on all active branches
+* [HBASE-19097] - update testing to use Apache Yetus Test Patch version 0.6.0
+
+
+
+
+

[2/6] hbase git commit: updating docs from master

2017-11-30 Thread ndimiduk
http://git-wip-us.apache.org/repos/asf/hbase/blob/2e9a55be/src/main/asciidoc/_chapters/performance.adoc
--
diff --git a/src/main/asciidoc/_chapters/performance.adoc b/src/main/asciidoc/_chapters/performance.adoc
index 114754f..c917646 100644
--- a/src/main/asciidoc/_chapters/performance.adoc
+++ b/src/main/asciidoc/_chapters/performance.adoc
@@ -320,7 +320,7 @@ See also <> for compression caveats.
 [[schema.regionsize]]
 === Table RegionSize
 
-The regionsize can be set on a per-table basis via `setFileSize` on link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/HTableDescriptor.html[HTableDescriptor] in the event where certain tables require different regionsizes than the configured default regionsize.
+The regionsize can be set on a per-table basis via `setFileSize` on link:https://hbase.apache.org/apidocs/org/apache/hadoop/hbase/HTableDescriptor.html[HTableDescriptor] in the event where certain tables require different regionsizes than the configured default regionsize.
 
 See <> for more information.
 
@@ -372,7 +372,7 @@ Bloom filters are enabled on a Column Family.
 You can do this by using the setBloomFilterType method of HColumnDescriptor or using the HBase API.
 Valid values are `NONE`, `ROW` (default), or `ROWCOL`.
 See <> for more information on `ROW` versus `ROWCOL`.
-See also the API documentation for link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/HColumnDescriptor.html[HColumnDescriptor].
+See also the API documentation for link:https://hbase.apache.org/apidocs/org/apache/hadoop/hbase/HColumnDescriptor.html[HColumnDescriptor].
 
 The following example creates a table and enables a ROWCOL Bloom filter on the `colfam1` column family.
 
@@ -431,7 +431,7 @@ The blocksize can be configured for each ColumnFamily in a table, and defaults t
 Larger cell values require larger blocksizes.
 There is an inverse relationship between blocksize and the resulting StoreFile indexes (i.e., if the blocksize is doubled then the resulting indexes should be roughly halved).
 
-See link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/HColumnDescriptor.html[HColumnDescriptor] and <>for more information.
+See link:https://hbase.apache.org/apidocs/org/apache/hadoop/hbase/HColumnDescriptor.html[HColumnDescriptor] and <>for more information.
 
 [[cf.in.memory]]
 === In-Memory ColumnFamilies
@@ -440,7 +440,7 @@ ColumnFamilies can optionally be defined as in-memory.
 Data is still persisted to disk, just like any other ColumnFamily.
 In-memory blocks have the highest priority in the <>, but it is not a guarantee that the entire table will be in memory.
 
-See link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/HColumnDescriptor.html[HColumnDescriptor] for more information.
+See link:https://hbase.apache.org/apidocs/org/apache/hadoop/hbase/HColumnDescriptor.html[HColumnDescriptor] for more information.
 
 [[perf.compression]]
 === Compression
@@ -549,19 +549,9 @@ If deferred log flush is used, WAL edits are kept in memory until the flush peri
 The benefit is aggregated and asynchronous `WAL`- writes, but the potential downside is that if the RegionServer goes down the yet-to-be-flushed edits are lost.
 This is safer, however, than not using WAL at all with Puts.
 
-Deferred log flush can be configured on tables via link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/HTableDescriptor.html[HTableDescriptor].
+Deferred log flush can be configured on tables via link:https://hbase.apache.org/apidocs/org/apache/hadoop/hbase/HTableDescriptor.html[HTableDescriptor].
 The default value of `hbase.regionserver.optionallogflushinterval` is 1000ms.
 
-[[perf.hbase.client.autoflush]]
-=== HBase Client: AutoFlush
-
-When performing a lot of Puts, make sure that setAutoFlush is set to false on your link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/client/Table.html[Table] instance.
-Otherwise, the Puts will be sent one at a time to the RegionServer.
-Puts added via `table.add(Put)` and `table.add( <List> Put)` wind up in the same write buffer.
-If `autoFlush = false`, these messages are not sent until the write-buffer is filled.
-To explicitly flush the messages, call `flushCommits`.
-Calling `close` on the `Table` instance will invoke `flushCommits`.
-
 [[perf.hbase.client.putwal]]
 === HBase Client: Turn off WAL on Puts
 
@@ -584,7 +574,7 @@ There is a utility `HTableUtil` currently on MASTER that does this, but you can
 [[perf.hbase.write.mr.reducer]]
 === MapReduce: Skip The Reducer
 
-When writing a lot of data to an HBase table from a MR job (e.g., with link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.html[TableOutputFormat]), and specifically where Puts are being emitted from the Mapper, skip the Reducer step.
+When writing a lot of data to an HBase table from a MR job (e.g., with link:https://hbase.apache.org/apidocs/org/apache/hadoo
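
The AutoFlush section removed in this hunk describes the old HTable setAutoFlush/flushCommits write buffer, which is gone from current client APIs. A minimal sketch of the replacement pattern using BufferedMutator; the table and column names are illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.BufferedMutator;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

public class BufferedWriteExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         // Puts are buffered client-side and shipped in batches, replacing
         // the old autoFlush=false write buffer on HTable.
         BufferedMutator mutator = conn.getBufferedMutator(TableName.valueOf("demo"))) {
      for (int i = 0; i < 10_000; i++) {
        Put put = new Put(Bytes.toBytes("row-" + i));
        put.addColumn(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes(i));
        mutator.mutate(put);
      }
      mutator.flush(); // explicit flush; close() also flushes what remains
    }
  }
}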

hbase git commit: HBASE-19344 improve asyncWAL by using Independent thread for netty #IO in FanOutOneBlockAsyncDFSOutput

2017-11-30 Thread zhangduo
Repository: hbase
Updated Branches:
  refs/heads/branch-2 2bda22a64 -> df3668818


HBASE-19344 improve asyncWAL by using Independent thread for netty #IO in FanOutOneBlockAsyncDFSOutput


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/df366881
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/df366881
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/df366881

Branch: refs/heads/branch-2
Commit: df3668818de6b5d78f7e22911186909ad6aaf113
Parents: 2bda22a
Author: zhangduo 
Authored: Thu Nov 30 22:02:10 2017 +0800
Committer: zhangduo 
Committed: Fri Dec 1 11:19:09 2017 +0800

--
 .../hbase/regionserver/wal/AbstractFSWAL.java   |   6 +-
 .../hbase/regionserver/wal/AsyncFSWAL.java  | 252 ++-
 .../wal/AsyncProtobufLogWriter.java |  26 +-
 .../hbase/regionserver/wal/FSWALEntry.java  |  14 --
 .../hbase/regionserver/wal/RingBufferTruck.java |   6 +-
 .../wal/SecureAsyncProtobufLogWriter.java   |  11 +-
 .../hadoop/hbase/wal/AsyncFSWALProvider.java|  17 +-
 .../hbase/regionserver/wal/TestAsyncFSWAL.java  |   4 +-
 .../regionserver/wal/TestAsyncWALReplay.java|   2 +-
 9 files changed, 168 insertions(+), 170 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/df366881/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
index 64f44cd..534315e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
@@ -969,11 +969,7 @@ public abstract class AbstractFSWAL implements WAL {
 try (TraceScope scope = TraceUtil.createTrace(implClassName + ".append")) {
   FSWALEntry entry = new FSWALEntry(txid, key, edits, hri, inMemstore);
   entry.stampRegionSequenceId(we);
-  if (scope != null) {
-ringBuffer.get(txid).load(entry, scope.getSpan());
-  } else {
-ringBuffer.get(txid).load(entry, null);
-  }
+  ringBuffer.get(txid).load(entry);
 } finally {
   ringBuffer.publish(txid);
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/df366881/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java
index 9aad2bc..18007aa 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java
@@ -19,6 +19,10 @@ package org.apache.hadoop.hbase.regionserver.wal;
 
 import static org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.shouldRetryCreate;
 
+import com.lmax.disruptor.RingBuffer;
+import com.lmax.disruptor.Sequence;
+import com.lmax.disruptor.Sequencer;
+
 import java.io.IOException;
 import java.io.InterruptedIOException;
 import java.lang.reflect.Field;
@@ -32,6 +36,9 @@ import java.util.SortedSet;
 import java.util.TreeSet;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
+import java.util.concurrent.LinkedBlockingQueue;
+import java.util.concurrent.ThreadPoolExecutor;
+import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.locks.Condition;
 import java.util.concurrent.locks.Lock;
@@ -44,28 +51,25 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
-import org.apache.hadoop.hbase.trace.TraceUtil;
-import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.hbase.client.ConnectionUtils;
 import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.io.asyncfs.AsyncFSOutput;
 import org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.NameNodeException;
+import org.apache.hadoop.hbase.trace.TraceUtil;
 import org.apache.hadoop.hbase.wal.AsyncFSWALProvider;
 import org.apache.hadoop.hbase.wal.WALEdit;
 import org.apache.hadoop.hbase.wal.WALKey;
 import org.apache.hadoop.hbase.wal.WALProvider.AsyncWriter;
-import org.apache.hadoop.hbase.shaded.com.google.common.util.concurrent.ThreadFactoryBuilder;
-import org.apache.hadoop.hbase.shaded.io.netty.channel.Channel;
-import org.apac
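
The AbstractFSWAL hunk above shows the ring-buffer discipline this WAL relies on: claim a sequence, load the event, and publish in a finally block so a failed load can never leave an unpublished slot that stalls the consumer. A standalone sketch of that claim/load/publish pattern against the LMAX Disruptor API; the Truck event and the handler are illustrative stand-ins for the WAL's RingBufferTruck machinery:

import com.lmax.disruptor.RingBuffer;
import com.lmax.disruptor.dsl.Disruptor;
import java.util.concurrent.Executors;

public class RingBufferPublishDemo {
  // Simple mutable event, analogous in spirit to RingBufferTruck.
  static final class Truck {
    Object payload;
  }

  public static void main(String[] args) {
    Disruptor<Truck> disruptor =
        new Disruptor<>(Truck::new, 1024, Executors.defaultThreadFactory());
    disruptor.handleEventsWith(
        (truck, sequence, endOfBatch) -> System.out.println("consumed " + truck.payload));
    RingBuffer<Truck> ringBuffer = disruptor.start();

    long txid = ringBuffer.next(); // claim the next slot
    try {
      ringBuffer.get(txid).payload = "wal entry"; // load the claimed slot
    } finally {
      ringBuffer.publish(txid); // always publish, as in AbstractFSWAL.append
    }
    disruptor.shutdown();
  }
}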

hbase git commit: HBASE-19344 improve asyncWAL by using Independent thread for netty #IO in FanOutOneBlockAsyncDFSOutput

2017-11-30 Thread zhangduo
Repository: hbase
Updated Branches:
  refs/heads/master cc3f804b0 -> 49a9fe488


HBASE-19344 improve asyncWAL by using Independent thread for netty #IO in FanOutOneBlockAsyncDFSOutput


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/49a9fe48
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/49a9fe48
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/49a9fe48

Branch: refs/heads/master
Commit: 49a9fe48830cb0ef0ae9eef2de305420c08d09ab
Parents: cc3f804
Author: zhangduo 
Authored: Thu Nov 30 22:02:10 2017 +0800
Committer: zhangduo 
Committed: Fri Dec 1 11:19:03 2017 +0800

--
 .../hbase/regionserver/wal/AbstractFSWAL.java   |   6 +-
 .../hbase/regionserver/wal/AsyncFSWAL.java  | 252 ++-
 .../wal/AsyncProtobufLogWriter.java |  26 +-
 .../hbase/regionserver/wal/FSWALEntry.java  |  14 --
 .../hbase/regionserver/wal/RingBufferTruck.java |   6 +-
 .../wal/SecureAsyncProtobufLogWriter.java   |  11 +-
 .../hadoop/hbase/wal/AsyncFSWALProvider.java|  17 +-
 .../hbase/regionserver/wal/TestAsyncFSWAL.java  |   4 +-
 .../regionserver/wal/TestAsyncWALReplay.java|   2 +-
 9 files changed, 168 insertions(+), 170 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/49a9fe48/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
index 64f44cd..534315e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
@@ -969,11 +969,7 @@ public abstract class AbstractFSWAL implements WAL {
 try (TraceScope scope = TraceUtil.createTrace(implClassName + ".append")) {
   FSWALEntry entry = new FSWALEntry(txid, key, edits, hri, inMemstore);
   entry.stampRegionSequenceId(we);
-  if (scope != null) {
-ringBuffer.get(txid).load(entry, scope.getSpan());
-  } else {
-ringBuffer.get(txid).load(entry, null);
-  }
+  ringBuffer.get(txid).load(entry);
 } finally {
   ringBuffer.publish(txid);
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/49a9fe48/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java
index 9aad2bc..18007aa 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java
@@ -19,6 +19,10 @@ package org.apache.hadoop.hbase.regionserver.wal;
 
 import static org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.shouldRetryCreate;
 
+import com.lmax.disruptor.RingBuffer;
+import com.lmax.disruptor.Sequence;
+import com.lmax.disruptor.Sequencer;
+
 import java.io.IOException;
 import java.io.InterruptedIOException;
 import java.lang.reflect.Field;
@@ -32,6 +36,9 @@ import java.util.SortedSet;
 import java.util.TreeSet;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
+import java.util.concurrent.LinkedBlockingQueue;
+import java.util.concurrent.ThreadPoolExecutor;
+import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.locks.Condition;
 import java.util.concurrent.locks.Lock;
@@ -44,28 +51,25 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
-import org.apache.hadoop.hbase.trace.TraceUtil;
-import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.hbase.client.ConnectionUtils;
 import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.io.asyncfs.AsyncFSOutput;
 import org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.NameNodeException;
+import org.apache.hadoop.hbase.trace.TraceUtil;
 import org.apache.hadoop.hbase.wal.AsyncFSWALProvider;
 import org.apache.hadoop.hbase.wal.WALEdit;
 import org.apache.hadoop.hbase.wal.WALKey;
 import org.apache.hadoop.hbase.wal.WALProvider.AsyncWriter;
-import org.apache.hadoop.hbase.shaded.com.google.common.util.concurrent.ThreadFactoryBuilder;
-import org.apache.hadoop.hbase.shaded.io.netty.channel.Channel;
-import org.apache.h

hbase git commit: HBASE-18233 We shouldn't wait for readlock in doMiniBatchMutation in case of deadlock (Allan Yang)

2017-11-30 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/branch-1 143ceb97b -> 2f7a6f21e


HBASE-18233 We shouldn't wait for readlock in doMiniBatchMutation in case of deadlock (Allan Yang)

This patch plus a sorting of the batch (HBASE-17924) fixes a regression
in Increment/CheckAndPut-style operations.

Signed-off-by: Yu Li 
Signed-off-by: Allan Yang 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/2f7a6f21
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/2f7a6f21
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/2f7a6f21

Branch: refs/heads/branch-1
Commit: 2f7a6f21eb5938d7c2126d64282d03bef0eeff9b
Parents: 143ceb9
Author: Michael Stack 
Authored: Tue Nov 28 09:14:58 2017 -0800
Committer: Michael Stack 
Committed: Thu Nov 30 17:10:27 2017 -0800

--
 .../hadoop/hbase/regionserver/HRegion.java  | 100 +
 .../hadoop/hbase/client/TestMultiParallel.java  | 148 +++
 .../hbase/regionserver/TestAtomicOperation.java |   5 +-
 3 files changed, 225 insertions(+), 28 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/2f7a6f21/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index 1b178f6..06f2990 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -2191,7 +2191,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
    * Should the store be flushed because it is old enough.
    * 
    * Every FlushPolicy should call this to determine whether a store is old enough to flush(except
-   * that you always flush all stores). Otherwise the {@link #shouldFlush()} method will always
+   * that you always flush all stores). Otherwise the shouldFlush method will always
    * returns true which will make a lot of flush requests.
    */
   boolean shouldFlushStore(Store store) {
@@ -3243,11 +3243,20 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
           continue;
         }
 
+
+        //HBASE-18233
         // If we haven't got any rows in our batch, we should block to
-        // get the next one.
+        // get the next one's read lock. We need at least one row to mutate.
+        // If we have got rows, do not block when lock is not available,
+        // so that we can fail fast and go on with the rows with locks in
+        // the batch. By doing this, we can reduce contention and prevent
+        // possible deadlocks.
+        // The unfinished rows in the batch will be detected in batchMutate,
+        // and it will try to finish them by calling doMiniBatchMutation again.
+        boolean shouldBlock = numReadyToWrite == 0;
         RowLock rowLock = null;
         try {
-          rowLock = getRowLockInternal(mutation.getRow(), true);
+          rowLock = getRowLockInternal(mutation.getRow(), true, shouldBlock);
         } catch (TimeoutIOException e) {
           // We will retry when other exceptions, but we should stop if we timeout.
           throw e;
@@ -3256,8 +3265,10 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
               + Bytes.toStringBinary(mutation.getRow()), ioe);
         }
         if (rowLock == null) {
-          // We failed to grab another lock
-          break; // stop acquiring more rows for this batch
+          // We failed to grab another lock. Stop acquiring more rows for this
+          // batch and go on with the gotten ones
+          break;
+
         } else {
           acquiredRowLocks.add(rowLock);
         }
@@ -3356,7 +3367,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
           checkAndPrepareMutation(cpMutation, isInReplay, cpFamilyMap, now);
 
           // Acquire row locks. If not, the whole batch will fail.
-          acquiredRowLocks.add(getRowLockInternal(cpMutation.getRow(), true));
+          acquiredRowLocks.add(getRowLockInternal(cpMutation.getRow(), true, true));
 
           // Returned mutations from coprocessor correspond to the Mutation at index i. We can
           // directly add the cells from those mutations to the familyMaps of this mutation.
@@ -3676,7 +3687,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
   get.addColumn(family, qualifier);
   checkRow(row, "checkAndMutate");
   // Lock row - note that doBatchMutate will relock this row if called
-  RowLock rowLock = get
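
The new comment block states the fix precisely: block for a row lock only while the mini-batch holds none, try-without-waiting for every further row, and let batchMutate retry the leftovers. A minimal sketch of that fail-fast acquisition pattern with java.util.concurrent locks; getRowLockInternal's real signature is internal to HRegion, so this only approximates the idea:

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.Lock;

public final class FailFastBatchLocking {
  // Acquire locks for as many rows as possible; wait only when we hold none,
  // mirroring doMiniBatchMutation: at least one row must make progress.
  public static List<Lock> acquireBatch(List<Lock> rowLocks) throws InterruptedException {
    List<Lock> acquired = new ArrayList<>();
    for (Lock lock : rowLocks) {
      boolean shouldBlock = acquired.isEmpty();
      boolean locked = shouldBlock
          ? lock.tryLock(30, TimeUnit.SECONDS) // first row: bounded wait
          : lock.tryLock();                    // later rows: fail fast
      if (!locked) {
        break; // unfinished rows are picked up by the next mini-batch
      }
      acquired.add(lock);
    }
    return acquired;
  }
}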

hbase git commit: HBASE-18233 We shouldn't wait for readlock in doMiniBatchMutation in case of deadlock (Allan Yang)

2017-11-30 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/branch-1.4 21eb8ba6d -> 6c490625a


HBASE-18233 We shouldn't wait for readlock in doMiniBatchMutation in case of deadlock (Allan Yang)

This patch plus a sorting of the batch (HBASE-17924) fixes a regression
in Increment/CheckAndPut-style operations.

Signed-off-by: Yu Li 
Signed-off-by: Allan Yang 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/6c490625
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/6c490625
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/6c490625

Branch: refs/heads/branch-1.4
Commit: 6c490625aa102ed33a99352b7b308e2a9c2f3c35
Parents: 21eb8ba
Author: Michael Stack 
Authored: Tue Nov 28 09:14:58 2017 -0800
Committer: Michael Stack 
Committed: Thu Nov 30 17:08:06 2017 -0800

--
 .../hadoop/hbase/regionserver/HRegion.java  | 100 +
 .../hadoop/hbase/client/TestMultiParallel.java  | 148 +++
 .../hbase/regionserver/TestAtomicOperation.java |   5 +-
 3 files changed, 225 insertions(+), 28 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/6c490625/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index 1b178f6..06f2990 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -2191,7 +2191,7 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
* Should the store be flushed because it is old enough.
* 
* Every FlushPolicy should call this to determine whether a store is old 
enough to flush(except
-   * that you always flush all stores). Otherwise the {@link #shouldFlush()} 
method will always
+   * that you always flush all stores). Otherwise the shouldFlush method will 
always
   * return true, which will make a lot of flush requests.
*/
   boolean shouldFlushStore(Store store) {
@@ -3243,11 +3243,20 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
   continue;
 }
 
+
+//HBASE-18233
 // If we haven't got any rows in our batch, we should block to
-// get the next one.
+// get the next one's read lock. We need at least one row to mutate.
+// If we have got rows, do not block when lock is not available,
+// so that we can fail fast and go on with the rows with locks in
+// the batch. By doing this, we can reduce contention and prevent
+// possible deadlocks.
+// The unfinished rows in the batch will be detected in batchMutate,
+// and it will try to finish them by calling doMiniBatchMutation again.
+boolean shouldBlock = numReadyToWrite == 0;
 RowLock rowLock = null;
 try {
-  rowLock = getRowLockInternal(mutation.getRow(), true);
+  rowLock = getRowLockInternal(mutation.getRow(), true, shouldBlock);
 } catch (TimeoutIOException e) {
   // We will retry when other exceptions, but we should stop if we 
timeout .
   throw e;
@@ -3256,8 +3265,10 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
 + Bytes.toStringBinary(mutation.getRow()), ioe);
 }
 if (rowLock == null) {
-  // We failed to grab another lock
-  break; // stop acquiring more rows for this batch
+  // We failed to grab another lock. Stop acquiring more rows for this
+  // batch and go on with the ones already locked
+  break;
+
 } else {
   acquiredRowLocks.add(rowLock);
 }
@@ -3356,7 +3367,7 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
   checkAndPrepareMutation(cpMutation, isInReplay, cpFamilyMap, 
now);
 
   // Acquire row locks. If not, the whole batch will fail.
-  acquiredRowLocks.add(getRowLockInternal(cpMutation.getRow(), 
true));
+  acquiredRowLocks.add(getRowLockInternal(cpMutation.getRow(), 
true, true));
 
   // Returned mutations from coprocessor correspond to the 
Mutation at index i. We can
   // directly add the cells from those mutations to the familyMaps 
of this mutation.
@@ -3676,7 +3687,7 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
   get.addColumn(family, qualifier);
   checkRow(row, "checkAndMutate");
   // Lock row - note that doBatchMutate will relock this row if called
-  RowLock rowLock =
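
For readers following the locking change above: the fail-fast idea is that only
the first row of a mini-batch is worth blocking for; once the batch is
non-empty, a failed lock attempt simply stops batch growth, and the leftover
rows are retried in a later call. A minimal standalone sketch of that pattern,
using java.util.concurrent locks rather than HBase's internal RowLock machinery:

// Sketch of the HBASE-18233 fail-fast batching idea (illustrative, not HBase code).
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.locks.ReentrantLock;

final class MiniBatchLocking {
  static List<ReentrantLock> acquire(List<ReentrantLock> rowLocks) {
    List<ReentrantLock> acquired = new ArrayList<>();
    for (ReentrantLock lock : rowLocks) {
      if (acquired.isEmpty()) {
        lock.lock();                // block: we need at least one row to mutate
      } else if (!lock.tryLock()) { // non-blocking once the batch has rows
        break;                      // stop growing; leftovers go to a later batch
      }
      acquired.add(lock);
    }
    return acquired;
  }
}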

[2/3] hbase git commit: HBASE-19393 HTTP 413 FULL head while accessing HBase UI using SSL.

2017-11-30 Thread apurtell
HBASE-19393 HTTP 413 FULL head while accessing HBase UI using SSL.

Signed-off-by: Andrew Purtell 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/891db9a8
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/891db9a8
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/891db9a8

Branch: refs/heads/branch-1.2
Commit: 891db9a8ae289ab7d2b2769d8c53a2960c31b4cc
Parents: d0c99f1
Author: Sergey Soldatov 
Authored: Thu Nov 30 15:46:38 2017 -0800
Committer: Andrew Purtell 
Committed: Thu Nov 30 17:07:24 2017 -0800

--
 .../src/main/java/org/apache/hadoop/hbase/http/HttpServer.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/891db9a8/hbase-server/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java
index 667e597..0be4072 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java
@@ -378,6 +378,7 @@ public class HttpServer implements FilterContainer {
   throw new HadoopIllegalArgumentException(
   "unknown scheme for endpoint:" + ep);
 }
+listener.setHeaderBufferSize(1024*64);
 listener.setHost(ep.getHost());
 listener.setPort(ep.getPort() == -1 ? 0 : ep.getPort());
 server.addManagedListener(listener);
@@ -604,7 +605,6 @@ public class HttpServer implements FilterContainer {
   // the same port with indeterminate routing of incoming requests to them
   ret.setReuseAddress(false);
 }
-ret.setHeaderBufferSize(1024*64);
 return ret;
   }
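
The substance of this fix is where the 64 KiB header buffer gets applied:
previously it was set only on the plain base listener, so SSL listeners kept
Jetty's small default and requests with large headers failed with HTTP 413. A
sketch of the corrected placement, assuming the Jetty 6 (org.mortbay) connector
API that HBase 1.x bundles; the factory method itself is illustrative:

// Illustrative sketch, assuming the Jetty 6 (org.mortbay) connector API.
import org.mortbay.jetty.Connector;
import org.mortbay.jetty.bio.SocketConnector;
import org.mortbay.jetty.security.SslSocketConnector;

final class ConnectorFactory {
  static Connector create(boolean ssl) {
    Connector c = ssl ? new SslSocketConnector() : new SocketConnector();
    // Set on every connector, SSL included. Setting it only when building the
    // plain base listener leaves HTTPS at the small default buffer, which is
    // what produced the "413 FULL head" responses on the UI.
    c.setHeaderBufferSize(1024 * 64);
    return c;
  }
}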
 



[1/3] hbase git commit: HBASE-19393 HTTP 413 FULL head while accessing HBase UI using SSL.

2017-11-30 Thread apurtell
Repository: hbase
Updated Branches:
  refs/heads/branch-1.1 540bf082a -> 926021447
  refs/heads/branch-1.2 d0c99f1dd -> 891db9a8a
  refs/heads/branch-1.3 7fde0fdcc -> 2685d0691


HBASE-19393 HTTP 413 FULL head while accessing HBase UI using SSL.

Signed-off-by: Andrew Purtell 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/92602144
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/92602144
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/92602144

Branch: refs/heads/branch-1.1
Commit: 926021447f033c6211c8201ca8309dcc2c2f3c54
Parents: 540bf08
Author: Sergey Soldatov 
Authored: Thu Nov 30 15:46:38 2017 -0800
Committer: Andrew Purtell 
Committed: Thu Nov 30 17:07:18 2017 -0800

--
 .../src/main/java/org/apache/hadoop/hbase/http/HttpServer.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/92602144/hbase-server/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java
index 1ffd515..393434b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java
@@ -378,6 +378,7 @@ public class HttpServer implements FilterContainer {
   throw new HadoopIllegalArgumentException(
   "unknown scheme for endpoint:" + ep);
 }
+listener.setHeaderBufferSize(1024*64);
 listener.setHost(ep.getHost());
 listener.setPort(ep.getPort() == -1 ? 0 : ep.getPort());
 server.addManagedListener(listener);
@@ -600,7 +601,6 @@ public class HttpServer implements FilterContainer {
   // the same port with indeterminate routing of incoming requests to them
   ret.setReuseAddress(false);
 }
-ret.setHeaderBufferSize(1024*64);
 return ret;
   }
 



[3/3] hbase git commit: HBASE-19393 HTTP 413 FULL head while accessing HBase UI using SSL.

2017-11-30 Thread apurtell
HBASE-19393 HTTP 413 FULL head while accessing HBase UI using SSL.

Signed-off-by: Andrew Purtell 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/2685d069
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/2685d069
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/2685d069

Branch: refs/heads/branch-1.3
Commit: 2685d06911d8bf0ef30ae172376ffb460d918619
Parents: 7fde0fd
Author: Sergey Soldatov 
Authored: Thu Nov 30 15:46:38 2017 -0800
Committer: Andrew Purtell 
Committed: Thu Nov 30 17:07:27 2017 -0800

--
 .../src/main/java/org/apache/hadoop/hbase/http/HttpServer.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/2685d069/hbase-server/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java
index 667e597..0be4072 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java
@@ -378,6 +378,7 @@ public class HttpServer implements FilterContainer {
   throw new HadoopIllegalArgumentException(
   "unknown scheme for endpoint:" + ep);
 }
+listener.setHeaderBufferSize(1024*64);
 listener.setHost(ep.getHost());
 listener.setPort(ep.getPort() == -1 ? 0 : ep.getPort());
 server.addManagedListener(listener);
@@ -604,7 +605,6 @@ public class HttpServer implements FilterContainer {
   // the same port with indeterminate routing of incoming requests to them
   ret.setReuseAddress(false);
 }
-ret.setHeaderBufferSize(1024*64);
 return ret;
   }
 



[2/2] hbase git commit: HBASE-19393 HTTP 413 FULL head while accessing HBase UI using SSL.

2017-11-30 Thread apurtell
HBASE-19393 HTTP 413 FULL head while accessing HBase UI using SSL.

Signed-off-by: Andrew Purtell 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/21eb8ba6
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/21eb8ba6
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/21eb8ba6

Branch: refs/heads/branch-1.4
Commit: 21eb8ba6dd6c20c5e9d92ae0cec1e15243f2f4ab
Parents: f8e6a56
Author: Sergey Soldatov 
Authored: Thu Nov 30 15:46:38 2017 -0800
Committer: Andrew Purtell 
Committed: Thu Nov 30 17:03:43 2017 -0800

--
 .../src/main/java/org/apache/hadoop/hbase/http/HttpServer.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/21eb8ba6/hbase-server/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java
index 70b5242..c630dc0 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java
@@ -412,6 +412,7 @@ public class HttpServer implements FilterContainer {
   throw new HadoopIllegalArgumentException(
   "unknown scheme for endpoint:" + ep);
 }
+listener.setHeaderBufferSize(1024*64);
 listener.setHost(ep.getHost());
 listener.setPort(ep.getPort() == -1 ? 0 : ep.getPort());
 server.addManagedListener(listener);
@@ -638,7 +639,6 @@ public class HttpServer implements FilterContainer {
   // the same port with indeterminate routing of incoming requests to them
   ret.setReuseAddress(false);
 }
-ret.setHeaderBufferSize(1024*64);
 return ret;
   }
 



[1/2] hbase git commit: HBASE-19393 HTTP 413 FULL head while accessing HBase UI using SSL.

2017-11-30 Thread apurtell
Repository: hbase
Updated Branches:
  refs/heads/branch-1 a4b9ac605 -> 143ceb97b
  refs/heads/branch-1.4 f8e6a56e1 -> 21eb8ba6d


HBASE-19393 HTTP 413 FULL head while accessing HBase UI using SSL.

Signed-off-by: Andrew Purtell 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/143ceb97
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/143ceb97
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/143ceb97

Branch: refs/heads/branch-1
Commit: 143ceb97ba2af808ffc00cf80f837ff918ede23b
Parents: a4b9ac6
Author: Sergey Soldatov 
Authored: Thu Nov 30 15:46:38 2017 -0800
Committer: Andrew Purtell 
Committed: Thu Nov 30 17:03:32 2017 -0800

--
 .../src/main/java/org/apache/hadoop/hbase/http/HttpServer.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/143ceb97/hbase-server/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java
index 70b5242..c630dc0 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java
@@ -412,6 +412,7 @@ public class HttpServer implements FilterContainer {
   throw new HadoopIllegalArgumentException(
   "unknown scheme for endpoint:" + ep);
 }
+listener.setHeaderBufferSize(1024*64);
 listener.setHost(ep.getHost());
 listener.setPort(ep.getPort() == -1 ? 0 : ep.getPort());
 server.addManagedListener(listener);
@@ -638,7 +639,6 @@ public class HttpServer implements FilterContainer {
   // the same port with indeterminate routing of incoming requests to them
   ret.setReuseAddress(false);
 }
-ret.setHeaderBufferSize(1024*64);
 return ret;
   }
 



hbase git commit: HBASE-19326 Remove decommissioned servers from rsgroup

2017-11-30 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/master c64546aa3 -> cc3f804b0


HBASE-19326 Remove decommissioned servers from rsgroup

Signed-off-by: Michael Stack 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/cc3f804b
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/cc3f804b
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/cc3f804b

Branch: refs/heads/master
Commit: cc3f804b07213f5e60e6ce775d7b4795eada448a
Parents: c64546a
Author: Guangxu Cheng 
Authored: Fri Dec 1 03:48:29 2017 +0800
Committer: Michael Stack 
Committed: Thu Nov 30 16:10:28 2017 -0800

--
 .../hadoop/hbase/rsgroup/RSGroupAdmin.java  |  10 ++
 .../hbase/rsgroup/RSGroupAdminClient.java   |  20 
 .../hbase/rsgroup/RSGroupAdminEndpoint.java |  36 ++
 .../hbase/rsgroup/RSGroupAdminServer.java   |  52 +
 .../hbase/rsgroup/RSGroupInfoManager.java   |   6 +
 .../hbase/rsgroup/RSGroupInfoManagerImpl.java   |  26 +
 .../src/main/protobuf/RSGroupAdmin.proto|  10 ++
 .../hadoop/hbase/rsgroup/TestRSGroups.java  |   6 +-
 .../hadoop/hbase/rsgroup/TestRSGroupsBase.java  | 112 ++-
 .../rsgroup/VerifyingRSGroupAdminClient.java|   6 +
 .../hbase/coprocessor/MasterObserver.java   |  18 +++
 .../hbase/master/MasterCoprocessorHost.java |  24 
 .../hbase/security/access/AccessController.java |   6 +
 .../src/main/ruby/hbase/rsgroup_admin.rb|  12 ++
 hbase-shell/src/main/ruby/shell.rb  |   1 +
 .../shell/commands/remove_servers_rsgroup.rb|  35 ++
 16 files changed, 376 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/cc3f804b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdmin.java
--
diff --git 
a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdmin.java 
b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdmin.java
index 5f38d39..453ef54 100644
--- 
a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdmin.java
+++ 
b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdmin.java
@@ -88,4 +88,14 @@ public interface RSGroupAdmin {
*/
   void moveServersAndTables(Set servers, Set tables,
 String targetGroup) throws IOException;
+
+  /**
+   * Remove decommissioned servers from rsgroup.
+   * 1. Sometimes a server aborts due to a hardware failure and we must take it
+   * offline for repair, or we need to move some servers to join other clusters.
+   * In these cases we need to remove such servers from the rsgroup.
+   * 2. Dead/recovering/live servers will be disallowed.
+   * @param servers set of servers to remove
+   */
+  void removeServers(Set servers) throws IOException;
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/cc3f804b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminClient.java
--
diff --git 
a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminClient.java
 
b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminClient.java
index 9949704..be83a7b 100644
--- 
a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminClient.java
+++ 
b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminClient.java
@@ -42,6 +42,7 @@ import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServers
 import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveTablesRequest;
 import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RSGroupAdminService;
 import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupRequest;
+import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveServersRequest;
 import org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos;
 
 import org.apache.hadoop.hbase.shaded.com.google.common.collect.Sets;
@@ -205,4 +206,23 @@ class RSGroupAdminClient implements RSGroupAdmin {
   throw ProtobufUtil.handleRemoteException(e);
 }
   }
+
+  @Override
+  public void removeServers(Set servers) throws IOException {
+Set hostPorts = Sets.newHashSet();
+for(Address el: servers) {
+  hostPorts.add(HBaseProtos.ServerName.newBuilder()
+  .setHostName(el.getHostname())
+  .setPort(el.getPort())
+  .build());
+}
+RemoveServersRequest request = RemoveServersRequest.newBuilder()
+.addAllServers(hostPorts)
+.build();
+try {
+  stub.removeServers(null, request);
+} catch (ServiceException e) {
+  throw ProtobufUtil.handleRemoteException(e);
+}
+  }
 }
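
A hypothetical caller of the new removeServers API (the host names and ports
below are made up; Address, RSGroupAdmin and Sets are the types the patch uses,
and Address.fromParts is assumed as the factory):

// Usage sketch for RSGroupAdmin.removeServers; endpoints are illustrative.
import java.io.IOException;
import java.util.Set;
import org.apache.hadoop.hbase.net.Address;
import org.apache.hadoop.hbase.rsgroup.RSGroupAdmin;
import org.apache.hadoop.hbase.shaded.com.google.common.collect.Sets;

final class DecommissionExample {
  static void removeOfflined(RSGroupAdmin admin) throws IOException {
    Set<Address> servers = Sets.newHashSet(
        Address.fromParts("rs-17.example.com", 16020),
        Address.fromParts("rs-23.example.com", 16020));
    // Per the javadoc above, only servers already taken offline may be
    // removed; dead, recovering and live servers are disallowed.
    admin.removeServers(servers);
  }
}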

hbase git commit: HBASE-19388 - Incorrect value is being set for Compaction Pressure in RegionLoadStats object inside HRegion class

2017-11-30 Thread apurtell
Repository: hbase
Updated Branches:
  refs/heads/branch-1 4846300c8 -> a4b9ac605


HBASE-19388 - Incorrect value is being set for Compaction Pressure in 
RegionLoadStats object inside HRegion class

Signed-off-by: tedyu 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/a4b9ac60
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/a4b9ac60
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/a4b9ac60

Branch: refs/heads/branch-1
Commit: a4b9ac6050b620b2bde8e486d29fcc1c1031c9b3
Parents: 4846300
Author: Harshal Deepakkumar Jain 

Authored: Thu Nov 30 18:08:01 2017 +0530
Committer: Andrew Purtell 
Committed: Thu Nov 30 15:07:45 2017 -0800

--
 .../main/java/org/apache/hadoop/hbase/regionserver/HRegion.java  | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/a4b9ac60/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index c68f813..1b178f6 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -7325,8 +7325,8 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
 stats.setHeapOccupancy((int)(occupancy * 100));
   }
 }
-stats.setCompactionPressure((int)rsServices.getCompactionPressure()*100 > 
100 ? 100 :
-(int)rsServices.getCompactionPressure()*100);
+stats.setCompactionPressure((int) (rsServices.getCompactionPressure() * 
100 > 100 ? 100
+: rsServices.getCompactionPressure() * 100));
 return stats.build();
   }
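
The bug here is pure operator precedence: a unary cast binds tighter than
multiplication, so the old expression truncated the fractional pressure to 0
before multiplying. A self-contained demonstration:

// Demo of the cast-precedence bug fixed above (plain Java, not HBase code).
public class CastDemo {
  public static void main(String[] args) {
    double pressure = 0.73;             // may exceed 1.0, hence the 100 cap above
    int wrong = (int) pressure * 100;   // (int) 0.73 -> 0, then 0 * 100 -> 0
    int right = (int) (pressure * 100); // 0.73 * 100 -> 73.0, then cast -> 73
    System.out.println(wrong + " vs " + right); // prints "0 vs 73"
  }
}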
 



[3/3] hbase git commit: HBASE-19388 - Incorrect value is being set for Compaction Pressure in RegionLoadStats object inside HRegion class

2017-11-30 Thread apurtell
HBASE-19388 - Incorrect value is being set for Compaction Pressure in 
RegionLoadStats object inside HRegion class

Signed-off-by: tedyu 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/d0c99f1d
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/d0c99f1d
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/d0c99f1d

Branch: refs/heads/branch-1.2
Commit: d0c99f1dd88266a155798f1b688fdd2bf7b9c2ba
Parents: 9ab637c
Author: Harshal Deepakkumar Jain 

Authored: Thu Nov 30 18:08:01 2017 +0530
Committer: Andrew Purtell 
Committed: Thu Nov 30 15:07:57 2017 -0800

--
 .../main/java/org/apache/hadoop/hbase/regionserver/HRegion.java  | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/d0c99f1d/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index 30202a0..696f8c3 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -7008,8 +7008,8 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
 stats.setHeapOccupancy((int)(occupancy * 100));
   }
 }
-stats.setCompactionPressure((int)rsServices.getCompactionPressure()*100 > 
100 ? 100 :
-(int)rsServices.getCompactionPressure()*100);
+stats.setCompactionPressure((int) (rsServices.getCompactionPressure() * 
100 > 100 ? 100
+: rsServices.getCompactionPressure() * 100));
 return stats.build();
   }
 



[1/3] hbase git commit: HBASE-19388 - Incorrect value is being set for Compaction Pressure in RegionLoadStats object inside HRegion class

2017-11-30 Thread apurtell
Repository: hbase
Updated Branches:
  refs/heads/branch-1.2 9ab637c22 -> d0c99f1dd
  refs/heads/branch-1.3 5dc97e298 -> 7fde0fdcc
  refs/heads/branch-1.4 c1a1c97e8 -> f8e6a56e1


HBASE-19388 - Incorrect value is being set for Compaction Pressure in 
RegionLoadStats object inside HRegion class

Signed-off-by: tedyu 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/f8e6a56e
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/f8e6a56e
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/f8e6a56e

Branch: refs/heads/branch-1.4
Commit: f8e6a56e1e231ae55db6a34279f979cc7399bed0
Parents: c1a1c97
Author: Harshal Deepakkumar Jain 

Authored: Thu Nov 30 18:08:01 2017 +0530
Committer: Andrew Purtell 
Committed: Thu Nov 30 15:07:50 2017 -0800

--
 .../main/java/org/apache/hadoop/hbase/regionserver/HRegion.java  | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/f8e6a56e/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index c68f813..1b178f6 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -7325,8 +7325,8 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
 stats.setHeapOccupancy((int)(occupancy * 100));
   }
 }
-stats.setCompactionPressure((int)rsServices.getCompactionPressure()*100 > 
100 ? 100 :
-(int)rsServices.getCompactionPressure()*100);
+stats.setCompactionPressure((int) (rsServices.getCompactionPressure() * 
100 > 100 ? 100
+: rsServices.getCompactionPressure() * 100));
 return stats.build();
   }
 



[2/3] hbase git commit: HBASE-19388 - Incorrect value is being set for Compaction Pressure in RegionLoadStats object inside HRegion class

2017-11-30 Thread apurtell
HBASE-19388 - Incorrect value is being set for Compaction Pressure in 
RegionLoadStats object inside HRegion class

Signed-off-by: tedyu 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/7fde0fdc
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/7fde0fdc
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/7fde0fdc

Branch: refs/heads/branch-1.3
Commit: 7fde0fdcc19e37f9cbc66c3bcbb95bb3465faad3
Parents: 5dc97e2
Author: Harshal Deepakkumar Jain 

Authored: Thu Nov 30 18:08:01 2017 +0530
Committer: Andrew Purtell 
Committed: Thu Nov 30 15:07:54 2017 -0800

--
 .../main/java/org/apache/hadoop/hbase/regionserver/HRegion.java  | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/7fde0fdc/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index b25e11b..99cdf5f 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -7215,8 +7215,8 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
 stats.setHeapOccupancy((int)(occupancy * 100));
   }
 }
-stats.setCompactionPressure((int)rsServices.getCompactionPressure()*100 > 
100 ? 100 :
-(int)rsServices.getCompactionPressure()*100);
+stats.setCompactionPressure((int) (rsServices.getCompactionPressure() * 
100 > 100 ? 100
+: rsServices.getCompactionPressure() * 100));
 return stats.build();
   }
 



[3/3] hbase git commit: HBASE-19285 Implements table-level latency histograms

2017-11-30 Thread elserj
HBASE-19285 Implements table-level latency histograms

For a regionserver's view of a table (the regions
that belong to a table hosted on a regionserver),
this change tracks the latencies of operations that
affect the regions for this table.

Tracking at the per-table level avoids the memory bloat
and performance impact that accompanied the previous
per-region latency metrics while still providing important
details for operators to consume.

Signed-Off-By: Andrew Purtell 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/5dc97e29
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/5dc97e29
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/5dc97e29

Branch: refs/heads/branch-1.3
Commit: 5dc97e298f79327198507c8f8f26270fdced40f9
Parents: 561f336
Author: Josh Elser 
Authored: Fri Nov 17 13:39:43 2017 -0500
Committer: Josh Elser 
Committed: Thu Nov 30 17:45:57 2017 -0500

--
 .../regionserver/MetricsTableLatencies.java | 107 +
 .../hadoop/hbase/test/MetricsAssertHelper.java  |   7 +
 .../regionserver/MetricsTableLatenciesImpl.java | 152 +++
 ...oop.hbase.regionserver.MetricsTableLatencies |  17 +++
 .../hbase/test/MetricsAssertHelperImpl.java |   9 +-
 .../hbase/regionserver/HRegionServer.java   |   3 +-
 .../hbase/regionserver/MetricsRegionServer.java |  63 ++--
 .../hbase/regionserver/RSRpcServices.java   |  30 ++--
 .../regionserver/RegionServerTableMetrics.java  |  63 
 .../regionserver/TestMetricsRegionServer.java   |  23 +--
 .../regionserver/TestMetricsTableLatencies.java |  68 +
 11 files changed, 508 insertions(+), 34 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/5dc97e29/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableLatencies.java
--
diff --git 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableLatencies.java
 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableLatencies.java
new file mode 100644
index 000..46232bd
--- /dev/null
+++ 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableLatencies.java
@@ -0,0 +1,107 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to you under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+/**
+ * Latency metrics for a specific table in a RegionServer.
+ */
+public interface MetricsTableLatencies {
+
+  /**
+   * The name of the metrics
+   */
+  String METRICS_NAME = "TableLatencies";
+
+  /**
+   * The name of the metrics context that metrics will be under.
+   */
+  String METRICS_CONTEXT = "regionserver";
+
+  /**
+   * Description
+   */
+  String METRICS_DESCRIPTION = "Metrics about Tables on a single HBase 
RegionServer";
+
+  /**
+   * The name of the metrics context that metrics will be under in jmx
+   */
+  String METRICS_JMX_CONTEXT = "RegionServer,sub=" + METRICS_NAME;
+
+  String GET_TIME = "getTime";
+  String SCAN_TIME = "scanTime";
+  String SCAN_SIZE = "scanSize";
+  String PUT_TIME = "putTime";
+  String DELETE_TIME = "deleteTime";
+  String INCREMENT_TIME = "incrementTime";
+  String APPEND_TIME = "appendTime";
+
+  /**
+   * Update the Put time histogram
+   *
+   * @param tableName The table the metric is for
+   * @param t time it took
+   */
+  void updatePut(String tableName, long t);
+
+  /**
+   * Update the Delete time histogram
+   *
+   * @param tableName The table the metric is for
+   * @param t time it took
+   */
+  void updateDelete(String tableName, long t);
+
+  /**
+   * Update the Get time histogram .
+   *
+   * @param tableName The table the metric is for
+   * @param t time it took
+   */
+  void updateGet(String tableName, long t);
+
+  /**
+   * Update the Increment time histogram.
+   *
+   * @param tableName The table the metric is for
+   * @param t time it took
+   */
+  void updateIncrement(String tableName, long t);
+
+  /**
+   * Update the Append time histogram.
+   *
+   

[2/3] hbase git commit: HBASE-19285 Implements table-level latency histograms

2017-11-30 Thread elserj
HBASE-19285 Implements table-level latency histograms

For a regionserver's view of a table (the regions
that belong to a table hosted on a regionserver),
this change tracks the latencies of operations that
affect the regions for this table.

Tracking at the per-table level avoids the memory bloat
and performance impact that accompanied the previous
per-region latency metrics while still providing important
details for operators to consume.

Signed-Off-By: Andrew Purtell 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/4846300c
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/4846300c
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/4846300c

Branch: refs/heads/branch-1
Commit: 4846300c8bef8fe36d64a2f6e251ce7c1f45e96b
Parents: ac7fd29
Author: Josh Elser 
Authored: Fri Nov 17 13:39:43 2017 -0500
Committer: Josh Elser 
Committed: Thu Nov 30 17:45:23 2017 -0500

--
 .../regionserver/MetricsTableLatencies.java | 125 +
 .../hadoop/hbase/test/MetricsAssertHelper.java  |   7 +
 .../regionserver/MetricsTableLatenciesImpl.java | 175 +++
 ...oop.hbase.regionserver.MetricsTableLatencies |  17 ++
 .../hbase/test/MetricsAssertHelperImpl.java |   9 +-
 .../hbase/regionserver/HRegionServer.java   |   3 +-
 .../hbase/regionserver/MetricsRegionServer.java |  71 ++--
 .../hbase/regionserver/RSRpcServices.java   |  41 +++--
 .../regionserver/RegionServerTableMetrics.java  |  71 
 .../regionserver/TestMetricsRegionServer.java   |  26 +--
 .../regionserver/TestMetricsTableLatencies.java |  68 +++
 11 files changed, 572 insertions(+), 41 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/4846300c/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableLatencies.java
--
diff --git 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableLatencies.java
 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableLatencies.java
new file mode 100644
index 000..67e651a
--- /dev/null
+++ 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableLatencies.java
@@ -0,0 +1,125 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to you under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+/**
+ * Latency metrics for a specific table in a RegionServer.
+ */
+public interface MetricsTableLatencies {
+
+  /**
+   * The name of the metrics
+   */
+  String METRICS_NAME = "TableLatencies";
+
+  /**
+   * The name of the metrics context that metrics will be under.
+   */
+  String METRICS_CONTEXT = "regionserver";
+
+  /**
+   * Description
+   */
+  String METRICS_DESCRIPTION = "Metrics about Tables on a single HBase 
RegionServer";
+
+  /**
+   * The name of the metrics context that metrics will be under in jmx
+   */
+  String METRICS_JMX_CONTEXT = "RegionServer,sub=" + METRICS_NAME;
+
+  String GET_TIME = "getTime";
+  String SCAN_TIME = "scanTime";
+  String SCAN_SIZE = "scanSize";
+  String PUT_TIME = "putTime";
+  String PUT_BATCH_TIME = "putBatchTime";
+  String DELETE_TIME = "deleteTime";
+  String DELETE_BATCH_TIME = "deleteBatchTime";
+  String INCREMENT_TIME = "incrementTime";
+  String APPEND_TIME = "appendTime";
+
+  /**
+   * Update the Put time histogram
+   *
+   * @param tableName The table the metric is for
+   * @param t time it took
+   */
+  void updatePut(String tableName, long t);
+
+  /**
+   * Update the batch Put time histogram
+   *
+   * @param tableName The table the metric is for
+   * @param t time it took
+   */
+  void updatePutBatch(String tableName, long t);
+
+  /**
+   * Update the Delete time histogram
+   *
+   * @param tableName The table the metric is for
+   * @param t time it took
+   */
+  void updateDelete(String tableName, long t);
+
+  /**
+   * Update the batch Delete time histogram
+   *
+   * @param tableName The table the metric is for
+   * @param t time it took
+   */
+  void upd

[1/3] hbase git commit: HBASE-19285 Implements table-level latency histograms

2017-11-30 Thread elserj
Repository: hbase
Updated Branches:
  refs/heads/branch-1 ac7fd29f7 -> 4846300c8
  refs/heads/branch-1.3 561f336c1 -> 5dc97e298
  refs/heads/branch-1.4 b52c424d2 -> c1a1c97e8


HBASE-19285 Implements table-level latency histograms

For a regionserver's view of a table (the regions
that belong to a table hosted on a regionserver),
this change tracks the latencies of operations that
affect the regions for this table.

Tracking at the per-table level avoids the memory bloat
and performance impact that accompanied the previous
per-region latency metrics while still providing important
details for operators to consume.

Signed-Off-By: Andrew Purtell 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/c1a1c97e
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/c1a1c97e
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/c1a1c97e

Branch: refs/heads/branch-1.4
Commit: c1a1c97e842a6f1fcedbc51f315be01ca9150953
Parents: b52c424
Author: Josh Elser 
Authored: Fri Nov 17 13:39:43 2017 -0500
Committer: Josh Elser 
Committed: Thu Nov 30 17:44:50 2017 -0500

--
 .../regionserver/MetricsTableLatencies.java | 125 +
 .../hadoop/hbase/test/MetricsAssertHelper.java  |   7 +
 .../regionserver/MetricsTableLatenciesImpl.java | 175 +++
 ...oop.hbase.regionserver.MetricsTableLatencies |  17 ++
 .../hbase/test/MetricsAssertHelperImpl.java |   9 +-
 .../hbase/regionserver/HRegionServer.java   |   3 +-
 .../hbase/regionserver/MetricsRegionServer.java |  71 ++--
 .../hbase/regionserver/RSRpcServices.java   |  41 +++--
 .../regionserver/RegionServerTableMetrics.java  |  71 
 .../regionserver/TestMetricsRegionServer.java   |  26 +--
 .../regionserver/TestMetricsTableLatencies.java |  68 +++
 11 files changed, 572 insertions(+), 41 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/c1a1c97e/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableLatencies.java
--
diff --git 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableLatencies.java
 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableLatencies.java
new file mode 100644
index 000..67e651a
--- /dev/null
+++ 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableLatencies.java
@@ -0,0 +1,125 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to you under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+/**
+ * Latency metrics for a specific table in a RegionServer.
+ */
+public interface MetricsTableLatencies {
+
+  /**
+   * The name of the metrics
+   */
+  String METRICS_NAME = "TableLatencies";
+
+  /**
+   * The name of the metrics context that metrics will be under.
+   */
+  String METRICS_CONTEXT = "regionserver";
+
+  /**
+   * Description
+   */
+  String METRICS_DESCRIPTION = "Metrics about Tables on a single HBase 
RegionServer";
+
+  /**
+   * The name of the metrics context that metrics will be under in jmx
+   */
+  String METRICS_JMX_CONTEXT = "RegionServer,sub=" + METRICS_NAME;
+
+  String GET_TIME = "getTime";
+  String SCAN_TIME = "scanTime";
+  String SCAN_SIZE = "scanSize";
+  String PUT_TIME = "putTime";
+  String PUT_BATCH_TIME = "putBatchTime";
+  String DELETE_TIME = "deleteTime";
+  String DELETE_BATCH_TIME = "deleteBatchTime";
+  String INCREMENT_TIME = "incrementTime";
+  String APPEND_TIME = "appendTime";
+
+  /**
+   * Update the Put time histogram
+   *
+   * @param tableName The table the metric is for
+   * @param t time it took
+   */
+  void updatePut(String tableName, long t);
+
+  /**
+   * Update the batch Put time histogram
+   *
+   * @param tableName The table the metric is for
+   * @param t time it took
+   */
+  void updatePutBatch(String tableName, long t);
+
+  /**
+   * Update the Delete time histogram
+   *
+   * @param tableName The table the metric is for
+   * @param t time it took
+   */
+  void updateDelete(Strin
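
A minimal sketch of how a caller is meant to feed these per-table histograms;
the timing wrapper below is illustrative (only the updatePut signature comes
from the patch, and the time unit is whatever the caller measures):

// Illustrative caller: time an operation, record latency keyed by table name.
import org.apache.hadoop.hbase.regionserver.MetricsTableLatencies;

final class LatencyRecorder {
  private final MetricsTableLatencies metrics;

  LatencyRecorder(MetricsTableLatencies metrics) { this.metrics = metrics; }

  void timedPut(String tableName, Runnable put) {
    long start = System.nanoTime();
    try {
      put.run();
    } finally {
      // One histogram per (table, operation) pair -- cheaper than the old
      // per-region metrics, while still useful to operators.
      metrics.updatePut(tableName, System.nanoTime() - start);
    }
  }
}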

hbase git commit: HBASE-19350 TestMetaWithReplicas is flaky

2017-11-30 Thread chia7712
Repository: hbase
Updated Branches:
  refs/heads/branch-1.3 04f1029c0 -> 561f336c1


HBASE-19350 TestMetaWithReplicas is flaky


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/561f336c
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/561f336c
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/561f336c

Branch: refs/heads/branch-1.3
Commit: 561f336c1d224fbed396533f88c1d32a006f8c2a
Parents: 04f1029
Author: Chia-Ping Tsai 
Authored: Fri Dec 1 04:01:15 2017 +0800
Committer: Chia-Ping Tsai 
Committed: Fri Dec 1 04:02:42 2017 +0800

--
 .../src/main/java/org/apache/hadoop/hbase/ClusterStatus.java| 5 -
 1 file changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/561f336c/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java
index f1e2d56..b55c5db 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java
@@ -276,7 +276,10 @@ public class ClusterStatus extends VersionedWritable {
 
   @InterfaceAudience.Private
   public Map getRegionsInTransition() {
-return this.intransition;
+if (intransition == null) {
+  return Collections.EMPTY_MAP;
+}
+return Collections.unmodifiableMap(intransition);
   }
 
   public String getClusterId() {
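
The same null-safe, read-only getter pattern is applied on every branch below
(each returning an empty or unmodifiable collection of that branch's type).
Distilled into a standalone form:

// Distilled pattern from the fix: never hand out null or a mutable view.
import java.util.Collections;
import java.util.List;

final class Status {
  private final List<String> inTransition; // may be null if never populated

  Status(List<String> inTransition) { this.inTransition = inTransition; }

  List<String> getRegionsInTransition() {
    if (inTransition == null) {
      return Collections.emptyList(); // callers never see null
    }
    return Collections.unmodifiableList(inTransition); // and cannot mutate us
  }
}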



hbase git commit: HBASE-19350 TestMetaWithReplicas is flaky

2017-11-30 Thread chia7712
Repository: hbase
Updated Branches:
  refs/heads/branch-1.2 93b380dd9 -> 9ab637c22


HBASE-19350 TestMetaWithReplicas is flaky


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/9ab637c2
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/9ab637c2
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/9ab637c2

Branch: refs/heads/branch-1.2
Commit: 9ab637c2292e36313b22b41be1e86809c6ac60f6
Parents: 93b380d
Author: Chia-Ping Tsai 
Authored: Fri Dec 1 04:01:15 2017 +0800
Committer: Chia-Ping Tsai 
Committed: Fri Dec 1 04:01:15 2017 +0800

--
 .../src/main/java/org/apache/hadoop/hbase/ClusterStatus.java| 5 -
 1 file changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/9ab637c2/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java
index c8caa96..9f00373 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java
@@ -276,7 +276,10 @@ public class ClusterStatus extends VersionedWritable {
 
   @InterfaceAudience.Private
   public Map getRegionsInTransition() {
-return this.intransition;
+if (intransition == null) {
+  return Collections.EMPTY_MAP;
+}
+return Collections.unmodifiableMap(intransition);
   }
 
   public String getClusterId() {



hbase git commit: HBASE-19350 TestMetaWithReplicas is flaky

2017-11-30 Thread chia7712
Repository: hbase
Updated Branches:
  refs/heads/master e0dd26de8 -> c64546aa3


HBASE-19350 TestMetaWithReplicas is flaky


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/c64546aa
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/c64546aa
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/c64546aa

Branch: refs/heads/master
Commit: c64546aa3130b9f39d878bb11c81825ebc61ec49
Parents: e0dd26d
Author: Chia-Ping Tsai 
Authored: Fri Dec 1 03:39:44 2017 +0800
Committer: Chia-Ping Tsai 
Committed: Fri Dec 1 03:39:44 2017 +0800

--
 .../src/main/java/org/apache/hadoop/hbase/ClusterStatus.java| 5 -
 1 file changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/c64546aa/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java
index 351b0c8..9c3cc73 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java
@@ -296,7 +296,10 @@ public class ClusterStatus {
 
   @InterfaceAudience.Private
   public List getRegionsInTransition() {
-return this.intransition;
+if (intransition == null) {
+  return Collections.emptyList();
+}
+return Collections.unmodifiableList(intransition);
   }
 
   public String getClusterId() {



hbase git commit: HBASE-19350 TestMetaWithReplicas is flaky

2017-11-30 Thread chia7712
Repository: hbase
Updated Branches:
  refs/heads/branch-1.4 8492952a9 -> b52c424d2


HBASE-19350 TestMetaWithReplicas is flaky


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/b52c424d
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/b52c424d
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/b52c424d

Branch: refs/heads/branch-1.4
Commit: b52c424d2cd86433d6e64f2cb309edc286c38e90
Parents: 8492952
Author: Chia-Ping Tsai 
Authored: Fri Dec 1 03:44:46 2017 +0800
Committer: Chia-Ping Tsai 
Committed: Fri Dec 1 03:47:40 2017 +0800

--
 .../src/main/java/org/apache/hadoop/hbase/ClusterStatus.java| 5 -
 1 file changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/b52c424d/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java
index 2eb1162..e2f10d3 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java
@@ -303,7 +303,10 @@ public class ClusterStatus extends VersionedWritable {
 
   @InterfaceAudience.Private
   public Set getRegionsInTransition() {
-return this.intransition;
+if (intransition == null) {
+  return Collections.emptySet();
+}
+return Collections.unmodifiableSet(intransition);
   }
 
   public String getClusterId() {



hbase git commit: HBASE-19350 TestMetaWithReplicas is flaky

2017-11-30 Thread chia7712
Repository: hbase
Updated Branches:
  refs/heads/branch-1 1e0067304 -> ac7fd29f7


HBASE-19350 TestMetaWithReplicas is flaky


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/ac7fd29f
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/ac7fd29f
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/ac7fd29f

Branch: refs/heads/branch-1
Commit: ac7fd29f7e38f8c83ac14e052c7a4d81dd748bcd
Parents: 1e00673
Author: Chia-Ping Tsai 
Authored: Fri Dec 1 03:44:46 2017 +0800
Committer: Chia-Ping Tsai 
Committed: Fri Dec 1 03:44:46 2017 +0800

--
 .../src/main/java/org/apache/hadoop/hbase/ClusterStatus.java| 5 -
 1 file changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/ac7fd29f/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java
index 2eb1162..e2f10d3 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java
@@ -303,7 +303,10 @@ public class ClusterStatus extends VersionedWritable {
 
   @InterfaceAudience.Private
   public Set getRegionsInTransition() {
-return this.intransition;
+if (intransition == null) {
+  return Collections.emptySet();
+}
+return Collections.unmodifiableSet(intransition);
   }
 
   public String getClusterId() {



hbase git commit: HBASE-19350 TestMetaWithReplicas is flaky

2017-11-30 Thread chia7712
Repository: hbase
Updated Branches:
  refs/heads/branch-2 49eaa7a8f -> 2bda22a64


HBASE-19350 TestMetaWithReplicas is flaky


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/2bda22a6
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/2bda22a6
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/2bda22a6

Branch: refs/heads/branch-2
Commit: 2bda22a64e1cf612bd37b57b1fef1bdcb478df21
Parents: 49eaa7a
Author: Chia-Ping Tsai 
Authored: Fri Dec 1 03:39:44 2017 +0800
Committer: Chia-Ping Tsai 
Committed: Fri Dec 1 03:40:17 2017 +0800

--
 .../src/main/java/org/apache/hadoop/hbase/ClusterStatus.java| 5 -
 1 file changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/2bda22a6/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java
index 351b0c8..9c3cc73 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java
@@ -296,7 +296,10 @@ public class ClusterStatus {
 
   @InterfaceAudience.Private
   public List getRegionsInTransition() {
-return this.intransition;
+if (intransition == null) {
+  return Collections.emptyList();
+}
+return Collections.unmodifiableList(intransition);
   }
 
   public String getClusterId() {



hbase git commit: HBASE-19388 - Incorrect value is being set for Compaction Pressure in RegionLoadStats object inside HRegion class

2017-11-30 Thread tedyu
Repository: hbase
Updated Branches:
  refs/heads/branch-2 e20a7574d -> 49eaa7a8f


HBASE-19388 - Incorrect value is being set for Compaction Pressure in 
RegionLoadStats object inside HRegion class

Signed-off-by: tedyu 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/49eaa7a8
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/49eaa7a8
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/49eaa7a8

Branch: refs/heads/branch-2
Commit: 49eaa7a8f9a613de610863c181f9af5dc22ee538
Parents: e20a757
Author: Harshal Deepakkumar Jain 

Authored: Thu Nov 30 18:08:01 2017 +0530
Committer: tedyu 
Committed: Thu Nov 30 08:48:27 2017 -0800

--
 .../main/java/org/apache/hadoop/hbase/regionserver/HRegion.java  | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/49eaa7a8/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index e5e0729..364c32a 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -7189,8 +7189,8 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
 stats.setHeapOccupancy((int)(occupancy * 100));
   }
 }
-stats.setCompactionPressure((int)rsServices.getCompactionPressure()*100 > 
100 ? 100 :
-(int)rsServices.getCompactionPressure()*100);
+stats.setCompactionPressure((int) (rsServices.getCompactionPressure() * 
100 > 100 ? 100
+: rsServices.getCompactionPressure() * 100));
 return stats.build();
   }
 



hbase git commit: HBASE-19388 - Incorrect value is being set for Compaction Pressure in RegionLoadStats object inside HRegion class

2017-11-30 Thread tedyu
Repository: hbase
Updated Branches:
  refs/heads/master 6a6409a30 -> e0dd26de8


HBASE-19388 - Incorrect value is being set for Compaction Pressure in 
RegionLoadStats object inside HRegion class

Signed-off-by: tedyu 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/e0dd26de
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/e0dd26de
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/e0dd26de

Branch: refs/heads/master
Commit: e0dd26de8e75a14e2332827e1c092a24f08f640d
Parents: 6a6409a
Author: Harshal Deepakkumar Jain 

Authored: Thu Nov 30 18:08:01 2017 +0530
Committer: tedyu 
Committed: Thu Nov 30 08:47:23 2017 -0800

--
 .../main/java/org/apache/hadoop/hbase/regionserver/HRegion.java  | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/e0dd26de/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index e5e0729..364c32a 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -7189,8 +7189,8 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
 stats.setHeapOccupancy((int)(occupancy * 100));
   }
 }
-stats.setCompactionPressure((int)rsServices.getCompactionPressure()*100 > 
100 ? 100 :
-(int)rsServices.getCompactionPressure()*100);
+stats.setCompactionPressure((int) (rsServices.getCompactionPressure() * 
100 > 100 ? 100
+: rsServices.getCompactionPressure() * 100));
 return stats.build();
   }
 



[46/51] [partial] hbase-site git commit: Published site at .

2017-11-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/713d773f/book.html
--
diff --git a/book.html b/book.html
index fe11dbb..c9759ad 100644
--- a/book.html
+++ b/book.html
@@ -2899,7 +2899,7 @@ Some configurations would only appear in source code; the 
only way to identify t
 
 
 Default
-35
+10
 
 
 
@@ -27431,11 +27431,14 @@ If a slave cluster does run out of room, or is 
inaccessible for other reasons, i
 
 Consistency Across Replicated Clusters
 
-How your application builds on top of the HBase API matters when 
replication is in play. HBase’s replication system provides at-least-once 
delivery of client edits for an enabled column family to each configured 
destination cluster. In the event of failure to reach a given destination, the 
replication system will retry sending edits in a way that might repeat a given 
message. Further more, there is not a guaranteed order of delivery for client 
edits. In the event of a RegionServer failing, recovery of the replication 
queue happens independent of recovery of the individual regions that server was 
previously handling. This means that it is possible for the not-yet-replicated 
edits to be serviced by a RegionServer that is currently slower to replicate 
than the one that handles edits from after the failure.
+How your application builds on top of the HBase API matters when 
replication is in play. HBase’s replication system provides at-least-once 
delivery of client edits for an enabled column family to each configured 
destination cluster. In the event of failure to reach a given destination, the 
replication system will retry sending edits in a way that might repeat a given 
message. HBase provides two modes of replication: the original mode and serial 
replication. In the original mode, there is no guaranteed order of delivery for 
client edits. In the event of a RegionServer failing, recovery of the 
replication queue happens independently of recovery of the individual regions 
that server was previously handling. This means that it is possible for the 
not-yet-replicated edits to be serviced by a RegionServer that is currently 
slower to replicate than the one that handles edits from after the failure.
 
 
 The combination of these two properties (at-least-once delivery and the 
lack of message ordering) means that some destination clusters may end up in a 
different state if your application makes use of operations that are not 
idempotent, e.g. Increments.
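
To make the idempotency point concrete: replaying a Put leaves the same cell
value, while replaying an Increment double-counts. A toy sketch of the
arithmetic (plain Java, not HBase client code):

public class IdempotencyDemo {
  public static void main(String[] args) {
    // An Increment of +5 delivered twice (at-least-once) drifts the replica.
    long counter = 0;
    counter += 5; // first delivery
    counter += 5; // duplicate delivery
    System.out.println("increment replayed: " + counter); // 10, expected 5

    // A Put of value 5 delivered twice converges to the same state.
    long cell;
    cell = 5; // first delivery
    cell = 5; // duplicate delivery is harmless
    System.out.println("put replayed: " + cell); // 5, as expected
  }
}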
 
+
+To solve this problem, HBase now supports serial replication, which sends
edits to the destination cluster in the same order as the client requests
arrived at the source cluster.
+
 
 
 
@@ -27518,6 +27521,10 @@ Create tables with the same names and column families 
on both the source and des
 LOG.info("Replicating "+clusterId + " -> " + peerClusterId);
 
 
+
+Serial Replication Configuration
+See Serial Replication
+
 
 Cluster Management Commands
 
@@ -27569,7 +27576,60 @@ replication as long as peers exist.
 
 
 
-150.3. Verifying Replicated Data
+150.3. Serial Replication
+
+Note: this feature was introduced in HBase 1.5
+
+
+Function of serial replication
Serial replication pushes logs to the destination cluster in the same order in
which they arrive at the source cluster.
+
+
+Why is serial replication needed?
In HBase replication, mutations are pushed to the destination cluster by
reading the WAL on each region server. WAL files are kept in a queue so they
can be read in order of creation time. However, when a region move or RS
failure occurs in the source cluster, the WAL entries that were not yet pushed
are pushed by the original RS (for a region move) or by another RS that takes
over the remaining WAL of the dead RS (for an RS failure), while new entries
for the same region(s) are pushed by the RS that now serves the region(s).
These servers push WAL entries for the same region concurrently, without
coordination.
+
+
+This can lead to data inconsistency between the source and destination
clusters:
+
+
+
+
a put and then a delete are written to the source cluster.
+
+
due to a region move or RS failure, they are pushed to the peer cluster by
different replication-source threads.
+
+
if the delete is pushed to the peer cluster before the put, and a flush and
major compaction occur in the peer cluster before the put arrives, the delete
marker is collected while the put remains in the peer cluster; in the source
cluster the put is masked by the delete, hence data inconsistency between the
source and destination clusters (see the toy model below).
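
A toy model of the scenario above, using a plain map in place of a region (so
delete markers and compaction are only simulated in comments); the row key
"row1" and value "v1" are made-up examples:

import java.util.HashMap;
import java.util.Map;

public class ReplicationReorderDemo {
  public static void main(String[] args) {
    // Source cluster applies the edits in client order: put, then delete.
    Map<String, String> source = new HashMap<>();
    source.put("row1", "v1"); // put
    source.remove("row1");    // delete masks the put
    System.out.println("source sees: " + source.get("row1")); // null

    // Peer cluster receives the edits reordered: the delete arrives first,
    // a flush + major compaction collects the delete marker, and only then
    // does the put arrive -- with nothing left to mask it.
    Map<String, String> peer = new HashMap<>();
    peer.remove("row1");      // delete delivered first (nothing to delete yet)
    // ... flush and major compaction discard the delete marker here ...
    peer.put("row1", "v1");   // put delivered last; it is never masked
    System.out.println("peer sees:   " + peer.get("row1"));   // v1 -- inconsistent
  }
}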
+
+
+
+
+Serial replication configuration
+
+
+Set REPLICATION_SCOPE => 2 on the column family that is to be replicated
serially when creating the table.
+
+
+REPLICATION_SCOPE is a column-family-level attribute. Its value can be 0,
1, or 2. Value 0 means replication is disabled, 1 means replication is enabled
without a guaranteed log order, and 2 means serial replication is
enabled.
+
+
+
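
As a concrete illustration of the setting above, a sketch using the HBase 2.x
Admin API (the table name t1 and family cf are made-up examples; the shell
equivalent would be create 't1', {NAME => 'cf', REPLICATION_SCOPE => 2}):

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateSeriallyReplicatedTable {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Scope 0 = not replicated, 1 = replicated without order guarantees,
      // 2 = serially replicated, as described above.
      admin.createTable(TableDescriptorBuilder.newBuilder(TableName.valueOf("t1"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
              .setScope(2)
              .build())
          .build());
    }
  }
}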

[33/51] [partial] hbase-site git commit: Published site at .

2017-11-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/713d773f/devapidocs/org/apache/hadoop/hbase/master/assignment/RegionStates.RegionStateNode.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/assignment/RegionStates.RegionStateNode.html
 
b/devapidocs/org/apache/hadoop/hbase/master/assignment/RegionStates.RegionStateNode.html
index ffb1bb9..e3f5cf0 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/master/assignment/RegionStates.RegionStateNode.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/master/assignment/RegionStates.RegionStateNode.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10};
+var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10};
 var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -117,7 +117,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-public static class RegionStates.RegionStateNode
+public static class RegionStates.RegionStateNode
 extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">Object
 implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true";
 title="class or interface in java.lang">Comparable
 Current Region State.
@@ -309,22 +309,26 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 toDescriptiveString() 
 
 
+RegionState
+toRegionState() 
+
+
 http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String
 toShortString() 
 
-
+
 http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String
 toString() 
 
-
-RegionState.State
+
+void
 transitionState(RegionState.State update,
RegionState.State... expected)
 Set new RegionState.State but only if 
currently in expected State
  (if not, throw UnexpectedStateException.
 
 
-
+
 boolean
 unsetProcedure(RegionTransitionProcedure proc) 
 
@@ -356,7 +360,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 
 
 regionInfo
-private final RegionInfo regionInfo
+private final RegionInfo regionInfo
 
 
 
@@ -365,7 +369,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 
 
 event
-private final ProcedureEvent 
event
+private final ProcedureEvent 
event
 
 
 
@@ -374,7 +378,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 
 
 procedure
-private volatile RegionTransitionProcedure procedure
+private volatile RegionTransitionProcedure procedure
 
 
 
@@ -383,7 +387,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 
 
 regionLocation
-private volatile ServerName regionLocation
+private volatile ServerName regionLocation
 
 
 
@@ -392,7 +396,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 
 
 lastHost
-private volatile ServerName lastHost
+private volatile ServerName lastHost
 
 
 
@@ -401,7 +405,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 
 
 state
-private volatile RegionState.State state
+private volatile RegionState.State state
 A Region-in-Transition (RIT) moves through states.
  See RegionState.State for complete 
list. A Region that
  is opened moves from OFFLINE => OPENING => OPENED.
@@ -413,7 +417,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 
 
 lastUpdate
-private volatile long lastUpdate
+private volatile long lastUpdate
 Updated whenever a call to setRegionLocation(ServerName)
  or #setState(State, State...).
 
@@ -424,7 +428,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 
 
 openSeqNum
-private volatile long openSeqNum
+private volatile long openSeqNum
 
 
 
@@ -441,7 +445,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 
 
 RegionStateNode
-public RegionStateNode(RegionInfo regionInfo)
+public RegionStateNode(RegionInfo regionInfo)
 
 
 
@@ -458,8 +462,15 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 
 
 setState
-public boolean setState(RegionState.State update,
+public boolean setState(RegionState.State update,
 RegionState.State... expected)
+
+Parameters:
+update - new region state this node should be assigned.
+expected - current state

[05/51] [partial] hbase-site git commit: Published site at .

2017-11-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/713d773f/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.StorageTypeSetter.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.StorageTypeSetter.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.StorageTypeSetter.html
index 25e368d..d0f781f 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.StorageTypeSetter.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.StorageTypeSetter.html
@@ -25,798 +25,798 @@
 017 */
 018package 
org.apache.hadoop.hbase.io.asyncfs;
 019
-020import static 
org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelOption.CONNECT_TIMEOUT_MILLIS;
-021import static 
org.apache.hadoop.hbase.shaded.io.netty.handler.timeout.IdleState.READER_IDLE;
-022import static 
org.apache.hadoop.fs.CreateFlag.CREATE;
-023import static 
org.apache.hadoop.fs.CreateFlag.OVERWRITE;
-024import static 
org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createEncryptor;
-025import static 
org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.trySaslNegotiate;
+020import static 
org.apache.hadoop.fs.CreateFlag.CREATE;
+021import static 
org.apache.hadoop.fs.CreateFlag.OVERWRITE;
+022import static 
org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createEncryptor;
+023import static 
org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.trySaslNegotiate;
+024import static 
org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelOption.CONNECT_TIMEOUT_MILLIS;
+025import static 
org.apache.hadoop.hbase.shaded.io.netty.handler.timeout.IdleState.READER_IDLE;
 026import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY;
 027import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_USE_DN_HOSTNAME;
 028import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_USE_DN_HOSTNAME_DEFAULT;
 029import static 
org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage.PIPELINE_SETUP_CREATE;
 030
-031import 
org.apache.hadoop.hbase.shaded.com.google.common.base.Throwables;
-032import 
org.apache.hadoop.hbase.shaded.com.google.common.collect.ImmutableMap;
-033import 
com.google.protobuf.CodedOutputStream;
-034
-035import 
org.apache.hadoop.hbase.shaded.io.netty.bootstrap.Bootstrap;
-036import 
org.apache.hadoop.hbase.shaded.io.netty.buffer.ByteBuf;
-037import 
org.apache.hadoop.hbase.shaded.io.netty.buffer.ByteBufAllocator;
-038import 
org.apache.hadoop.hbase.shaded.io.netty.buffer.ByteBufOutputStream;
-039import 
org.apache.hadoop.hbase.shaded.io.netty.buffer.PooledByteBufAllocator;
-040import 
org.apache.hadoop.hbase.shaded.io.netty.channel.Channel;
-041import 
org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelFuture;
-042import 
org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelFutureListener;
-043import 
org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelHandler;
-044import 
org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelHandlerContext;
-045import 
org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelInitializer;
-046import 
org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelPipeline;
-047import 
org.apache.hadoop.hbase.shaded.io.netty.channel.EventLoop;
-048import 
org.apache.hadoop.hbase.shaded.io.netty.channel.SimpleChannelInboundHandler;
-049import 
org.apache.hadoop.hbase.shaded.io.netty.handler.codec.protobuf.ProtobufDecoder;
-050import 
org.apache.hadoop.hbase.shaded.io.netty.handler.codec.protobuf.ProtobufVarint32FrameDecoder;
-051import 
org.apache.hadoop.hbase.shaded.io.netty.handler.timeout.IdleStateEvent;
-052import 
org.apache.hadoop.hbase.shaded.io.netty.handler.timeout.IdleStateHandler;
-053import 
org.apache.hadoop.hbase.shaded.io.netty.util.concurrent.Future;
-054import 
org.apache.hadoop.hbase.shaded.io.netty.util.concurrent.FutureListener;
-055import 
org.apache.hadoop.hbase.shaded.io.netty.util.concurrent.Promise;
-056
-057import java.io.IOException;
-058import 
java.lang.reflect.InvocationTargetException;
-059import java.lang.reflect.Method;
-060import java.util.ArrayList;
-061import java.util.EnumSet;
-062import java.util.List;
-063import java.util.concurrent.TimeUnit;
-064
-065import org.apache.commons.logging.Log;
-066import 
org.apache.commons.logging.LogFactory;
-067import 
org.apache.hadoop.conf.Configuration;
-068import 
org.apache.hadoop.crypto.CryptoProtocolVersion;
-069import 
org.apache.hadoop.crypto.Encryptor;
-070import org.apache.hadoop.fs.CreateFlag;
-071import org.apache.hadoop.fs.FileSystem;
-072import 
org.apache.hadoop.fs.FileSystemLinkResolver;
-073import org.apache.hadoop.fs.Path;
-074import 
org.apache.hadoop.fs.UnresolvedLinkException;
-075import 
org.apac

[01/51] [partial] hbase-site git commit: Published site at .

2017-11-30 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 51b7ea776 -> 713d773f1


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/713d773f/devapidocs/src-html/org/apache/hadoop/hbase/master/RegionState.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/RegionState.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/RegionState.html
index 2daacb5..fb5cc60 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/RegionState.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/RegionState.html
@@ -25,413 +25,414 @@
 017 */
 018package org.apache.hadoop.hbase.master;
 019
-020import java.util.Date;
-021
-022import 
org.apache.hadoop.hbase.ServerName;
-023import 
org.apache.hadoop.hbase.client.RegionInfo;
-024import 
org.apache.yetus.audience.InterfaceAudience;
-025import 
org.apache.yetus.audience.InterfaceStability;
-026
-027import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-028import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos;
-029
-030/**
-031 * State of a Region while undergoing 
transitions.
-032 * This class is immutable.
-033 */
-034@InterfaceAudience.Private
-035public class RegionState {
-036
-037  @InterfaceAudience.Private
-038  @InterfaceStability.Evolving
-039  public enum State {
-040OFFLINE,// region is in an 
offline state
-041OPENING,// server has begun 
to open but not yet done
-042OPEN,   // server opened 
region and updated meta
-043CLOSING,// server has begun 
to close but not yet done
-044CLOSED, // server closed 
region and updated meta
-045SPLITTING,  // server started 
split of a region
-046SPLIT,  // server completed 
split of a region
-047FAILED_OPEN,// failed to open, 
and won't retry any more
-048FAILED_CLOSE,   // failed to close, 
and won't retry any more
-049MERGING,// server started 
merge a region
-050MERGED, // server completed 
merge a region
-051SPLITTING_NEW,  // new region to be 
created when RS splits a parent
-052// region but hasn't 
be created yet, or master doesn't
-053// know it's already 
created
-054MERGING_NEW;// new region to be 
created when RS merges two
-055// daughter regions 
but hasn't be created yet, or
-056// master doesn't 
know it's already created
-057
-058/**
-059 * Convert to protobuf 
ClusterStatusProtos.RegionState.State
-060 */
-061public 
ClusterStatusProtos.RegionState.State convert() {
-062  
ClusterStatusProtos.RegionState.State rs;
-063  switch (this) {
-064  case OFFLINE:
-065rs = 
ClusterStatusProtos.RegionState.State.OFFLINE;
-066break;
-067  case OPENING:
-068rs = 
ClusterStatusProtos.RegionState.State.OPENING;
-069break;
-070  case OPEN:
-071rs = 
ClusterStatusProtos.RegionState.State.OPEN;
-072break;
-073  case CLOSING:
-074rs = 
ClusterStatusProtos.RegionState.State.CLOSING;
-075break;
-076  case CLOSED:
-077rs = 
ClusterStatusProtos.RegionState.State.CLOSED;
-078break;
-079  case SPLITTING:
-080rs = 
ClusterStatusProtos.RegionState.State.SPLITTING;
-081break;
-082  case SPLIT:
-083rs = 
ClusterStatusProtos.RegionState.State.SPLIT;
-084break;
-085  case FAILED_OPEN:
-086rs = 
ClusterStatusProtos.RegionState.State.FAILED_OPEN;
-087break;
-088  case FAILED_CLOSE:
-089rs = 
ClusterStatusProtos.RegionState.State.FAILED_CLOSE;
-090break;
-091  case MERGING:
-092rs = 
ClusterStatusProtos.RegionState.State.MERGING;
-093break;
-094  case MERGED:
-095rs = 
ClusterStatusProtos.RegionState.State.MERGED;
-096break;
-097  case SPLITTING_NEW:
-098rs = 
ClusterStatusProtos.RegionState.State.SPLITTING_NEW;
-099break;
-100  case MERGING_NEW:
-101rs = 
ClusterStatusProtos.RegionState.State.MERGING_NEW;
-102break;
-103  default:
-104throw new 
IllegalStateException("");
-105  }
-106  return rs;
-107}
-108
-109/**
-110 * Convert a protobuf 
HBaseProtos.RegionState.State to a RegionState.State
-111 *
-112 * @return the RegionState.State
-113 */
-114public static State 
convert(ClusterStatusProtos.RegionState.State protoState) {
-115  State state;
-116  switch (protoState) {
-117  case OFFLINE:
-118state = OFFLINE;
-119break;
-120  case PENDING_OPEN:
-121  case OPENING:
-122state = OPENING;
-123break;
-124  case OPEN:
-125state = OPEN;
-126break;
-127  case PENDING_CLOSE:
-128  case CLOSING:
-129state = CLOSING;
-130break;
-131  case CLOSED:
-132state = CLOS

[08/51] [partial] hbase-site git commit: Published site at .

2017-11-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/713d773f/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.NameNodeException.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.NameNodeException.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.NameNodeException.html
index 25e368d..d0f781f 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.NameNodeException.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.NameNodeException.html
@@ -25,798 +25,798 @@
 017 */
 018package 
org.apache.hadoop.hbase.io.asyncfs;
 019
-020import static 
org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelOption.CONNECT_TIMEOUT_MILLIS;
-021import static 
org.apache.hadoop.hbase.shaded.io.netty.handler.timeout.IdleState.READER_IDLE;
-022import static 
org.apache.hadoop.fs.CreateFlag.CREATE;
-023import static 
org.apache.hadoop.fs.CreateFlag.OVERWRITE;
-024import static 
org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createEncryptor;
-025import static 
org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.trySaslNegotiate;
+020import static 
org.apache.hadoop.fs.CreateFlag.CREATE;
+021import static 
org.apache.hadoop.fs.CreateFlag.OVERWRITE;
+022import static 
org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createEncryptor;
+023import static 
org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.trySaslNegotiate;
+024import static 
org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelOption.CONNECT_TIMEOUT_MILLIS;
+025import static 
org.apache.hadoop.hbase.shaded.io.netty.handler.timeout.IdleState.READER_IDLE;
 026import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY;
 027import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_USE_DN_HOSTNAME;
 028import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_USE_DN_HOSTNAME_DEFAULT;
 029import static 
org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage.PIPELINE_SETUP_CREATE;
 030
-031import 
org.apache.hadoop.hbase.shaded.com.google.common.base.Throwables;
-032import 
org.apache.hadoop.hbase.shaded.com.google.common.collect.ImmutableMap;
-033import 
com.google.protobuf.CodedOutputStream;
-034
-035import 
org.apache.hadoop.hbase.shaded.io.netty.bootstrap.Bootstrap;
-036import 
org.apache.hadoop.hbase.shaded.io.netty.buffer.ByteBuf;
-037import 
org.apache.hadoop.hbase.shaded.io.netty.buffer.ByteBufAllocator;
-038import 
org.apache.hadoop.hbase.shaded.io.netty.buffer.ByteBufOutputStream;
-039import 
org.apache.hadoop.hbase.shaded.io.netty.buffer.PooledByteBufAllocator;
-040import 
org.apache.hadoop.hbase.shaded.io.netty.channel.Channel;
-041import 
org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelFuture;
-042import 
org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelFutureListener;
-043import 
org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelHandler;
-044import 
org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelHandlerContext;
-045import 
org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelInitializer;
-046import 
org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelPipeline;
-047import 
org.apache.hadoop.hbase.shaded.io.netty.channel.EventLoop;
-048import 
org.apache.hadoop.hbase.shaded.io.netty.channel.SimpleChannelInboundHandler;
-049import 
org.apache.hadoop.hbase.shaded.io.netty.handler.codec.protobuf.ProtobufDecoder;
-050import 
org.apache.hadoop.hbase.shaded.io.netty.handler.codec.protobuf.ProtobufVarint32FrameDecoder;
-051import 
org.apache.hadoop.hbase.shaded.io.netty.handler.timeout.IdleStateEvent;
-052import 
org.apache.hadoop.hbase.shaded.io.netty.handler.timeout.IdleStateHandler;
-053import 
org.apache.hadoop.hbase.shaded.io.netty.util.concurrent.Future;
-054import 
org.apache.hadoop.hbase.shaded.io.netty.util.concurrent.FutureListener;
-055import 
org.apache.hadoop.hbase.shaded.io.netty.util.concurrent.Promise;
-056
-057import java.io.IOException;
-058import 
java.lang.reflect.InvocationTargetException;
-059import java.lang.reflect.Method;
-060import java.util.ArrayList;
-061import java.util.EnumSet;
-062import java.util.List;
-063import java.util.concurrent.TimeUnit;
-064
-065import org.apache.commons.logging.Log;
-066import 
org.apache.commons.logging.LogFactory;
-067import 
org.apache.hadoop.conf.Configuration;
-068import 
org.apache.hadoop.crypto.CryptoProtocolVersion;
-069import 
org.apache.hadoop.crypto.Encryptor;
-070import org.apache.hadoop.fs.CreateFlag;
-071import org.apache.hadoop.fs.FileSystem;
-072import 
org.apache.hadoop.fs.FileSystemLinkResolver;
-073import org.apache.hadoop.fs.Path;
-074import 
org.apache.hadoop.fs.UnresolvedLinkException;
-075import 
org.apac

[29/51] [partial] hbase-site git commit: Published site at .

2017-11-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/713d773f/devapidocs/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.CloseRegionRemoteCall.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.CloseRegionRemoteCall.html
 
b/devapidocs/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.CloseRegionRemoteCall.html
index e017759..377d410 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.CloseRegionRemoteCall.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.CloseRegionRemoteCall.html
@@ -122,8 +122,10 @@ var activeTableTab = "activeTableTab";
 
 
 
-private final class RSProcedureDispatcher.CloseRegionRemoteCall
+private final class RSProcedureDispatcher.CloseRegionRemoteCall
 extends RSProcedureDispatcher.AbstractRSRemoteCall
+Compatibility class used by RSProcedureDispatcher.CompatRemoteProcedureResolver
 to close regions using old
+ AdminService#closeRegion(RpcController, CloseRegionRequest, 
RpcCallback) rpc.
 
 
 
@@ -233,7 +235,7 @@ extends 
 
 operation
-private final RSProcedureDispatcher.RegionCloseOperation
 operation
+private final RSProcedureDispatcher.RegionCloseOperation
 operation
 
 
 
@@ -250,7 +252,7 @@ extends 
 
 CloseRegionRemoteCall
-public CloseRegionRemoteCall(ServerName serverName,
+public CloseRegionRemoteCall(ServerName serverName,
  RSProcedureDispatcher.RegionCloseOperation operation)
 
 
@@ -268,7 +270,7 @@ extends 
 
 call
-public http://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true";
 title="class or interface in java.lang">Void call()
+public http://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true";
 title="class or interface in java.lang">Void call()
 
 Specified by:
 http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/Callable.html?is-external=true#call--";
 title="class or interface in java.util.concurrent">call in 
interface http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/Callable.html?is-external=true";
 title="class or interface in java.util.concurrent">CallableVoid>
@@ -283,7 +285,7 @@ extends 
 
 sendRequest
-private org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse sendRequest(ServerName serverName,
+private org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse sendRequest(ServerName serverName,

   
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest request)

throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true";
 title="class or interface in java.io">IOException
 
@@ -298,7 +300,7 @@ extends 
 
 remoteCallCompleted
-private void remoteCallCompleted(MasterProcedureEnv env,
+private void remoteCallCompleted(MasterProcedureEnv env,
  
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse response)
 
 
@@ -308,7 +310,7 @@ extends 
 
 remoteCallFailed
-private void remoteCallFailed(MasterProcedureEnv env,
+private void remoteCallFailed(MasterProcedureEnv env,
   http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true";
 title="class or interface in java.io">IOException e)
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/713d773f/devapidocs/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.CompatRemoteProcedureResolver.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.CompatRemoteProcedureResolver.html
 
b/devapidocs/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.CompatRemoteProcedureResolver.html
index 2eb3914..fc30b4a 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.CompatRemoteProcedureResolver.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.CompatRemoteProcedureResolver.html
@@ -117,9 +117,11 @@ var activeTableTab = "activeTableTab";
 
 
 
-protected class RSProcedureDispatcher.CompatRemoteProcedureResolver
+protected class RSProcedureDispatcher.CompatRemoteProcedureResolver
 extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">Object
 implements http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/Callable.html?is-external=true";
 title="class or interface in java.util.concurrent">Callable

[38/51] [partial] hbase-site git commit: Published site at .

2017-11-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/713d773f/devapidocs/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.NameNodeException.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.NameNodeException.html
 
b/devapidocs/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.NameNodeException.html
index c5c7b66..17c7510 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.NameNodeException.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.NameNodeException.html
@@ -126,7 +126,7 @@
 
 
 
-public static class FanOutOneBlockAsyncDFSOutputHelper.NameNodeException
+public static class FanOutOneBlockAsyncDFSOutputHelper.NameNodeException
 extends http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true";
 title="class or interface in java.io">IOException
 Exception other than RemoteException thrown when calling 
create on namenode
 
@@ -215,7 +215,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/io/IOException.ht
 
 
 serialVersionUID
-private static final long serialVersionUID
+private static final long serialVersionUID
 
 See Also:
 Constant
 Field Values
@@ -236,7 +236,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/io/IOException.ht
 
 
 NameNodeException
-public NameNodeException(http://docs.oracle.com/javase/8/docs/api/java/lang/Throwable.html?is-external=true";
 title="class or interface in java.lang">Throwable cause)
+public NameNodeException(http://docs.oracle.com/javase/8/docs/api/java/lang/Throwable.html?is-external=true";
 title="class or interface in java.lang">Throwable cause)
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/713d773f/devapidocs/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.PBHelper.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.PBHelper.html
 
b/devapidocs/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.PBHelper.html
index d0f7db9..df2714a 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.PBHelper.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.PBHelper.html
@@ -105,7 +105,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-private static interface FanOutOneBlockAsyncDFSOutputHelper.PBHelper
+private static interface FanOutOneBlockAsyncDFSOutputHelper.PBHelper
 
 
 
@@ -153,7 +153,7 @@ var activeTableTab = "activeTableTab";
 
 
 convert
-org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto convert(org.apache.hadoop.hdfs.protocol.ExtendedBlock b)
+org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto convert(org.apache.hadoop.hdfs.protocol.ExtendedBlock b)
 
 
 
@@ -162,7 +162,7 @@ var activeTableTab = "activeTableTab";
 
 
 convert
-org.apache.hadoop.security.proto.SecurityProtos.TokenProto convert(org.apache.hadoop.security.token.Token tok)
+org.apache.hadoop.security.proto.SecurityProtos.TokenProto convert(org.apache.hadoop.security.token.Token tok)
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/713d773f/devapidocs/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.PipelineAckStatusGetter.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.PipelineAckStatusGetter.html
 
b/devapidocs/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.PipelineAckStatusGetter.html
index 3f3bf91..f280554 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.PipelineAckStatusGetter.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.PipelineAckStatusGetter.html
@@ -105,7 +105,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-private static interface FanOutOneBlockAsyncDFSOutputHelper.PipelineAckStatusGetter
+private static interface FanOutOneBlockAsyncDFSOutputHelper.PipelineAckStatusGetter
 
 
 
@@ -149,7 +149,7 @@ var activeTableTab = "activeTableTab";
 
 
 get
-org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status get(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto ack)
+org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status get(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto ack)
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/713d773f/devapidocs/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.StorageTypeSetter.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/io/asyncfs/FanO

[40/51] [partial] hbase-site git commit: Published site at .

2017-11-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/713d773f/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionInfo.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionInfo.html 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionInfo.html
index 65b7191..e9b93ad 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionInfo.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionInfo.html
@@ -2395,28 +2395,33 @@ Input/OutputFormats, a table indexing MapReduce job, 
and utility methods.
RegionInfo right) 
 
 
+static RegionState
+RegionState.createForTesting(RegionInfo region,
+RegionState.State state) 
+
+
 void
 MasterFileSystem.deleteFamilyFromFS(org.apache.hadoop.fs.Path rootDir,
   RegionInfo region,
   byte[] familyName) 
 
-
+
 void
 MasterFileSystem.deleteFamilyFromFS(RegionInfo region,
   byte[] familyName) 
 
-
+
 boolean
 ServerManager.isRegionInServerManagerStates(RegionInfo hri) 
 
-
+
 long
 HMaster.mergeRegions(RegionInfo[] regionsToMerge,
 boolean forcible,
 long nonceGroup,
 long nonce) 
 
-
+
 long
 MasterServices.mergeRegions(RegionInfo[] regionsToMerge,
 boolean forcible,
@@ -2425,17 +2430,17 @@ Input/OutputFormats, a table indexing MapReduce job, 
and utility methods.
 Merge regions in a table.
 
 
-
+
 void
 MasterCoprocessorHost.postAssign(RegionInfo regionInfo) 
 
-
+
 void
 MasterCoprocessorHost.postCompletedCreateTableAction(TableDescriptor htd,
   RegionInfo[] regions,
   User user) 
 
-
+
 void
 MasterCoprocessorHost.postCompletedMergeRegionsAction(RegionInfo[] regionsToMerge,
RegionInfo mergedRegion,
@@ -2443,7 +2448,7 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 Invoked after completing merge regions operation
 
 
-
+
 void
 MasterCoprocessorHost.postCompletedMergeRegionsAction(RegionInfo[] regionsToMerge,
RegionInfo mergedRegion,
@@ -2451,7 +2456,7 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 Invoked after completing merge regions operation
 
 
-
+
 void
 MasterCoprocessorHost.postCompletedSplitRegionAction(RegionInfo regionInfoA,
   RegionInfo regionInfoB,
@@ -2459,16 +2464,16 @@ Input/OutputFormats, a table indexing MapReduce job, 
and utility methods.
 Invoked just after a split
 
 
-
+
 void
 MasterCoprocessorHost.postCreateTable(TableDescriptor htd,
RegionInfo[] regions) 
 
-
+
 void
 MasterCoprocessorHost.postMergeRegions(RegionInfo[] regionsToMerge) 
 
-
+
 void
 MasterCoprocessorHost.postMergeRegionsCommit(RegionInfo[] regionsToMerge,
   RegionInfo mergedRegion,
@@ -2476,7 +2481,7 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 Invoked after merge regions operation writes the new region 
to hbase:meta
 
 
-
+
 void
 MasterCoprocessorHost.postMergeRegionsCommit(RegionInfo[] regionsToMerge,
   RegionInfo mergedRegion,
@@ -2484,17 +2489,17 @@ Input/OutputFormats, a table indexing MapReduce job, 
and utility methods.
 Invoked after merge regions operation writes the new region 
to hbase:meta
 
 
-
+
 void
 MasterCoprocessorHost.postMove(RegionInfo region,
 ServerName srcServer,
 ServerName destServer) 
 
-
+
 void
 MasterCoprocessorHost.postRegionOffline(RegionInfo regionInfo) 
 
-
+
 void
 MasterCoprocessorHost.postRequestLock(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String namespace,
TableName tableName,
@@ -2502,45 +2507,45 @@ Input/OutputFormats, a table indexing MapReduce job, 
and utility methods.
LockType type,
http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in 
java.lang">String description) 
 
-
+
 void
 MasterCoprocessorHost.postRollBackMergeRegionsAction(RegionInfo[] regionsToMerge,
   User user)
 Invoked after rollback merge regions operation
 
 
-
+
 void
 MasterCoprocessorHost.postUnassign(RegionInfo regionInfo,
 boolean force) 
 
-
+
 void
 MasterCoprocessorHost.preAssign(RegionInfo regionInfo) 
 
-
+
 void
 MasterCoprocessorHost.preCreateTable(TableDescriptor htd,
   RegionInfo[] regions) 
 
-
+
 void
 MasterCoprocessorHost.preCreateTableAction(TableDescriptor htd,
 RegionInfo[] regions,
 User user) 
 
-
+
 void
 MasterCoprocessorHost.preMergeRegions(RegionInfo[] regionsToMerge) 
 
-
+
 void
 MasterCoprocessorHost.preMergeRegionsAction(RegionInfo[] regionsToMerge,
  User user)
 Invoked just

[34/51] [partial] hbase-site git commit: Published site at .

2017-11-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/713d773f/devapidocs/org/apache/hadoop/hbase/master/assignment/AssignmentManager.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/assignment/AssignmentManager.html 
b/devapidocs/org/apache/hadoop/hbase/master/assignment/AssignmentManager.html
index e8c7a75..2e8c50f 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/master/assignment/AssignmentManager.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/master/assignment/AssignmentManager.html
@@ -1299,7 +1299,7 @@ implements 
 
 pendingAssignQueue
-private final http://docs.oracle.com/javase/8/docs/api/java/util/ArrayList.html?is-external=true";
 title="class or interface in java.util">ArrayList 
pendingAssignQueue
+private final http://docs.oracle.com/javase/8/docs/api/java/util/ArrayList.html?is-external=true";
 title="class or interface in java.util">ArrayList 
pendingAssignQueue
 
 
 
@@ -1308,7 +1308,7 @@ implements 
 
 assignQueueLock
-private final http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/locks/ReentrantLock.html?is-external=true";
 title="class or interface in java.util.concurrent.locks">ReentrantLock assignQueueLock
+private final http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/locks/ReentrantLock.html?is-external=true";
 title="class or interface in java.util.concurrent.locks">ReentrantLock assignQueueLock
 
 
 
@@ -1317,7 +1317,7 @@ implements 
 
 assignQueueFullCond
-private final http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/locks/Condition.html?is-external=true";
 title="class or interface in java.util.concurrent.locks">Condition assignQueueFullCond
+private final http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/locks/Condition.html?is-external=true";
 title="class or interface in java.util.concurrent.locks">Condition assignQueueFullCond
 
 
 
@@ -2455,7 +2455,7 @@ implements 
 
 undoRegionAsOpening
-public void undoRegionAsOpening(RegionStates.RegionStateNode regionNode)
+public void undoRegionAsOpening(RegionStates.RegionStateNode regionNode)
 
 
 
@@ -2464,7 +2464,7 @@ implements 
 
 markRegionAsOpened
-public void markRegionAsOpened(RegionStates.RegionStateNode regionNode)
+public void markRegionAsOpened(RegionStates.RegionStateNode regionNode)
 throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true";
 title="class or interface in java.io">IOException
 
 Throws:
@@ -2478,7 +2478,7 @@ implements 
 
 markRegionAsClosing
-public void markRegionAsClosing(RegionStates.RegionStateNode regionNode)
+public void markRegionAsClosing(RegionStates.RegionStateNode regionNode)
  throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true";
 title="class or interface in java.io">IOException
 
 Throws:
@@ -2492,7 +2492,7 @@ implements 
 
 undoRegionAsClosing
-public void undoRegionAsClosing(RegionStates.RegionStateNode regionNode)
+public void undoRegionAsClosing(RegionStates.RegionStateNode regionNode)
 
 
 
@@ -2501,7 +2501,7 @@ implements 
 
 markRegionAsClosed
-public void markRegionAsClosed(RegionStates.RegionStateNode regionNode)
+public void markRegionAsClosed(RegionStates.RegionStateNode regionNode)
 throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true";
 title="class or interface in java.io">IOException
 
 Throws:
@@ -2515,7 +2515,7 @@ implements 
 
 markRegionAsSplit
-public void markRegionAsSplit(RegionInfo parent,
+public void markRegionAsSplit(RegionInfo parent,
   ServerName serverName,
   RegionInfo daughterA,
   RegionInfo daughterB)
@@ -2532,7 +2532,7 @@ implements 
 
 markRegionAsMerged
-public void markRegionAsMerged(RegionInfo child,
+public void markRegionAsMerged(RegionInfo child,
ServerName serverName,
RegionInfo mother,
RegionInfo father)
@@ -2555,7 +2555,7 @@ implements 
 
 shouldAssignFavoredNodes
-private boolean shouldAssignFavoredNodes(RegionInfo region)
+private boolean shouldAssignFavoredNodes(RegionInfo region)
 
 
 
@@ -2564,7 +2564,7 @@ implements 
 
 queueAssign
-protected void queueAssign(RegionStates.RegionStateNode regionNode)
+protected void queueAssign(RegionStates.RegionStateNode regionNode)
 Add the assign operation to the assignment queue.
  The pending assignment operation will be processed,
  and each region will be assigned by a server using the balancer.
@@ -2576,7 +2576,7 @@ implements 
 
 startAssignmentThread
-private void startAssignmentThread()
+private void startAssignmentThread()
 
 
 
@@ -2585,7 +2585,7 @@ implements 
 
 stopAssignmentThread
-private void stopAssignmentThread()
+private void stopAssignmen

[30/51] [partial] hbase-site git commit: Published site at .

2017-11-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/713d773f/devapidocs/org/apache/hadoop/hbase/master/assignment/class-use/RegionStates.RegionStateNode.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/assignment/class-use/RegionStates.RegionStateNode.html
 
b/devapidocs/org/apache/hadoop/hbase/master/assignment/class-use/RegionStates.RegionStateNode.html
index 71724c6..0cd9f33 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/master/assignment/class-use/RegionStates.RegionStateNode.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/master/assignment/class-use/RegionStates.RegionStateNode.html
@@ -150,31 +150,27 @@
 
 
 protected RegionStates.RegionStateNode
-RegionStates.createRegionNode(RegionInfo regionInfo) 
+RegionStates.createRegionStateNode(RegionInfo regionInfo) 
 
 
 protected RegionStates.RegionStateNode
-RegionStates.getOrCreateRegionNode(RegionInfo regionInfo) 
+RegionStates.getOrCreateRegionStateNode(RegionInfo regionInfo) 
 
 
 RegionStates.RegionStateNode
-RegionStates.RegionFailedOpen.getRegionNode() 
+RegionTransitionProcedure.getRegionState(MasterProcedureEnv env) 
 
 
-protected RegionStates.RegionStateNode
-RegionStates.getRegionNode(RegionInfo regionInfo) 
+RegionStates.RegionStateNode
+RegionStates.RegionFailedOpen.getRegionStateNode() 
 
 
-(package private) RegionStates.RegionStateNode
-RegionStates.getRegionNodeFromEncodedName(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in 
java.lang">String encodedRegionName) 
+protected RegionStates.RegionStateNode
+RegionStates.getRegionStateNode(RegionInfo regionInfo) 
 
 
 (package private) RegionStates.RegionStateNode
-RegionStates.getRegionNodeFromName(byte[] regionName) 
-
-
-RegionStates.RegionStateNode
-RegionTransitionProcedure.getRegionState(MasterProcedureEnv env) 
+RegionStates.getRegionStateNodeFromName(byte[] regionName) 
 
 
 
@@ -186,21 +182,21 @@
 
 
 
-(package private) http://docs.oracle.com/javase/8/docs/api/java/util/Collection.html?is-external=true";
 title="class or interface in java.util">Collection
-RegionStates.getRegionNodes() 
-
-
 http://docs.oracle.com/javase/8/docs/api/java/util/Set.html?is-external=true";
 title="class or interface in java.util">Set
 RegionStates.ServerStateNode.getRegions() 
 
-
+
 http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List
 AssignmentManager.getRegionsInTransition() 
 
-
+
 http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List
 RegionStates.getRegionsInTransition() 
 
+
+(package private) http://docs.oracle.com/javase/8/docs/api/java/util/Collection.html?is-external=true";
 title="class or interface in java.util">Collection
+RegionStates.getRegionStateNodes() 
+
 
 (package private) http://docs.oracle.com/javase/8/docs/api/java/util/ArrayList.html?is-external=true";
 title="class or interface in java.util">ArrayList
 RegionStates.getTableRegionStateNodes(TableName tableName) 
@@ -234,8 +230,7 @@
 
 
 RegionStates.ServerStateNode
-RegionStates.addRegionToServer(ServerName serverName,
- RegionStates.RegionStateNode regionNode) 
+RegionStates.addRegionToServer(RegionStates.RegionStateNode regionNode) 
 
 
 RegionStates.RegionFailedOpen
@@ -250,158 +245,158 @@
 RegionStates.RegionStateNode.compareTo(RegionStates.RegionStateNode other) 
 
 
-private RegionState
-RegionStates.createRegionState(RegionStates.RegionStateNode node) 
-
-
 protected void
 UnassignProcedure.finishTransition(MasterProcedureEnv env,
 RegionStates.RegionStateNode regionNode) 
 
-
+
 protected abstract void
 RegionTransitionProcedure.finishTransition(MasterProcedureEnv env,
 RegionStates.RegionStateNode regionNode) 
 
-
+
 protected void
 AssignProcedure.finishTransition(MasterProcedureEnv env,
 RegionStates.RegionStateNode regionNode) 
 
-
+
 private void
 AssignProcedure.handleFailure(MasterProcedureEnv env,
  RegionStates.RegionStateNode regionNode)
 Called when dispatch or subsequent OPEN request fail.
 
 
-
+
 (package private) boolean
 RegionStates.include(RegionStates.RegionStateNode node,
boolean offline)
 Utility.
 
 
-
+
 private boolean
 AssignProcedure.incrementAndCheckMaxAttempts(MasterProcedureEnv env,
 RegionStates.RegionStateNode regionNode) 
 
-
+
 protected boolean
 RegionTransitionProcedure.isServerOnline(MasterProcedureEnv env,
   RegionStates.RegionStateNode regionNode) 
 
-
+
 void
 AssignmentManager.markRegionAsClosed(RegionStates.RegionStateNode regionNode) 
 
-
+
 void
 AssignmentManager.markRegionAsClosing(Regi

[45/51] [partial] hbase-site git commit: Published site at .

2017-11-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/713d773f/checkstyle-aggregate.html
--
diff --git a/checkstyle-aggregate.html b/checkstyle-aggregate.html
index ae49b24..d419b25 100644
--- a/checkstyle-aggregate.html
+++ b/checkstyle-aggregate.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase – Checkstyle Results
 
@@ -286,10 +286,10 @@
  Warnings
  Errors
 
-3437
+3440
 0
 0
-20803
+20775
 
 Files
 
@@ -2487,7 +2487,7 @@
 org/apache/hadoop/hbase/client/TestRegionInfoDisplay.java
 0
 0
-16
+13
 
 org/apache/hadoop/hbase/client/TestReplicaWithCluster.java
 0
@@ -4059,960 +4059,940 @@
 0
 1
 
-org/apache/hadoop/hbase/io/asyncfs/AsyncFSOutputHelper.java
-0
-0
-2
-
-org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutput.java
-0
-0
-4
-
 org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.java
 0
 0
-7
+3
 
 org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputSaslHelper.java
 0
 0
 3
 
-org/apache/hadoop/hbase/io/asyncfs/TestFanOutOneBlockAsyncDFSOutput.java
+org/apache/hadoop/hbase/io/asyncfs/TestLocalAsyncOutput.java
 0
 0
 1
 
-org/apache/hadoop/hbase/io/asyncfs/TestLocalAsyncOutput.java
+org/apache/hadoop/hbase/io/asyncfs/TestSendBufSizePredictor.java
 0
 0
 1
 
-org/apache/hadoop/hbase/io/asyncfs/TestSaslFanOutOneBlockAsyncDFSOutput.java
-0
-0
-2
-
 org/apache/hadoop/hbase/io/compress/Compression.java
 0
 0
 10
-
+
 org/apache/hadoop/hbase/io/compress/ReusableStreamGzipCodec.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/io/crypto/Cipher.java
 0
 0
 4
-
+
 org/apache/hadoop/hbase/io/crypto/Context.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/io/crypto/CryptoCipherProvider.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/io/crypto/Decryptor.java
 0
 0
 3
-
+
 org/apache/hadoop/hbase/io/crypto/DefaultCipherProvider.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/io/crypto/Encryption.java
 0
 0
 54
-
+
 org/apache/hadoop/hbase/io/crypto/Encryptor.java
 0
 0
 3
-
+
 org/apache/hadoop/hbase/io/crypto/KeyProvider.java
 0
 0
 4
-
+
 org/apache/hadoop/hbase/io/crypto/KeyProviderForTesting.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/io/crypto/TestEncryption.java
 0
 0
 4
-
+
 org/apache/hadoop/hbase/io/crypto/aes/AES.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/io/crypto/aes/AESDecryptor.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/io/crypto/aes/AESEncryptor.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/io/crypto/aes/CommonsCryptoAES.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/io/crypto/aes/CommonsCryptoAESDecryptor.java
 0
 0
 5
-
+
 org/apache/hadoop/hbase/io/crypto/aes/CommonsCryptoAESEncryptor.java
 0
 0
 4
-
+
 org/apache/hadoop/hbase/io/crypto/aes/CryptoAES.java
 0
 0
 4
-
+
 org/apache/hadoop/hbase/io/crypto/aes/TestCommonsAES.java
 0
 0
 5
-
+
 org/apache/hadoop/hbase/io/encoding/AbstractDataBlockEncoder.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/io/encoding/BufferedDataBlockEncoder.java
 0
 0
 8
-
+
 org/apache/hadoop/hbase/io/encoding/CompressionState.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/io/encoding/CopyKeyDataBlockEncoder.java
 0
 0
 2
-
+
 org/apache/hadoop/hbase/io/encoding/DataBlockEncoder.java
 0
 0
 16
-
+
 org/apache/hadoop/hbase/io/encoding/DataBlockEncoding.java
 0
 0
 11
-
+
 org/apache/hadoop/hbase/io/encoding/DiffKeyDeltaEncoder.java
 0
 0
 2
-
+
 org/apache/hadoop/hbase/io/encoding/EncodedDataBlock.java
 0
 0
 4
-
+
 org/apache/hadoop/hbase/io/encoding/FastDiffDeltaEncoder.java
 0
 0
 2
-
+
 org/apache/hadoop/hbase/io/encoding/HFileBlockDecodingContext.java
 0
 0
 2
-
+
 org/apache/hadoop/hbase/io/encoding/HFileBlockDefaultDecodingContext.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/io/encoding/HFileBlockDefaultEncodingContext.java
 0
 0
 4
-
+
 org/apache/hadoop/hbase/io/encoding/HFileBlockEncodingContext.java
 0
 0
 4
-
+
 org/apache/hadoop/hbase/io/encoding/NoneEncoder.java
 0
 0
 2
-
+
 org/apache/hadoop/hbase/io/encoding/PrefixKeyDeltaEncoder.java
 0
 0
 2
-
+
 org/apache/hadoop/hbase/io/encoding/RowIndexCodecV1.java
 0
 0
 2
-
+
 org/apache/hadoop/hbase/io/encoding/RowIndexEncoderV1.java
 0
 0
 2
-
+
 org/apache/hadoop/hbase/io/encoding/RowIndexSeekerV1.java
 0
 0
 4
-
+
 org/apache/hadoop/hbase/io/encoding/TestBufferedDataBlockEncoder.java
 0
 0
 7
-
+
 org/apache/hadoop/hbase/io/encoding/TestChangingEncoding.java
 0
 0
 2
-
+
 org/apache/hadoop/hbase/io/encoding/TestDataBlockEncoders.java
 0
 0
 11
-
+
 org/apache/hadoop/hbase/io/encoding/TestEncodedSeekers.java
 0
 0
 3
-
+
 org/apache/hadoop/hbase/io/encoding/TestLoadAndSwitchEncodeOnDisk.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/io/encoding/TestSeekToBlockWithEncoders.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/io/hadoopbackport/TestThrottledInputStream.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/io/hadoopbackport/ThrottledInputStream.java
 0
 0
 6
-
+
 org/apache/hadoop/hbase/io/hfile/BlockCache.java
 0
 0
 4
-
+
 org/apache/hadoop/hbase/io/hfile/BlockCacheKey.java
 0
 0
 2
-
+
 org/apache/hadoop/hbase/io/hfile/BlockC

[35/51] [partial] hbase-site git commit: Published site at .

2017-11-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/713d773f/devapidocs/org/apache/hadoop/hbase/master/RegionState.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/master/RegionState.html 
b/devapidocs/org/apache/hadoop/hbase/master/RegionState.html
index 130b3a6..1a0cb4e 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/RegionState.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/RegionState.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":10,"i1":9,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":9,"i39":10,"i40":10,"i41":10};
+var methods = 
{"i0":10,"i1":9,"i2":9,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":9,"i40":10,"i41":10,"i42":10};
 var tabs = {65535:["t0","All Methods"],1:["t1","Static 
Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -110,7 +110,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Private
-public class RegionState
+public class RegionState
 extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">Object
 State of a Region while undergoing transitions.
  This class is immutable.
@@ -186,23 +186,19 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 Constructor and Description
 
 
-RegionState(RegionInfo region,
-   RegionState.State state) 
-
-
 RegionState(RegionInfo region,
RegionState.State state,
long stamp,
ServerName serverName) 
 
-
+
 RegionState(RegionInfo region,
RegionState.State state,
long stamp,
ServerName serverName,
long ritDuration) 
 
-
+
 RegionState(RegionInfo region,
RegionState.State state,
ServerName serverName) 
@@ -235,178 +231,183 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 
+static RegionState
+createForTesting(RegionInfo region,
+RegionState.State state) 
+
+
 boolean
 equals(http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">Object obj)
 Check if two states are the same, except timestamp
 
 
-
+
 RegionInfo
 getRegion() 
 
-
+
 long
 getRitDuration() 
 
-
+
 ServerName
 getServerName() 
 
-
+
 long
 getStamp() 
 
-
+
 RegionState.State
 getState() 
 
-
+
 int
 hashCode()
 Don't count timestamp in hash code calculation
 
 
-
+
 boolean
 isClosed() 
 
-
+
 boolean
 isClosing() 
 
-
+
 boolean
 isClosingOrClosedOnServer(ServerName sn) 
 
-
+
 boolean
 isFailedClose() 
 
-
+
 boolean
 isFailedOpen() 
 
-
+
 boolean
 isMerged() 
 
-
+
 boolean
 isMerging() 
 
-
+
 boolean
 isMergingNew() 
 
-
+
 boolean
 isMergingNewOnServer(ServerName sn) 
 
-
+
 boolean
 isMergingNewOrOfflineOnServer(ServerName sn) 
 
-
+
 boolean
 isMergingNewOrOpenedOnServer(ServerName sn) 
 
-
+
 boolean
 isMergingOnServer(ServerName sn) 
 
-
+
 boolean
 isOffline() 
 
-
+
 boolean
 isOnServer(ServerName sn) 
 
-
+
 boolean
 isOpened() 
 
-
+
 boolean
 isOpenedOnServer(ServerName sn) 
 
-
+
 boolean
 isOpening() 
 
-
+
 boolean
 isOpeningOrFailedOpenOnServer(ServerName sn) 
 
-
+
 boolean
 isOpeningOrOpenedOnServer(ServerName sn) 
 
-
+
 boolean
 isReadyToOffline()
 Check if a region state can transition to offline
 
 
-
+
 boolean
 isReadyToOnline()
 Check if a region state can transition to online
 
 
-
+
 boolean
 isSplit() 
 
-
+
 boolean
 isSplitting() 
 
-
+
 boolean
 isSplittingNew() 
 
-
+
 boolean
 isSplittingNewOnServer(ServerName sn) 
 
-
+
 boolean
 isSplittingOnServer(ServerName sn) 
 
-
+
 boolean
 isSplittingOrOpenedOnServer(ServerName sn) 
 
-
+
 boolean
 isSplittingOrSplitOnServer(ServerName sn) 
 
-
+
 boolean
 isUnassignable()
 Check if a region state is one of offline states that
  can't transition to pending_close/closing (unassign/offline)
 
 
-
+
 static boolean
 isUnassignable(RegionState.State state)
 Check if a region state is one of offline states that
  can't transition to pending_close/closing (unassign/offline)
 
 
-
+
 http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String
 toDescriptiveString()
 A slower (but more easy-to-read) stringification
 
 
-
+
 http://docs.oracle.com/javase/8/docs/api/java/lang/

[49/51] [partial] hbase-site git commit: Published site at .

2017-11-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/713d773f/apidocs/constant-values.html
--
diff --git a/apidocs/constant-values.html b/apidocs/constant-values.html
index db62141..e40e802 100644
--- a/apidocs/constant-values.html
+++ b/apidocs/constant-values.html
@@ -798,7 +798,7 @@
 
 public static final int
 DEFAULT_HBASE_CLIENT_RETRIES_NUMBER
-35
+10
 
 
 
@@ -822,572 +822,586 @@
 6
 
 
+
+
+public static final int
+DEFAULT_HBASE_CLIENT_SERVERSIDE_RETRIES_MULTIPLIER
+3
+
+
 
 
 public static final double DEFAULT_HBASE_MASTER_BALANCER_MAX_RIT_PERCENT = 1.0
 public static final int DEFAULT_HBASE_META_BLOCK_SIZE = 8192
 public static final int DEFAULT_HBASE_META_SCANNER_CACHING = 100
 public static final int DEFAULT_HBASE_META_VERSIONS = 3
 public static final int DEFAULT_HBASE_RPC_SHORTOPERATION_TIMEOUT = 10000
 public static final int DEFAULT_HBASE_RPC_TIMEOUT = 60000
 public static final int DEFAULT_HBASE_SERVER_PAUSE = 1000
 public static final long DEFAULT_HBASE_SERVER_SCANNER_MAX_RESULT_SIZE = 104857600L
 public static final int DEFAULT_HEALTH_FAILURE_THRESHOLD = 3
 public static final long DEFAULT_HEALTH_SCRIPT_TIMEOUT = 60000L
 public static final float DEFAULT_HEAP_OCCUPANCY_HIGH_WATERMARK = 0.98f
 public static final float DEFAULT_HEAP_OCCUPANCY_LOW_WATERMARK = 0.95f
 public static final String DEFAULT_HOST = "0.0.0.0"
 public static final boolean DEFAULT_HREGION_EDITS_REPLAY_SKIP_ERRORS = false
 public static final int DEFAULT_HREGION_MEMSTORE_BLOCK_MULTIPLIER = 4
 public static final int DEFAULT_HSTORE_OPEN_AND_CLOSE_THREADS_MAX = 1
 public static final int DEFAULT_MASTER_INFOPORT = 16010
 public static final int DEFAULT_MASTER_PORT = 16000
 public static final boolean DEFAULT_MASTER_TYPE_BACKUP = false
 public static final long DEFAULT_MAX_FILE_SIZE = 10737418240L
 public static final int DEFAULT_META_REPLICA_NUM = 1
 public static final double DEFAULT_REGION_SERVER_HANDLER_ABORT_ON_ERROR_PERCENT = 0.5
 public static final int DEFAULT_REGION_SERVER_HANDLER_COUNT = 30
 public static final int DEFAULT_REGION_SERVER_HIGH_PRIORITY_HANDLER_COUNT = 20
 public static final int DEFAULT_REGION_SERVER_REPLICATION_HANDLER_COUNT = 3
 public static final int DEFAULT_REGIONSERVER_INFOPORT = 16030
 public static final long DEFAULT_REGIONSERVER_METRICS_PERIOD = 5000L
 public static final int DEFAULT_REGIONSERVER_PORT = 16020
 public static final String DEFAULT_SNAPSHOT_RESTORE_FAILSAFE_NAME = "hbase-failsafe-{snapshot.name}-{restore.timestamp}"
 public static final boolean DEFAULT_SNAPSHOT_RESTORE_TAKE_FAILSAFE_SNAPSHOT = true
 public static final String DEFAULT_STATUS_MULTICAST_ADDRESS = "226.1.1.3"
 public static final String DEFAULT_STATUS_MULTICAST_BIND_ADDRESS = "0.0.0.0"
 public static final int DEFAULT_STATUS_MULTICAST_PORT = 16100
 public static final int DEFAULT_THREAD_WAKE_FREQUENCY = 10000
 public static final boolean DEFAULT_USE_META_REPLICAS = false
 public static final int DEFAULT_VERSION_FILE_WRITE_ATTEMPTS = 3
 public static final String DEFAULT_WAL_STORAGE_POLICY = "HOT"
 public static final int DEFAULT_ZK_SESSION_TIMEOUT = 90000
 public static final String DEFAULT_ZOOKEEPER_ZNODE_PARENT = "/hbase"
 public static final int DEFAULT_ZOOKEPER_CLIENT_PORT = 2181
 public static final int DEFAULT_ZOOKEPER_MAX_CLIENT_CNXNS = 300
 public static final int DELIMITER = 44
 public static final String ENABLE_CLIENT_BACKPRESSURE = "hbase.client.backpressure.enabled"
 public static final String ENABLE_DATA_FILE_UMASK
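The two retry constants above interact: the client-facing default drops from 35 to 10, and server-side connections scale the client value by the new multiplier (default 3). A hedged sketch of the arithmetic, using only names shown in this hunk; the multiplication itself happens inside ConnectionUtils, further down in this digest:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HConstants;

    public class RetryDefaults {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Client retries: DEFAULT_HBASE_CLIENT_RETRIES_NUMBER is now 10 (was 35).
        int clientRetries = conf.getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
            HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
        // New knob with default 3, per the hunk above.
        int multiplier = conf.getInt("hbase.client.serverside.retries.multiplier", 3);
        // Server-side connections therefore end up with 10 * 3 = 30 retries.
        System.out.println("effective server-side retries = " + (clientRetries * multiplier));
      }
    }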
[37/51] [partial] hbase-site git commit: Published site at .

2017-11-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/713d773f/devapidocs/org/apache/hadoop/hbase/io/asyncfs/class-use/FanOutOneBlockAsyncDFSOutput.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/io/asyncfs/class-use/FanOutOneBlockAsyncDFSOutput.html
 
b/devapidocs/org/apache/hadoop/hbase/io/asyncfs/class-use/FanOutOneBlockAsyncDFSOutput.html
index d9e4e55..7ad1b14 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/io/asyncfs/class-use/FanOutOneBlockAsyncDFSOutput.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/io/asyncfs/class-use/FanOutOneBlockAsyncDFSOutput.html
@@ -104,26 +104,26 @@
 static FanOutOneBlockAsyncDFSOutput
-FanOutOneBlockAsyncDFSOutputHelper.createOutput(org.apache.hadoop.hdfs.DistributedFileSystem dfs,
-    org.apache.hadoop.fs.Path f, boolean overwrite, boolean createParent,
-    short replication, long blockSize,
-    org.apache.hadoop.hbase.shaded.io.netty.channel.EventLoop eventLoop,
-    Class channelClass)
+FanOutOneBlockAsyncDFSOutputHelper.createOutput(org.apache.hadoop.hdfs.DistributedFileSystem dfs,
+    org.apache.hadoop.fs.Path f, boolean overwrite, boolean createParent,
+    short replication, long blockSize,
+    org.apache.hadoop.hbase.shaded.io.netty.channel.EventLoopGroup eventLoopGroup,
+    Class channelClass)
 Create a FanOutOneBlockAsyncDFSOutput.

 private static FanOutOneBlockAsyncDFSOutput
-FanOutOneBlockAsyncDFSOutputHelper.createOutput(org.apache.hadoop.hdfs.DistributedFileSystem dfs,
-    String src, boolean overwrite, boolean createParent,
-    short replication, long blockSize,
-    org.apache.hadoop.hbase.shaded.io.netty.channel.EventLoop eventLoop,
-    Class channelClass)
+FanOutOneBlockAsyncDFSOutputHelper.createOutput(org.apache.hadoop.hdfs.DistributedFileSystem dfs,
+    String src, boolean overwrite, boolean createParent,
+    short replication, long blockSize,
+    org.apache.hadoop.hbase.shaded.io.netty.channel.EventLoopGroup eventLoopGroup,
+    Class channelClass)

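Per the change above, callers now pass createOutput an EventLoopGroup rather than a single EventLoop. A hedged call sketch; the netty types under the shaded package and all variable values are assumptions, not part of this diff:

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput;
    import org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper;
    import org.apache.hadoop.hbase.shaded.io.netty.channel.EventLoopGroup;
    import org.apache.hadoop.hbase.shaded.io.netty.channel.nio.NioEventLoopGroup;
    import org.apache.hadoop.hbase.shaded.io.netty.channel.socket.nio.NioSocketChannel;

    public class CreateOutputSketch {
      static FanOutOneBlockAsyncDFSOutput open(DistributedFileSystem dfs) throws Exception {
        EventLoopGroup group = new NioEventLoopGroup(); // was: a single EventLoop
        return FanOutOneBlockAsyncDFSOutputHelper.createOutput(
            dfs, new Path("/hbase/WALs/example"),
            true /* overwrite */, false /* createParent */,
            (short) 3 /* replication */, 64 * 1024 * 1024L /* blockSize */,
            group, NioSocketChannel.class);
      }
    }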

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/713d773f/devapidocs/org/apache/hadoop/hbase/io/asyncfs/class-use/SendBufSizePredictor.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/io/asyncfs/class-use/SendBufSizePredictor.html
 
b/devapidocs/org/apache/hadoop/hbase/io/asyncfs/class-use/SendBufSizePredictor.html
new file mode 100644
index 000..cd30820
--- /dev/null
+++ 
b/devapidocs/org/apache/hadoop/hbase/io/asyncfs/class-use/SendBufSizePredictor.html
@@ -0,0 +1,165 @@
+Uses of Class org.apache.hadoop.hbase.io.asyncfs.SendBufSizePredictor (Apache HBase 3.0.0-SNAPSHOT API)
+Uses of Class org.apache.hadoop.hbase.io.asyncfs.SendBufSizePredictor
+
+Packages that use SendBufSizePredictor:
+  org.apache.hadoop.hbase.io.asyncfs
+
+Uses of SendBufSizePredictor in org.apache.hadoop.hbase.io.asyncfs
+
+Fields in org.apache.hadoop.hbase.io.asyncfs declared as SendBufSizePredictor:
+  private SendBufSizePredictor FanOutOneBlockAsyncDFSOutput.sendBufSizePRedictor

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/713d773f/devapidocs/org/apache/hadoop/hb

[22/51] [partial] hbase-site git commit: Published site at .

2017-11-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/713d773f/devapidocs/src-html/org/apache/hadoop/hbase/Version.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/Version.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/Version.html
index 01c0791..4d26b63 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/Version.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/Version.html
@@ -16,11 +16,11 @@
 008@InterfaceAudience.Private
 009public class Version {
 010  public static final String version = "3.0.0-SNAPSHOT";
-011  public static final String revision = "8b32d3792934507c774997cd82dc061b75410f83";
+011  public static final String revision = "6a6409a30aa634875467683203de0e21e0491986";
 012  public static final String user = "jenkins";
-013  public static final String date = "Wed Nov 29 14:42:11 UTC 2017";
+013  public static final String date = "Thu Nov 30 14:42:34 UTC 2017";
 014  public static final String url = "git://asf920.gq1.ygridcore.net/home/jenkins/jenkins-slave/workspace/hbase_generate_website/hbase";
-015  public static final String srcChecksum = "aea71cf3474c2eecf646181c5e4c0fa3";
+015  public static final String srcChecksum = "252c37b6e2a91e50595f45109cbc77dd";
 016}
 
 

[12/51] [partial] hbase-site git commit: Published site at .

2017-11-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/713d773f/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.ChecksumCreater.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.ChecksumCreater.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.ChecksumCreater.html
index 25e368d..d0f781f 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.ChecksumCreater.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.ChecksumCreater.html
@@ -25,798 +25,798 @@
 017 */
 018package org.apache.hadoop.hbase.io.asyncfs;
 019
-020import static org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelOption.CONNECT_TIMEOUT_MILLIS;
-021import static org.apache.hadoop.hbase.shaded.io.netty.handler.timeout.IdleState.READER_IDLE;
-022import static org.apache.hadoop.fs.CreateFlag.CREATE;
-023import static org.apache.hadoop.fs.CreateFlag.OVERWRITE;
-024import static org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createEncryptor;
-025import static org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.trySaslNegotiate;
+020import static org.apache.hadoop.fs.CreateFlag.CREATE;
+021import static org.apache.hadoop.fs.CreateFlag.OVERWRITE;
+022import static org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createEncryptor;
+023import static org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.trySaslNegotiate;
+024import static org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelOption.CONNECT_TIMEOUT_MILLIS;
+025import static org.apache.hadoop.hbase.shaded.io.netty.handler.timeout.IdleState.READER_IDLE;
 026import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY;
 027import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_USE_DN_HOSTNAME;
 028import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_USE_DN_HOSTNAME_DEFAULT;
 029import static org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage.PIPELINE_SETUP_CREATE;
 030
-031import org.apache.hadoop.hbase.shaded.com.google.common.base.Throwables;
-032import org.apache.hadoop.hbase.shaded.com.google.common.collect.ImmutableMap;
-033import com.google.protobuf.CodedOutputStream;
-034
-035import org.apache.hadoop.hbase.shaded.io.netty.bootstrap.Bootstrap;
-036import org.apache.hadoop.hbase.shaded.io.netty.buffer.ByteBuf;
-037import org.apache.hadoop.hbase.shaded.io.netty.buffer.ByteBufAllocator;
-038import org.apache.hadoop.hbase.shaded.io.netty.buffer.ByteBufOutputStream;
-039import org.apache.hadoop.hbase.shaded.io.netty.buffer.PooledByteBufAllocator;
-040import org.apache.hadoop.hbase.shaded.io.netty.channel.Channel;
-041import org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelFuture;
-042import org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelFutureListener;
-043import org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelHandler;
-044import org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelHandlerContext;
-045import org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelInitializer;
-046import org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelPipeline;
-047import org.apache.hadoop.hbase.shaded.io.netty.channel.EventLoop;
-048import org.apache.hadoop.hbase.shaded.io.netty.channel.SimpleChannelInboundHandler;
-049import org.apache.hadoop.hbase.shaded.io.netty.handler.codec.protobuf.ProtobufDecoder;
-050import org.apache.hadoop.hbase.shaded.io.netty.handler.codec.protobuf.ProtobufVarint32FrameDecoder;
-051import org.apache.hadoop.hbase.shaded.io.netty.handler.timeout.IdleStateEvent;
-052import org.apache.hadoop.hbase.shaded.io.netty.handler.timeout.IdleStateHandler;
-053import org.apache.hadoop.hbase.shaded.io.netty.util.concurrent.Future;
-054import org.apache.hadoop.hbase.shaded.io.netty.util.concurrent.FutureListener;
-055import org.apache.hadoop.hbase.shaded.io.netty.util.concurrent.Promise;
-056
-057import java.io.IOException;
-058import java.lang.reflect.InvocationTargetException;
-059import java.lang.reflect.Method;
-060import java.util.ArrayList;
-061import java.util.EnumSet;
-062import java.util.List;
-063import java.util.concurrent.TimeUnit;
-064
-065import org.apache.commons.logging.Log;
-066import org.apache.commons.logging.LogFactory;
-067import org.apache.hadoop.conf.Configuration;
-068import org.apache.hadoop.crypto.CryptoProtocolVersion;
-069import org.apache.hadoop.crypto.Encryptor;
-070import org.apache.hadoop.fs.CreateFlag;
-071import org.apache.hadoop.fs.FileSystem;
-072import org.apache.hadoop.fs.FileSystemLinkResolver;
-073import org.apache.hadoop.fs.Path;
-074import org.apache.hadoop.fs.UnresolvedLinkException;
-075import org.apache.hadoop.

[03/51] [partial] hbase-site git commit: Published site at .

2017-11-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/713d773f/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/SendBufSizePredictor.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/SendBufSizePredictor.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/SendBufSizePredictor.html
new file mode 100644
index 000..8a522f0
--- /dev/null
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/SendBufSizePredictor.html
@@ -0,0 +1,129 @@
+001/**
+002 * Licensed to the Apache Software Foundation (ASF) under one
+003 * or more contributor license agreements.  See the NOTICE file
+004 * distributed with this work for additional information
+005 * regarding copyright ownership.  The ASF licenses this file
+006 * to you under the Apache License, Version 2.0 (the
+007 * "License"); you may not use this file except in compliance
+008 * with the License.  You may obtain a copy of the License at
+009 *
+010 * http://www.apache.org/licenses/LICENSE-2.0
+011 *
+012 * Unless required by applicable law or agreed to in writing, software
+013 * distributed under the License is distributed on an "AS IS" BASIS,
+014 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+015 * See the License for the specific language governing permissions and
+016 * limitations under the License.
+017 */
+018package org.apache.hadoop.hbase.io.asyncfs;
+019
+020import org.apache.yetus.audience.InterfaceAudience;
+021
+022/**
+023 * Used to predict the next send buffer size.
+024 */
+025@InterfaceAudience.Private
+026class SendBufSizePredictor {
+027
+028  // LIMIT is 128MB
+029  private static final int LIMIT = 128 * 1024 * 1024;
+030
+031  // buf's initial capacity - 4KB
+032  private int capacity = 4 * 1024;
+033
+034  int initialSize() {
+035    return capacity;
+036  }
+037
+038  int guess(int bytesWritten) {
+039    // if the bytesWritten is greater than the current capacity
+040    // always increase the capacity in powers of 2.
+041    if (bytesWritten > this.capacity) {
+042      // Ensure we don't cross the LIMIT
+043      if ((this.capacity << 1) <= LIMIT) {
+044        // increase the capacity in the range of power of 2
+045        this.capacity = this.capacity << 1;
+046      }
+047    } else {
+048      // if we see that the bytesWritten is lesser we could again decrease
+049      // the capacity by dividing it by 2 if the bytesWritten is satisfied by
+050      // that reduction
+051      if ((this.capacity >> 1) >= bytesWritten) {
+052        this.capacity = this.capacity >> 1;
+053      }
+054    }
+055    return this.capacity;
+056  }
+057}
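The guess() logic above doubles the capacity on undershoot and halves it on a comfortable overshoot, staying within 4KB and the 128MB LIMIT. A same-package walk-through (SendBufSizePredictor is package-private, so this demo assumes it lives alongside the class):

    package org.apache.hadoop.hbase.io.asyncfs;

    public class SendBufSizePredictorDemo {
      public static void main(String[] args) {
        SendBufSizePredictor p = new SendBufSizePredictor();
        System.out.println(p.initialSize()); // 4096: starts at 4KB
        System.out.println(p.guess(10_000)); // 8192: 10000 > 4096, capacity doubles
        System.out.println(p.guess(10_000)); // 16384: still short, doubles again
        System.out.println(p.guess(1_000));  // 8192: 16384 >> 1 = 8192 >= 1000, halves
        System.out.println(p.guess(8_000));  // 8192: 8192 >> 1 = 4096 < 8000, stays
      }
    }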

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/713d773f/devapidocs/src-html/org/apache/hadoop/hbase/ipc/NettyRpcFrameDecoder.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/ipc/NettyRpcFrameDecoder.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/ipc/NettyRpcFrameDecoder.html
new file mode 100644
index 000..48e79b7
--- /dev/null
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/ipc/NettyRpcFrameDecoder.html
@@ -0,0 +1,309 @@
+001/**
+002 * Licensed to the Apache Software Foundation (ASF) under one
+003 * or more contributor license agreements.  See the NOTICE file
+004 * distributed with this work for additional information
+005 * regarding copyright ownership.  The ASF licenses this file
+006 * to you under the Apache License, Version 2.0 (the
+007 * "License"); you may not use this file except in compliance
+008 * with the License.  You may obtain a copy of the License at
+009 *
+010 * http://www.apache.org/licenses/LICENSE-2.0
+011 *
+012 * Unless required by applicable law or agreed to in writing, software
+013 * distributed under the License is distributed on an "AS IS" BASIS,
+014 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+015 * See the License for the specific language governing permissions and
+016 * limitations under the License.
+017 */
+018package org.apache.hadoop.hbase.ipc;
+019
+020import java.io.IOException;
+021import java.util.List;
+022
+023import org.apache.hadoop.hbase.DoNotRetryIOException;
+024import org.apache.hadoop.hbase.client.VersionInfoUtil;
+025import org.apache.hadoop.hbase.exceptions.RequestTooBigException;
+026import org.apache.yetus.audience.InterfaceAudience;
+027
+028import org.apache.hadoop.hbase.shaded.io.netty.buffer.ByteBuf;
+029import org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelFutureListener;
+030import org.apache.hadoop.hbase.shaded.io.netty.channel.Channe

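Only the license header and imports of NettyRpcFrameDecoder survive above; the RequestTooBigException import hints at its job of bounding inbound frame sizes. As a generic illustration of that decoder pattern (not the HBase class itself, and using unshaded netty coordinates), a length-prefixed decoder with a size cap looks roughly like:

    import java.util.List;

    import io.netty.buffer.ByteBuf;
    import io.netty.channel.ChannelHandlerContext;
    import io.netty.handler.codec.ByteToMessageDecoder;

    // Generic sketch of a size-capped frame decoder; NOT the NettyRpcFrameDecoder source.
    public class CappedFrameDecoder extends ByteToMessageDecoder {
      private final int maxFrameSize;

      public CappedFrameDecoder(int maxFrameSize) {
        this.maxFrameSize = maxFrameSize;
      }

      @Override
      protected void decode(ChannelHandlerContext ctx, ByteBuf in, List<Object> out) {
        if (in.readableBytes() < 4) {
          return; // wait for the 4-byte length prefix
        }
        in.markReaderIndex();
        int frameLength = in.readInt();
        if (frameLength > maxFrameSize) {
          // The HBase decoder imports RequestTooBigException, presumably for this case.
          throw new IllegalStateException("frame of " + frameLength + " bytes exceeds cap");
        }
        if (in.readableBytes() < frameLength) {
          in.resetReaderIndex(); // whole frame not buffered yet; try again later
          return;
        }
        out.add(in.readRetainedSlice(frameLength));
      }
    }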


[25/51] [partial] hbase-site git commit: Published site at .

2017-11-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/713d773f/devapidocs/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.RegionReplicaSinkWriter.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.RegionReplicaSinkWriter.html
 
b/devapidocs/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.RegionReplicaSinkWriter.html
index 9947d82..168cbb4 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.RegionReplicaSinkWriter.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.RegionReplicaSinkWriter.html
@@ -118,7 +118,7 @@ var activeTableTab = "activeTableTab";
-static class RegionReplicaReplicationEndpoint.RegionReplicaSinkWriter
+static class RegionReplicaReplicationEndpoint.RegionReplicaSinkWriter
 extends WALSplitter.SinkWriter
@@ -236,7 +236,7 @@
 sink
-RegionReplicaReplicationEndpoint.RegionReplicaOutputSink sink
+RegionReplicaReplicationEndpoint.RegionReplicaOutputSink sink
@@ -245,7 +245,7 @@
 connection
-ClusterConnection connection
+ClusterConnection connection
@@ -254,7 +254,7 @@
 rpcControllerFactory
-RpcControllerFactory rpcControllerFactory
+RpcControllerFactory rpcControllerFactory
@@ -263,7 +263,7 @@
 rpcRetryingCallerFactory
-RpcRetryingCallerFactory rpcRetryingCallerFactory
+RpcRetryingCallerFactory rpcRetryingCallerFactory
@@ -272,7 +272,7 @@
 operationTimeout
-int operationTimeout
+int operationTimeout
@@ -281,7 +281,7 @@
 pool
-ExecutorService pool
+ExecutorService pool
@@ -290,7 +290,7 @@
 disabledAndDroppedTables
-org.apache.hadoop.hbase.shaded.com.google.common.cache.Cache<TableName, Boolean> disabledAndDroppedTables
+org.apache.hadoop.hbase.shaded.com.google.common.cache.Cache<TableName, Boolean> disabledAndDroppedTables
@@ -307,7 +307,7 @@
 RegionReplicaSinkWriter
-public RegionReplicaSinkWriter(RegionReplicaReplicationEndpoint.RegionReplicaOutputSink sink,
+public RegionReplicaSinkWriter(RegionReplicaReplicationEndpoint.RegionReplicaOutputSink sink,
     ClusterConnection connection,
     ExecutorService pool,
     int operationTimeout)
@@ -327,7 +327,7 @@
 append
-public void append(TableName tableName,
+public void append(TableName tableName,
     byte[] encodedRegionName,
     byte[] row,
     List<Entry> entries)

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/713d773f/devapidocs/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.RetryingRpcCallable.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.RetryingRpcCallable.html b/devapidocs/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.RetryingRpcCallable.html
index 546cf4f..4e5d3bd 100644
--- a/devapidocs/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.RetryingRpcCallable.html
+++ b/devapidocs/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.RetryingRpcCallable.html
@@ -117,7 +117,7 @@ var activeTableTab = "activeTableTab";
-static class RegionReplicaReplicationEndpoint.RetryingRpcCallable<V>
+static class RegionReplicaReplicationEndpoint.RetryingRpcCallable<V>
 extends Object
 implements Callable<V>
@@ -216,7 +216,7 @@ implements Callable<V>
 fa

[21/51] [partial] hbase-site git commit: Published site at .

2017-11-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/713d773f/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionUtils.ShortCircuitingClusterConnection.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionUtils.ShortCircuitingClusterConnection.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionUtils.ShortCircuitingClusterConnection.html
index 1bddf29..f667b93 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionUtils.ShortCircuitingClusterConnection.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionUtils.ShortCircuitingClusterConnection.html
@@ -124,380 +124,381 @@
 116        HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
 117    // Go big. Multiply by 10. If we can't get to meta after this many retries
 118    // then something seriously wrong.
-119    int serversideMultiplier = c.getInt("hbase.client.serverside.retries.multiplier", 10);
-120    int retries = hcRetries * serversideMultiplier;
-121    c.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, retries);
-122    log.info(sn + " server-side Connection retries=" + retries);
-123  }
-124
-125  /**
-126   * A ClusterConnection that will short-circuit RPC making direct invocations against the
-127   * localhost if the invocation target is 'this' server; save on network and protobuf
-128   * invocations.
-129   */
-130  // TODO This has to still do PB marshalling/unmarshalling stuff. Check how/whether we can avoid.
-131  @VisibleForTesting // Class is visible so can assert we are short-circuiting when expected.
-132  public static class ShortCircuitingClusterConnection extends ConnectionImplementation {
-133    private final ServerName serverName;
-134    private final AdminService.BlockingInterface localHostAdmin;
-135    private final ClientService.BlockingInterface localHostClient;
-136
-137    private ShortCircuitingClusterConnection(Configuration conf, ExecutorService pool, User user,
-138        ServerName serverName, AdminService.BlockingInterface admin,
-139        ClientService.BlockingInterface client)
-140        throws IOException {
-141      super(conf, pool, user);
-142      this.serverName = serverName;
-143      this.localHostAdmin = admin;
-144      this.localHostClient = client;
-145    }
-146
-147    @Override
-148    public AdminService.BlockingInterface getAdmin(ServerName sn) throws IOException {
-149      return serverName.equals(sn) ? this.localHostAdmin : super.getAdmin(sn);
-150    }
-151
-152    @Override
-153    public ClientService.BlockingInterface getClient(ServerName sn) throws IOException {
-154      return serverName.equals(sn) ? this.localHostClient : super.getClient(sn);
-155    }
-156
-157    @Override
-158    public MasterKeepAliveConnection getKeepAliveMasterService() throws MasterNotRunningException {
-159      if (this.localHostClient instanceof MasterService.BlockingInterface) {
-160        return new ShortCircuitMasterConnection((MasterService.BlockingInterface)this.localHostClient);
-161      }
-162      return super.getKeepAliveMasterService();
-163    }
-164  }
-165
-166  /**
-167   * Creates a short-circuit connection that can bypass the RPC layer (serialization,
-168   * deserialization, networking, etc..) when talking to a local server.
-169   * @param conf the current configuration
-170   * @param pool the thread pool to use for batch operations
-171   * @param user the user the connection is for
-172   * @param serverName the local server name
-173   * @param admin the admin interface of the local server
-174   * @param client the client interface of the local server
-175   * @return an short-circuit connection.
-176   * @throws IOException if IO failure occurred
-177   */
-178  public static ClusterConnection createShortCircuitConnection(final Configuration conf,
-179      ExecutorService pool, User user, final ServerName serverName,
-180      final AdminService.BlockingInterface admin, final ClientService.BlockingInterface client)
-181      throws IOException {
-182    if (user == null) {
-183      user = UserProvider.instantiate(conf).getCurrent();
-184    }
-185    return new ShortCircuitingClusterConnection(conf, pool, user, serverName, admin, client);
-186  }
-187
-188  /**
-189   * Setup the connection class, so that it will not depend on master being online. Used for testing
-190   * @param conf configuration to set
-191   */
-192  @VisibleForTesting
-193  public static void setupMasterlessConnection(Configuration conf) {
-194    conf.set(ClusterConnection.HBASE_CLIENT_CONNECTION_IMPL, MasterlessConnection.class.getName());
-195  }
-196
-197  /**
-198   * Some tests shut down the master. But table availability is a master RPC which is performed on
-199   * region re-lookups.
-200   */
-201  static class MasterlessConnection extends ConnectionImplementation {
-202    MasterlessConnect

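Two entry points in the hunk above are aimed at callers: createShortCircuitConnection for same-server RPC bypass, and the test-only setupMasterlessConnection. A minimal sketch of the latter (ConnectionUtils is audience-private, so this is test scaffolding rather than public API):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.ConnectionUtils;

    public class MasterlessSetup {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Swaps in MasterlessConnection via hbase.client.connection.impl, so
        // region re-lookups no longer issue the table-availability master RPC.
        ConnectionUtils.setupMasterlessConnection(conf);
      }
    }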
[50/51] [partial] hbase-site git commit: Published site at .

2017-11-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/713d773f/apache_hbase_reference_guide.pdf
--
diff --git a/apache_hbase_reference_guide.pdf b/apache_hbase_reference_guide.pdf
index aac8cae..85a97f7 100644
--- a/apache_hbase_reference_guide.pdf
+++ b/apache_hbase_reference_guide.pdf
@@ -5,16 +5,16 @@
 /Author (Apache HBase Team)
 /Creator (Asciidoctor PDF 1.5.0.alpha.15, based on Prawn 2.2.2)
 /Producer (Apache HBase Team)
-/ModDate (D:20171129144743+00'00')
-/CreationDate (D:20171129144743+00'00')
+/ModDate (D:20171130144806+00'00')
+/CreationDate (D:20171130144806+00'00')
 >>
 endobj
 2 0 obj
 << /Type /Catalog
 /Pages 3 0 R
 /Names 26 0 R
-/Outlines 4510 0 R
-/PageLabels 4734 0 R
+/Outlines 4515 0 R
+/PageLabels 4739 0 R
 /PageMode /UseOutlines
 /OpenAction [7 0 R /FitH 842.89]
 /ViewerPreferences << /DisplayDocTitle true

[16/51] [partial] hbase-site git commit: Published site at .

2017-11-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/713d773f/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutput.State.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutput.State.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutput.State.html
index de6cb11..dd54dd2 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutput.State.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutput.State.html
@@ -25,563 +25,558 @@
 017 */
 018package org.apache.hadoop.hbase.io.asyncfs;
 019
-020import static org.apache.hadoop.hbase.shaded.io.netty.handler.timeout.IdleState.READER_IDLE;
-021import static org.apache.hadoop.hbase.shaded.io.netty.handler.timeout.IdleState.WRITER_IDLE;
-022import static org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.HEART_BEAT_SEQNO;
-023import static org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.READ_TIMEOUT;
-024import static org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile;
-025import static org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.endFileLease;
-026import static org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.getStatus;
+020import static org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.HEART_BEAT_SEQNO;
+021import static org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.READ_TIMEOUT;
+022import static org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile;
+023import static org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.endFileLease;
+024import static org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.getStatus;
+025import static org.apache.hadoop.hbase.shaded.io.netty.handler.timeout.IdleState.READER_IDLE;
+026import static org.apache.hadoop.hbase.shaded.io.netty.handler.timeout.IdleState.WRITER_IDLE;
 027import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY;
 028
-029
-030import org.apache.hadoop.hbase.shaded.io.netty.buffer.ByteBuf;
-031import org.apache.hadoop.hbase.shaded.io.netty.buffer.ByteBufAllocator;
-032import org.apache.hadoop.hbase.shaded.io.netty.channel.Channel;
-033import org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelHandler.Sharable;
-034import org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelHandlerContext;
-035import org.apache.hadoop.hbase.shaded.io.netty.channel.EventLoop;
-036import org.apache.hadoop.hbase.shaded.io.netty.channel.SimpleChannelInboundHandler;
-037import org.apache.hadoop.hbase.shaded.io.netty.handler.codec.protobuf.ProtobufDecoder;
-038import org.apache.hadoop.hbase.shaded.io.netty.handler.codec.protobuf.ProtobufVarint32FrameDecoder;
-039import org.apache.hadoop.hbase.shaded.io.netty.handler.timeout.IdleStateEvent;
-040import org.apache.hadoop.hbase.shaded.io.netty.handler.timeout.IdleStateHandler;
-041import org.apache.hadoop.hbase.shaded.io.netty.util.concurrent.Future;
-042import org.apache.hadoop.hbase.shaded.io.netty.util.concurrent.Promise;
-043import org.apache.hadoop.hbase.shaded.io.netty.util.concurrent.PromiseCombiner;
-044
-045import java.io.IOException;
-046import java.nio.ByteBuffer;
-047import java.util.ArrayDeque;
-048import java.util.Collection;
-049import java.util.Collections;
-050import java.util.Deque;
-051import java.util.IdentityHashMap;
-052import java.util.List;
-053import java.util.Set;
-054import java.util.concurrent.CompletableFuture;
-055import java.util.concurrent.TimeUnit;
-056import java.util.function.Supplier;
-057
-058import org.apache.hadoop.conf.Configuration;
-059import org.apache.hadoop.crypto.Encryptor;
-060import org.apache.hadoop.fs.Path;
+029import java.io.IOException;
+030import java.io.InterruptedIOException;
+031import java.nio.ByteBuffer;
+032import java.util.Collection;
+033import java.util.Collections;
+034import java.util.Iterator;
+035import java.util.List;
+036import java.util.Set;
+037import java.util.concurrent.CompletableFuture;
+038import java.util.concurrent.ConcurrentHashMap;
+039import java.util.concurrent.ConcurrentLinkedDeque;
+040import java.util.concurrent.ExecutionException;
+041import java.util.concurrent.TimeUnit;
+042import java.util.function.Supplier;
+043
+044import org.apache.hadoop.conf.Configuration;
+045import org.apache.hadoop.crypto.Encryptor;
+046import org.apache.hadoop.fs.Path;
+047import org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.CancelOnClose;
+048import org.apache.hadoop.hbase.util.CancelableProgressable;
+049import org.apache.hadoop.hbase.util.FSUtils;
+050import org.apache.hadoop.hdfs.DFSClient;
+051import org.apache.hadoop.hdfs.DistributedFileSystem;
+0

[51/51] [partial] hbase-site git commit: Published site at .

2017-11-30 Thread git-site-role
Published site at .


Project: http://git-wip-us.apache.org/repos/asf/hbase-site/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase-site/commit/713d773f
Tree: http://git-wip-us.apache.org/repos/asf/hbase-site/tree/713d773f
Diff: http://git-wip-us.apache.org/repos/asf/hbase-site/diff/713d773f

Branch: refs/heads/asf-site
Commit: 713d773f1e863e053f0294d4dd5429846dad3254
Parents: 51b7ea7
Author: jenkins 
Authored: Thu Nov 30 15:18:20 2017 +
Committer: jenkins 
Committed: Thu Nov 30 15:18:20 2017 +

--
 acid-semantics.html | 4 +-
 apache_hbase_reference_guide.pdf| 20704 +
 apidocs/constant-values.html|   176 +-
 apidocs/index-all.html  | 4 +
 apidocs/org/apache/hadoop/hbase/HConstants.html |   508 +-
 .../org/apache/hadoop/hbase/HConstants.html |   997 +-
 book.html   |90 +-
 bulk-loads.html | 4 +-
 checkstyle-aggregate.html   | 39980 -
 checkstyle.rss  |74 +-
 coc.html| 4 +-
 cygwin.html | 4 +-
 dependencies.html   | 4 +-
 dependency-convergence.html | 4 +-
 dependency-info.html| 4 +-
 dependency-management.html  | 4 +-
 devapidocs/allclasses-frame.html| 3 +-
 devapidocs/allclasses-noframe.html  | 3 +-
 devapidocs/constant-values.html |   208 +-
 devapidocs/index-all.html   |   180 +-
 .../org/apache/hadoop/hbase/HConstants.html |   510 +-
 .../hadoop/hbase/backup/package-tree.html   | 2 +-
 .../hadoop/hbase/class-use/ServerName.html  |97 +-
 .../ConnectionUtils.MasterlessConnection.html   | 6 +-
 ...nUtils.ShortCircuitingClusterConnection.html |16 +-
 .../hadoop/hbase/client/ConnectionUtils.html|62 +-
 .../hbase/client/class-use/RegionInfo.html  |   128 +-
 .../hadoop/hbase/client/package-tree.html   |22 +-
 .../class-use/UnexpectedStateException.html | 2 +-
 .../hadoop/hbase/filter/package-tree.html   | 8 +-
 .../hadoop/hbase/io/asyncfs/AsyncFSOutput.html  |24 +-
 .../hbase/io/asyncfs/AsyncFSOutputHelper.html   |14 +-
 ...FanOutOneBlockAsyncDFSOutput.AckHandler.html |14 +-
 .../FanOutOneBlockAsyncDFSOutput.Callback.html  |24 +-
 .../FanOutOneBlockAsyncDFSOutput.State.html | 4 +-
 .../asyncfs/FanOutOneBlockAsyncDFSOutput.html   |   285 +-
 ...OneBlockAsyncDFSOutputHelper.BlockAdder.html | 4 +-
 ...BlockAsyncDFSOutputHelper.CancelOnClose.html | 8 +-
 ...ockAsyncDFSOutputHelper.ChecksumCreater.html | 4 +-
 ...ckAsyncDFSOutputHelper.DFSClientAdaptor.html | 4 +-
 ...neBlockAsyncDFSOutputHelper.FileCreator.html | 6 +-
 ...eBlockAsyncDFSOutputHelper.LeaseManager.html | 6 +-
 ...kAsyncDFSOutputHelper.NameNodeException.html | 6 +-
 ...utOneBlockAsyncDFSOutputHelper.PBHelper.html | 6 +-
 ...DFSOutputHelper.PipelineAckStatusGetter.html | 4 +-
 ...kAsyncDFSOutputHelper.StorageTypeSetter.html | 4 +-
 .../FanOutOneBlockAsyncDFSOutputHelper.html |   106 +-
 ...utputSaslHelper.TransparentCryptoHelper.html | 4 +-
 .../hbase/io/asyncfs/SendBufSizePredictor.html  |   340 +
 .../io/asyncfs/class-use/AsyncFSOutput.html | 4 +-
 .../FanOutOneBlockAsyncDFSOutput.Callback.html  | 2 +-
 .../class-use/FanOutOneBlockAsyncDFSOutput.html | 8 +-
 .../asyncfs/class-use/SendBufSizePredictor.html |   165 +
 .../hadoop/hbase/io/asyncfs/package-frame.html  | 1 +
 .../hbase/io/asyncfs/package-summary.html   | 6 +
 .../hadoop/hbase/io/asyncfs/package-tree.html   | 1 +
 .../hadoop/hbase/io/asyncfs/package-use.html| 5 +
 .../hadoop/hbase/io/hfile/package-tree.html | 2 +-
 .../hadoop/hbase/ipc/NettyRpcDuplexHandler.html | 4 +-
 .../hadoop/hbase/ipc/NettyRpcFrameDecoder.html  |   528 +
 .../apache/hadoop/hbase/ipc/NettyRpcServer.html |45 +-
 .../ipc/NettyRpcServerPreambleHandler.html  |25 +-
 .../ipc/class-use/NettyRpcFrameDecoder.html |   125 +
 .../NettyRpcServerPreambleHandler.html  |44 +-
 .../ipc/class-use/NettyServerRpcConnection.html |21 +
 .../apache/hadoop/hbase/ipc/package-frame.html  | 1 +
 .../hadoop/hbase/ipc/package-summary.html   |62 +-
 .../apache/hadoop/hbase/ipc/package-tree.html   | 7 +-
 .../apache/hadoop/hbase/ipc/package-use.html|59 +-
 .../hadoop/hbase/mapreduce/package-tree.html| 4 +-
 .../hadoop/hbase/master/RegionState.State.html  |36 +-
 .../apache/hadoop/hbase/master/RegionState.html |   215 +-
 .../master/assignment/AssignmentManager.html|52 +-
 .../RegionStateStore

[24/51] [partial] hbase-site git commit: Published site at .

2017-11-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/713d773f/devapidocs/src-html/org/apache/hadoop/hbase/HConstants.OperationStatusCode.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/HConstants.OperationStatusCode.html b/devapidocs/src-html/org/apache/hadoop/hbase/HConstants.OperationStatusCode.html
index 11bfb15..915e78a 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/HConstants.OperationStatusCode.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/HConstants.OperationStatusCode.html
@@ -782,557 +782,562 @@
 774  /**
 775   * Default value of {@link #HBASE_CLIENT_RETRIES_NUMBER}.
 776   */
-777  public static final int DEFAULT_HBASE_CLIENT_RETRIES_NUMBER = 35;
+777  public static final int DEFAULT_HBASE_CLIENT_RETRIES_NUMBER = 10;
 778
-779  /**
-780   * Parameter name to set the default scanner caching for all clients.
-781   */
-782  public static final String HBASE_CLIENT_SCANNER_CACHING = "hbase.client.scanner.caching";
+779  public static final String HBASE_CLIENT_SERVERSIDE_RETRIES_MULTIPLIER =
+780      "hbase.client.serverside.retries.multiplier";
+781
+782  public static final int DEFAULT_HBASE_CLIENT_SERVERSIDE_RETRIES_MULTIPLIER = 3;
 783
 784  /**
-785   * Default value for {@link #HBASE_CLIENT_SCANNER_CACHING}
+785   * Parameter name to set the default scanner caching for all clients.
 786   */
-787  public static final int DEFAULT_HBASE_CLIENT_SCANNER_CACHING = Integer.MAX_VALUE;
+787  public static final String HBASE_CLIENT_SCANNER_CACHING = "hbase.client.scanner.caching";
 788
 789  /**
-790   * Parameter name for number of rows that will be fetched when calling next on
-791   * a scanner if it is not served from memory. Higher caching values will
-792   * enable faster scanners but will eat up more memory and some calls of next
-793   * may take longer and longer times when the cache is empty.
-794   */
-795  public static final String HBASE_META_SCANNER_CACHING = "hbase.meta.scanner.caching";
-796
-797  /**
-798   * Default value of {@link #HBASE_META_SCANNER_CACHING}.
+790   * Default value for {@link #HBASE_CLIENT_SCANNER_CACHING}
+791   */
+792  public static final int DEFAULT_HBASE_CLIENT_SCANNER_CACHING = Integer.MAX_VALUE;
+793
+794  /**
+795   * Parameter name for number of rows that will be fetched when calling next on
+796   * a scanner if it is not served from memory. Higher caching values will
+797   * enable faster scanners but will eat up more memory and some calls of next
+798   * may take longer and longer times when the cache is empty.
 799   */
-800  public static final int DEFAULT_HBASE_META_SCANNER_CACHING = 100;
+800  public static final String HBASE_META_SCANNER_CACHING = "hbase.meta.scanner.caching";
 801
 802  /**
-803   * Parameter name for number of versions, kept by meta table.
+803   * Default value of {@link #HBASE_META_SCANNER_CACHING}.
 804   */
-805  public static final String HBASE_META_VERSIONS = "hbase.meta.versions";
+805  public static final int DEFAULT_HBASE_META_SCANNER_CACHING = 100;
 806
 807  /**
-808   * Default value of {@link #HBASE_META_VERSIONS}.
+808   * Parameter name for number of versions, kept by meta table.
 809   */
-810  public static final int DEFAULT_HBASE_META_VERSIONS = 3;
+810  public static final String HBASE_META_VERSIONS = "hbase.meta.versions";
 811
 812  /**
-813   * Parameter name for number of versions, kept by meta table.
+813   * Default value of {@link #HBASE_META_VERSIONS}.
 814   */
-815  public static final String HBASE_META_BLOCK_SIZE = "hbase.meta.blocksize";
+815  public static final int DEFAULT_HBASE_META_VERSIONS = 3;
 816
 817  /**
-818   * Default value of {@link #HBASE_META_BLOCK_SIZE}.
+818   * Parameter name for number of versions, kept by meta table.
 819   */
-820  public static final int DEFAULT_HBASE_META_BLOCK_SIZE = 8 * 1024;
+820  public static final String HBASE_META_BLOCK_SIZE = "hbase.meta.blocksize";
 821
 822  /**
-823   * Parameter name for unique identifier for this {@link org.apache.hadoop.conf.Configuration}
-824   * instance. If there are two or more {@link org.apache.hadoop.conf.Configuration} instances that,
-825   * for all intents and purposes, are the same except for their instance ids, then they will not be
-826   * able to share the same org.apache.hadoop.hbase.client.HConnection instance. On the other hand,
-827   * even if the instance ids are the same, it could result in non-shared
-828   * org.apache.hadoop.hbase.client.HConnection instances if some of the other connection parameters
-829   * differ.
-830   */
-831  public static final String HBASE_CLIENT_INSTANCE_ID = "hbase.client.instance.id";
-832
-833  /**
-834   * The client scanner timeout period in milliseconds.
+823   * Default value of {@link #HBASE_META_BLOCK_SIZE}.
+824   */
+825  public static final int DEFAULT_HBASE_META_BLOCK_SIZE = 8 * 1024;
+826
+827  /**

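The hunk above lowers DEFAULT_HBASE_CLIENT_RETRIES_NUMBER from 35 to 10 and introduces a server-side retries multiplier (default 3), so server-to-server calls keep an effective budget of roughly 10 * 3 = 30 attempts. As a hedged illustration of pinning these keys explicitly on a client, assuming a stock Hadoop Configuration (the retry-count key string "hbase.client.retries.number" is the long-standing HConstants value; the helper class itself is hypothetical):

// Hedged illustration only: key strings come from the hunk above; this
// helper class is hypothetical and not part of HBase.
import org.apache.hadoop.conf.Configuration;

public class ClientRetryConfigExample {
  public static Configuration tuneRetries(Configuration conf) {
    // New default shown in the diff: 10 client retries (down from 35).
    conf.setInt("hbase.client.retries.number", 10);
    // New key shown in the diff: server-side calls retry a multiple of the
    // client count; default multiplier 3, i.e. 10 * 3 = 30 attempts.
    conf.setInt("hbase.client.serverside.retries.multiplier", 3);
    // Scanner caching keys also visible in the hunk.
    conf.setInt("hbase.client.scanner.caching", Integer.MAX_VALUE);
    conf.setInt("hbase.meta.scanner.caching", 100);
    return conf;
  }
}
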
[15/51] [partial] hbase-site git commit: Published site at .

2017-11-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/713d773f/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutput.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutput.html b/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutput.html
index de6cb11..dd54dd2 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutput.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutput.html
@@ -25,563 +25,558 @@
 017 */
 018package org.apache.hadoop.hbase.io.asyncfs;
 019
-020import static org.apache.hadoop.hbase.shaded.io.netty.handler.timeout.IdleState.READER_IDLE;
-021import static org.apache.hadoop.hbase.shaded.io.netty.handler.timeout.IdleState.WRITER_IDLE;
-022import static org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.HEART_BEAT_SEQNO;
-023import static org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.READ_TIMEOUT;
-024import static org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile;
-025import static org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.endFileLease;
-026import static org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.getStatus;
+020import static org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.HEART_BEAT_SEQNO;
+021import static org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.READ_TIMEOUT;
+022import static org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile;
+023import static org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.endFileLease;
+024import static org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.getStatus;
+025import static org.apache.hadoop.hbase.shaded.io.netty.handler.timeout.IdleState.READER_IDLE;
+026import static org.apache.hadoop.hbase.shaded.io.netty.handler.timeout.IdleState.WRITER_IDLE;
 027import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY;
 028
-029
-030import org.apache.hadoop.hbase.shaded.io.netty.buffer.ByteBuf;
-031import org.apache.hadoop.hbase.shaded.io.netty.buffer.ByteBufAllocator;
-032import org.apache.hadoop.hbase.shaded.io.netty.channel.Channel;
-033import org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelHandler.Sharable;
-034import org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelHandlerContext;
-035import org.apache.hadoop.hbase.shaded.io.netty.channel.EventLoop;
-036import org.apache.hadoop.hbase.shaded.io.netty.channel.SimpleChannelInboundHandler;
-037import org.apache.hadoop.hbase.shaded.io.netty.handler.codec.protobuf.ProtobufDecoder;
-038import org.apache.hadoop.hbase.shaded.io.netty.handler.codec.protobuf.ProtobufVarint32FrameDecoder;
-039import org.apache.hadoop.hbase.shaded.io.netty.handler.timeout.IdleStateEvent;
-040import org.apache.hadoop.hbase.shaded.io.netty.handler.timeout.IdleStateHandler;
-041import org.apache.hadoop.hbase.shaded.io.netty.util.concurrent.Future;
-042import org.apache.hadoop.hbase.shaded.io.netty.util.concurrent.Promise;
-043import org.apache.hadoop.hbase.shaded.io.netty.util.concurrent.PromiseCombiner;
-044
-045import java.io.IOException;
-046import java.nio.ByteBuffer;
-047import java.util.ArrayDeque;
-048import java.util.Collection;
-049import java.util.Collections;
-050import java.util.Deque;
-051import java.util.IdentityHashMap;
-052import java.util.List;
-053import java.util.Set;
-054import java.util.concurrent.CompletableFuture;
-055import java.util.concurrent.TimeUnit;
-056import java.util.function.Supplier;
-057
-058import org.apache.hadoop.conf.Configuration;
-059import org.apache.hadoop.crypto.Encryptor;
-060import org.apache.hadoop.fs.Path;
+029import java.io.IOException;
+030import java.io.InterruptedIOException;
+031import java.nio.ByteBuffer;
+032import java.util.Collection;
+033import java.util.Collections;
+034import java.util.Iterator;
+035import java.util.List;
+036import java.util.Set;
+037import java.util.concurrent.CompletableFuture;
+038import java.util.concurrent.ConcurrentHashMap;
+039import java.util.concurrent.ConcurrentLinkedDeque;
+040import java.util.concurrent.ExecutionException;
+041import java.util.concurrent.TimeUnit;
+042import java.util.function.Supplier;
+043
+044import org.apache.hadoop.conf.Configuration;
+045import org.apache.hadoop.crypto.Encryptor;
+046import org.apache.hadoop.fs.Path;
+047import org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.CancelOnClose;
+048import org.apache.hadoop.hbase.util.CancelableProgressable;
+049import org.apache.hadoop.hbase.util.FSUtils;
+050import org.apache.hadoop.hdfs.DFSClient;
+051import org.apache.hadoop.hdfs.DistributedFileSystem;
+052import org.apache.hadoop.hd

[48/51] [partial] hbase-site git commit: Published site at .

2017-11-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/713d773f/apidocs/org/apache/hadoop/hbase/HConstants.html
--
diff --git a/apidocs/org/apache/hadoop/hbase/HConstants.html 
b/apidocs/org/apache/hadoop/hbase/HConstants.html
index d511d60..30890fe 100644
--- a/apidocs/org/apache/hadoop/hbase/HConstants.html
+++ b/apidocs/org/apache/hadoop/hbase/HConstants.html
@@ -430,447 +430,451 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 
+static int
+DEFAULT_HBASE_CLIENT_SERVERSIDE_RETRIES_MULTIPLIER 
+
+
 static double
 DEFAULT_HBASE_MASTER_BALANCER_MAX_RIT_PERCENT
 Default value for the max percent of regions in 
transition
 
 
-
+
 static int
 DEFAULT_HBASE_META_BLOCK_SIZE
 Default value of HBASE_META_BLOCK_SIZE.
 
 
-
+
 static int
 DEFAULT_HBASE_META_SCANNER_CACHING
 Default value of HBASE_META_SCANNER_CACHING.
 
 
-
+
 static int
 DEFAULT_HBASE_META_VERSIONS
 Default value of HBASE_META_VERSIONS.
 
 
-
+
 static int
 DEFAULT_HBASE_RPC_SHORTOPERATION_TIMEOUT
 Default value of HBASE_RPC_SHORTOPERATION_TIMEOUT_KEY
 
 
-
+
 static int
 DEFAULT_HBASE_RPC_TIMEOUT
 Default value of HBASE_RPC_TIMEOUT_KEY
 
 
-
+
 static int
 DEFAULT_HBASE_SERVER_PAUSE
 Default value of HBASE_SERVER_PAUSE.
 
 
-
+
 static long
 DEFAULT_HBASE_SERVER_SCANNER_MAX_RESULT_SIZE
 Maximum number of bytes returned when calling a scanner's 
next method.
 
 
-
+
 static int
 DEFAULT_HEALTH_FAILURE_THRESHOLD 
 
-
+
 static long
 DEFAULT_HEALTH_SCRIPT_TIMEOUT 
 
-
+
 static float
 DEFAULT_HEAP_OCCUPANCY_HIGH_WATERMARK 
 
-
+
 static float
 DEFAULT_HEAP_OCCUPANCY_LOW_WATERMARK 
 
-
+
 static http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String
 DEFAULT_HOST
 default host address
 
 
-
+
 static boolean
 DEFAULT_HREGION_EDITS_REPLAY_SKIP_ERRORS 
 
-
+
 static int
 DEFAULT_HREGION_MEMSTORE_BLOCK_MULTIPLIER
 Default value for 
hbase.hregion.memstore.block.multiplier
 
 
-
+
 static int
 DEFAULT_HSTORE_OPEN_AND_CLOSE_THREADS_MAX
 The default number for the max number of threads used for 
opening and
  closing stores or store files in parallel
 
 
-
+
 static int
 DEFAULT_MASTER_INFOPORT
 default port for master web api
 
 
-
+
 static int
 DEFAULT_MASTER_PORT
 default port that the master listens on
 
 
-
+
 static boolean
 DEFAULT_MASTER_TYPE_BACKUP
 by default every master is a possible primary master unless 
the conf explicitly overrides it
 
 
-
+
 static long
 DEFAULT_MAX_FILE_SIZE
 Default maximum file size
 
 
-
+
 static int
 DEFAULT_META_REPLICA_NUM 
 
-
+
 static double
 DEFAULT_REGION_SERVER_HANDLER_ABORT_ON_ERROR_PERCENT 
 
-
+
 static int
 DEFAULT_REGION_SERVER_HANDLER_COUNT 
 
-
+
 static int
 DEFAULT_REGION_SERVER_HIGH_PRIORITY_HANDLER_COUNT 
 
-
+
 static int
 DEFAULT_REGION_SERVER_REPLICATION_HANDLER_COUNT 
 
-
+
 static int
 DEFAULT_REGIONSERVER_INFOPORT
 default port for region server web api
 
 
-
+
 static long
 DEFAULT_REGIONSERVER_METRICS_PERIOD 
 
-
+
 static int
 DEFAULT_REGIONSERVER_PORT
 Default port region server listens on.
 
 
-
+
 static http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String
 DEFAULT_SNAPSHOT_RESTORE_FAILSAFE_NAME 
 
-
+
 static boolean
 DEFAULT_SNAPSHOT_RESTORE_TAKE_FAILSAFE_SNAPSHOT 
 
-
+
 static http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String
 DEFAULT_STATUS_MULTICAST_ADDRESS 
 
-
+
 static http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String
 DEFAULT_STATUS_MULTICAST_BIND_ADDRESS 
 
-
+
 static int
 DEFAULT_STATUS_MULTICAST_PORT 
 
-
+
 static http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String
 DEFAULT_TEMPORARY_HDFS_DIRECTORY 
 
-
+
 static int
 DEFAULT_THREAD_WAKE_FREQUENCY
 Default value for thread wake frequency
 
 
-
+
 static boolean
 DEFAULT_USE_META_REPLICAS 
 
-
+
 static int
 DEFAULT_VERSION_FILE_WRITE_ATTEMPTS
 Parameter name for how often we should try to write a 
version file, before failing
 
 
-
+
 static http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String
 DEFAULT_WAL_STORAGE_POLICY 
 
-
+
 static int
 DEFAULT_ZK_SESSION_TIMEOUT
 Default value for ZooKeeper session timeout
 
 
-
+
 static http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String
 DEFAULT_ZOOKEEPER_ZNODE_PARENT 
 
-
+
 static int
 DEFAULT_ZOOKEPER_CLIENT_PORT
 Default client port that the zookeeper listens on
 
 
-
+
 static int
 DEFAULT_ZOOKEPER_MAX_CLIENT_CNXNS
 Default limit on concurrent client-side zookeeper 
connections
 
 
-
+
 static int
 DELIMITER
 delimiter used between portions 

[47/51] [partial] hbase-site git commit: Published site at .

2017-11-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/713d773f/apidocs/src-html/org/apache/hadoop/hbase/HConstants.html
--
diff --git a/apidocs/src-html/org/apache/hadoop/hbase/HConstants.html b/apidocs/src-html/org/apache/hadoop/hbase/HConstants.html
index 11bfb15..915e78a 100644
--- a/apidocs/src-html/org/apache/hadoop/hbase/HConstants.html
+++ b/apidocs/src-html/org/apache/hadoop/hbase/HConstants.html
@@ -782,557 +782,562 @@
 774  /**
 775   * Default value of {@link #HBASE_CLIENT_RETRIES_NUMBER}.
 776   */
-777  public static final int DEFAULT_HBASE_CLIENT_RETRIES_NUMBER = 35;
+777  public static final int DEFAULT_HBASE_CLIENT_RETRIES_NUMBER = 10;
 778
-779  /**
-780   * Parameter name to set the default scanner caching for all clients.
-781   */
-782  public static final String HBASE_CLIENT_SCANNER_CACHING = "hbase.client.scanner.caching";
+779  public static final String HBASE_CLIENT_SERVERSIDE_RETRIES_MULTIPLIER =
+780      "hbase.client.serverside.retries.multiplier";
+781
+782  public static final int DEFAULT_HBASE_CLIENT_SERVERSIDE_RETRIES_MULTIPLIER = 3;
 783
 784  /**
-785   * Default value for {@link #HBASE_CLIENT_SCANNER_CACHING}
+785   * Parameter name to set the default scanner caching for all clients.
 786   */
-787  public static final int DEFAULT_HBASE_CLIENT_SCANNER_CACHING = Integer.MAX_VALUE;
+787  public static final String HBASE_CLIENT_SCANNER_CACHING = "hbase.client.scanner.caching";
 788
 789  /**
-790   * Parameter name for number of rows that will be fetched when calling next on
-791   * a scanner if it is not served from memory. Higher caching values will
-792   * enable faster scanners but will eat up more memory and some calls of next
-793   * may take longer and longer times when the cache is empty.
-794   */
-795  public static final String HBASE_META_SCANNER_CACHING = "hbase.meta.scanner.caching";
-796
-797  /**
-798   * Default value of {@link #HBASE_META_SCANNER_CACHING}.
+790   * Default value for {@link #HBASE_CLIENT_SCANNER_CACHING}
+791   */
+792  public static final int DEFAULT_HBASE_CLIENT_SCANNER_CACHING = Integer.MAX_VALUE;
+793
+794  /**
+795   * Parameter name for number of rows that will be fetched when calling next on
+796   * a scanner if it is not served from memory. Higher caching values will
+797   * enable faster scanners but will eat up more memory and some calls of next
+798   * may take longer and longer times when the cache is empty.
 799   */
-800  public static final int DEFAULT_HBASE_META_SCANNER_CACHING = 100;
+800  public static final String HBASE_META_SCANNER_CACHING = "hbase.meta.scanner.caching";
 801
 802  /**
-803   * Parameter name for number of versions, kept by meta table.
+803   * Default value of {@link #HBASE_META_SCANNER_CACHING}.
 804   */
-805  public static final String HBASE_META_VERSIONS = "hbase.meta.versions";
+805  public static final int DEFAULT_HBASE_META_SCANNER_CACHING = 100;
 806
 807  /**
-808   * Default value of {@link #HBASE_META_VERSIONS}.
+808   * Parameter name for number of versions, kept by meta table.
 809   */
-810  public static final int DEFAULT_HBASE_META_VERSIONS = 3;
+810  public static final String HBASE_META_VERSIONS = "hbase.meta.versions";
 811
 812  /**
-813   * Parameter name for number of versions, kept by meta table.
+813   * Default value of {@link #HBASE_META_VERSIONS}.
 814   */
-815  public static final String HBASE_META_BLOCK_SIZE = "hbase.meta.blocksize";
+815  public static final int DEFAULT_HBASE_META_VERSIONS = 3;
 816
 817  /**
-818   * Default value of {@link #HBASE_META_BLOCK_SIZE}.
+818   * Parameter name for number of versions, kept by meta table.
 819   */
-820  public static final int DEFAULT_HBASE_META_BLOCK_SIZE = 8 * 1024;
+820  public static final String HBASE_META_BLOCK_SIZE = "hbase.meta.blocksize";
 821
 822  /**
-823   * Parameter name for unique identifier for this {@link org.apache.hadoop.conf.Configuration}
-824   * instance. If there are two or more {@link org.apache.hadoop.conf.Configuration} instances that,
-825   * for all intents and purposes, are the same except for their instance ids, then they will not be
-826   * able to share the same org.apache.hadoop.hbase.client.HConnection instance. On the other hand,
-827   * even if the instance ids are the same, it could result in non-shared
-828   * org.apache.hadoop.hbase.client.HConnection instances if some of the other connection parameters
-829   * differ.
-830   */
-831  public static final String HBASE_CLIENT_INSTANCE_ID = "hbase.client.instance.id";
-832
-833  /**
-834   * The client scanner timeout period in milliseconds.
+823   * Default value of {@link #HBASE_META_BLOCK_SIZE}.
+824   */
+825  public static final int DEFAULT_HBASE_META_BLOCK_SIZE = 8 * 1024;
+826
+827  /**
+828   * Parameter name for unique identifier for this {@link org.apache.hadoop.conf.Configuration}
+829   * instance.

[32/51] [partial] hbase-site git commit: Published site at .

2017-11-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/713d773f/devapidocs/org/apache/hadoop/hbase/master/assignment/RegionStates.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/master/assignment/RegionStates.html b/devapidocs/org/apache/hadoop/hbase/master/assignment/RegionStates.html
index e81ea36..e1c6c45 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/assignment/RegionStates.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/assignment/RegionStates.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":9,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10};
+var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":9,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10};
 var tabs = {65535:["t0","All Methods"],1:["t1","Static 
Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -110,7 +110,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Private
-public class RegionStates
+public class RegionStates
 extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">Object
 RegionStates contains a set of Maps that describes the 
in-memory state of the AM, with
  the regions available in the system, the region in transition, the offline 
regions and
@@ -257,8 +257,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 RegionStates.ServerStateNode
-addRegionToServer(ServerName serverName,
- RegionStates.RegionStateNode regionNode) 
+addRegionToServer(RegionStates.RegionStateNode regionNode) 
 
 
 RegionStates.RegionFailedOpen
@@ -274,222 +273,214 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 protected RegionStates.RegionStateNode
-createRegionNode(RegionInfo regionInfo) 
+createRegionStateNode(RegionInfo regionInfo) 
 
 
-private RegionState
-createRegionState(RegionStates.RegionStateNode node) 
-
-
 void
 deleteRegion(RegionInfo regionInfo) 
 
-
+
 http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List
 getAssignedRegions() 
 
-
+
 http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true";
 title="class or interface in java.util">MapMapList>>
 getAssignmentsByTable() 
 
-
+
 http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true";
 title="class or interface in java.util">MapMapList>>
 getAssignmentsByTable(boolean forceByCluster)
 This is an EXPENSIVE clone.
 
 
-
+
 double
 getAverageLoad() 
 
-
+
 RegionStates.RegionFailedOpen
 getFailedOpen(RegionInfo regionInfo) 
 
-
+
 protected RegionStates.RegionStateNode
-getOrCreateRegionNode(RegionInfo regionInfo) 
+getOrCreateRegionStateNode(RegionInfo regionInfo) 
 
-
+
 RegionStates.ServerStateNode
 getOrCreateServer(ServerName serverName) 
 
-
+
 http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true";
 title="class or interface in java.util">Map
 getRegionAssignments() 
 
-
+
 http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true";
 title="class or interface in java.util">MapList>
 getRegionByStateOfTable(TableName tableName) 
 
-
+
 http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class

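The RegionStates javadoc above describes a set of maps holding the assignment manager's in-memory state, and the diff renames its node accessors (createRegionStateNode, getOrCreateRegionStateNode). A much-simplified, hypothetical model of that kind of bookkeeping, keyed by region name rather than RegionInfo purely for illustration, not the HBase implementation:

// Hypothetical sketch of map-based region-state bookkeeping; names and
// shapes are illustrative only.
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

public class RegionStatesSketch {
  enum State { OFFLINE, OPENING, OPEN, CLOSING, CLOSED }

  static final class RegionStateNode {
    final String regionName;
    volatile State state = State.OFFLINE;
    volatile String serverName;   // current or intended location
    RegionStateNode(String regionName) { this.regionName = regionName; }
  }

  private final ConcurrentMap<String, RegionStateNode> regionsMap = new ConcurrentHashMap<>();

  // Mirrors the renamed getOrCreateRegionStateNode(RegionInfo) shown in the
  // diff, simplified to a String key.
  RegionStateNode getOrCreateRegionStateNode(String regionName) {
    return regionsMap.computeIfAbsent(regionName, RegionStateNode::new);
  }
}
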
[41/51] [partial] hbase-site git commit: Published site at .

2017-11-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/713d773f/devapidocs/org/apache/hadoop/hbase/backup/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/backup/package-tree.html 
b/devapidocs/org/apache/hadoop/hbase/backup/package-tree.html
index ae84eb3..3480c51 100644
--- a/devapidocs/org/apache/hadoop/hbase/backup/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/backup/package-tree.html
@@ -168,9 +168,9 @@
 java.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true";
 title="class or interface in java.lang">Enum (implements java.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true";
 title="class or interface in java.lang">Comparable, java.io.http://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true";
 title="class or interface in java.io">Serializable)
 
 org.apache.hadoop.hbase.backup.BackupInfo.BackupPhase
+org.apache.hadoop.hbase.backup.BackupInfo.BackupState
 org.apache.hadoop.hbase.backup.BackupType
 org.apache.hadoop.hbase.backup.BackupRestoreConstants.BackupCommand
-org.apache.hadoop.hbase.backup.BackupInfo.BackupState
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/713d773f/devapidocs/org/apache/hadoop/hbase/class-use/ServerName.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/class-use/ServerName.html 
b/devapidocs/org/apache/hadoop/hbase/class-use/ServerName.html
index f6741a4..a1176d2 100644
--- a/devapidocs/org/apache/hadoop/hbase/class-use/ServerName.html
+++ b/devapidocs/org/apache/hadoop/hbase/class-use/ServerName.html
@@ -3975,11 +3975,6 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 
 
-RegionStates.ServerStateNode
-RegionStates.addRegionToServer(ServerName serverName,
- RegionStates.RegionStateNode regionNode) 
-
-
 protected boolean
 RegionTransitionProcedure.addToRemoteDispatcher(MasterProcedureEnv env,
  ServerName targetServer)
@@ -3989,31 +3984,31 @@ Input/OutputFormats, a table indexing MapReduce job, 
and utility methods.
  to pick it up.
 
 
-
+
 void
 AssignmentManager.assignMeta(RegionInfo metaRegionInfo,
   ServerName serverName) 
 
-
+
 AssignProcedure
 AssignmentManager.createAssignProcedure(RegionInfo regionInfo,
  ServerName targetServer) 
 
-
+
 UnassignProcedure
 AssignmentManager.createUnassignProcedure(RegionInfo regionInfo,
ServerName destinationServer,
boolean force) 
 
-
+
 private http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List
 AssignmentManager.getCarryingSystemTables(ServerName serverName) 
 
-
+
 RegionStates.ServerStateNode
 RegionStates.getOrCreateServer(ServerName serverName) 
 
-
+
 (package private) static 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse
 Util.getRegionInfoResponse(MasterProcedureEnv env,
  ServerName regionLocation,
@@ -4021,50 +4016,50 @@ Input/OutputFormats, a table indexing MapReduce job, 
and utility methods.
 Raw call to remote regionserver to get info on a particular 
region.
 
 
-
+
 (package private) static 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse
 Util.getRegionInfoResponse(MasterProcedureEnv env,
  ServerName regionLocation,
  RegionInfo hri,
  boolean includeBestSplitRow) 
 
-
+
 protected RegionStates.ServerStateNode
 RegionStates.getServerNode(ServerName serverName) 
 
-
+
 http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List
 RegionStates.getServerRegionInfoSet(ServerName serverName)
 Returns the set of regions hosted by the specified 
server
 
 
-
+
 int
 AssignmentManager.getServerVersion(ServerName serverName) 
 
-
+
 boolean
 AssignmentManager.isCarryingMeta(ServerName serverName) 
 
-
+
 private boolean
 AssignmentManager.isCarryingRegion(ServerName serverName,
 RegionInfo regionInfo) 
 
-
+
 protected boolean
 RegionTransitionProcedure.isServerOnline(MasterProcedureEnv env,
   ServerName serverName) 
 
-
+
 void
 AssignmentManager.killRegionServer(ServerName serverName) 
 
-
+
 void
 RegionStates.logSplit(ServerName serverName) 
 
-
+
 void
 AssignmentManager.markRegionAsMerged(RegionInfo child,
   ServerName serverName,
@@ -4073,62 +4068,62 @@ Input/OutputFormats, a table indexing MapReduce job, 
and utility methods.
 When called here, the merge has happened.
 
 
-
+
 void
 AssignmentManager.markRegionAsSplit(RegionInfo parent,
  ServerName serverName,
  RegionInfo daughterA,

[44/51] [partial] hbase-site git commit: Published site at .

2017-11-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/713d773f/checkstyle.rss
--
diff --git a/checkstyle.rss b/checkstyle.rss
index 3283c25..25f623a 100644
--- a/checkstyle.rss
+++ b/checkstyle.rss
@@ -25,8 +25,8 @@ under the License.
 en-us
 ©2007 - 2017 The Apache Software Foundation
 
-  File: 3437,
- Errors: 20803,
+  File: 3440,
+ Errors: 20775,
  Warnings: 0,
  Infos: 0
   
@@ -2575,7 +2575,7 @@ under the License.
   0
 
 
-  8
+  7
 
   
   
@@ -2594,6 +2594,20 @@ under the License.
   
   
 
+  http://hbase.apache.org/checkstyle.html#org.apache.hadoop.hbase.io.asyncfs.SendBufSizePredictor.java";>org/apache/hadoop/hbase/io/asyncfs/SendBufSizePredictor.java
+
+
+  0
+
+
+  0
+
+
+  0
+
+  
+  
+
   http://hbase.apache.org/checkstyle.html#org.apache.hadoop.hbase.regionserver.compactions.StripeCompactor.java";>org/apache/hadoop/hbase/regionserver/compactions/StripeCompactor.java
 
 
@@ -5893,7 +5907,7 @@ under the License.
   0
 
 
-  2
+  0
 
   
   
@@ -6159,7 +6173,7 @@ under the License.
   0
 
 
-  1
+  0
 
   
   
@@ -12613,7 +12627,7 @@ under the License.
   0
 
 
-  4
+  0
 
   
   
@@ -13873,7 +13887,7 @@ under the License.
   0
 
 
-  1
+  0
 
   
   
@@ -16911,7 +16925,7 @@ under the License.
   0
 
 
-  31
+  30
 
   
   
@@ -21606,6 +21620,20 @@ under the License.
   
   
 
+  http://hbase.apache.org/checkstyle.html#org.apache.hadoop.hbase.ipc.NettyRpcFrameDecoder.java";>org/apache/hadoop/hbase/ipc/NettyRpcFrameDecoder.java
+
+
+  0
+
+
+  0
+
+
+  0
+
+  
+  
+
   http://hbase.apache.org/checkstyle.html#org.apache.hadoop.hbase.coprocessor.RegionObserver.java";>org/apache/hadoop/hbase/coprocessor/RegionObserver.java
 
 
@@ -24602,6 +24630,20 @@ under the License.
   
   
 
+  http://hbase.apache.org/checkstyle.html#org.apache.hadoop.hbase.io.asyncfs.TestSendBufSizePredictor.java";>org/apache/hadoop/hbase/io/asyncfs/TestSendBufSizePredictor.java
+
+
+  0
+
+
+  0
+
+
+  1
+
+  
+  
+
   http://hbase.apache.org/checkstyle.html#org.apache.hadoop.hbase.util.ServerCommandLine.java";>org/apache/hadoop/hbase/util/ServerCommandLine.java
 
 
@@ -26781,7 +26823,7 @@ under the License.
   0
 
 
-  62
+  61
 
   
   
@@ -28461,7 +28503,7 @@ under the License.
   0
 
 
-  1
+  0
 
   
   
@@ -28671,7 +28713,7 @@ under the License.
   0
 
 
-  16
+  13
 
   
   
@@ -30449,7 +30491,7 @@ under the License.
   0
 
 
-  2
+  0
 
   
   
@@ -32885,7 +32927,7 @@ under the License.
   0
 
 
-  2
+  0
 
   

hbase-site git commit: INFRA-10751 Empty commit

2017-11-30 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 713d773f1 -> 1c7795626


INFRA-10751 Empty commit


Project: http://git-wip-us.apache.org/repos/asf/hbase-site/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase-site/commit/1c779562
Tree: http://git-wip-us.apache.org/repos/asf/hbase-site/tree/1c779562
Diff: http://git-wip-us.apache.org/repos/asf/hbase-site/diff/1c779562

Branch: refs/heads/asf-site
Commit: 1c77956265b8351c46fe10d414d2555e0591c745
Parents: 713d773
Author: jenkins 
Authored: Thu Nov 30 15:18:45 2017 +
Committer: jenkins 
Committed: Thu Nov 30 15:18:45 2017 +

--

--




[43/51] [partial] hbase-site git commit: Published site at .

2017-11-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/713d773f/devapidocs/index-all.html
--
diff --git a/devapidocs/index-all.html b/devapidocs/index-all.html
index d892158..a81108a 100644
--- a/devapidocs/index-all.html
+++ b/devapidocs/index-all.html
@@ -645,6 +645,8 @@
 
 Set ack/noAck mode.
 
+ackedBlockLength
 - Variable in class org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput
+ 
 ackedLength
 - Variable in class org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.Callback
  
 AckHandler(int)
 - Constructor for class org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.AckHandler
@@ -2155,7 +2157,7 @@
  
 addRegionToRestore(RegionInfo)
 - Method in class org.apache.hadoop.hbase.snapshot.RestoreSnapshotHelper.RestoreMetaChanges
  
-addRegionToServer(ServerName,
 RegionStates.RegionStateNode) - Method in class 
org.apache.hadoop.hbase.master.assignment.RegionStates
+addRegionToServer(RegionStates.RegionStateNode)
 - Method in class org.apache.hadoop.hbase.master.assignment.RegionStates
  
 addRegionToSnapshot(SnapshotProtos.SnapshotDescription,
 ForeignExceptionSnare) - Method in class 
org.apache.hadoop.hbase.regionserver.HRegion
 
@@ -5909,6 +5911,8 @@
 
 Calculate the expected BlobVar encoded length based on 
unencoded length.
 
+block
 - Variable in class org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput
+ 
 block(MasterServices,
 long) - Static method in class org.apache.hadoop.hbase.master.TableNamespaceManager
 
 An ugly utility to be removed when refactor 
TableNamespaceManager.
@@ -8459,7 +8463,7 @@
  
 callback
 - Variable in class org.apache.hadoop.hbase.client.RawAsyncTableImpl.CoprocessorServiceBuilderImpl
  
-Callback(Promise,
 long, Collection) - Constructor for class 
org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.Callback
+Callback(CompletableFuture,
 long, Collection) - Constructor for class 
org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.Callback
  
 callback - 
Variable in class org.apache.hadoop.hbase.ipc.Call
  
@@ -8942,7 +8946,7 @@
 
 Anticipated number of metric entries
 
-capacity
 - Variable in class org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput
+capacity
 - Variable in class org.apache.hadoop.hbase.io.asyncfs.SendBufSizePredictor
  
 capacity
 - Variable in class org.apache.hadoop.hbase.io.hfile.bucket.ByteBufferIOEngine
  
@@ -16266,6 +16270,8 @@
 
 Deprecated.
  
+connection
 - Variable in class org.apache.hadoop.hbase.ipc.NettyRpcFrameDecoder
+ 
 connection
 - Variable in class org.apache.hadoop.hbase.ipc.NettyRpcServerRequestDecoder
  
 connection
 - Variable in class org.apache.hadoop.hbase.ipc.ServerCall
@@ -16454,7 +16460,7 @@
 
 connectTO
 - Variable in class org.apache.hadoop.hbase.ipc.AbstractRpcClient
  
-connectToDataNodes(Configuration,
 DFSClient, String, LocatedBlock, long, long, BlockConstructionStage, 
DataChecksum, EventLoop, Class) - Static 
method in class org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper
+connectToDataNodes(Configuration,
 DFSClient, String, LocatedBlock, long, long, BlockConstructionStage, 
DataChecksum, EventLoopGroup, Class) - 
Static method in class org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper
  
 connectToPeers()
 - Method in class org.apache.hadoop.hbase.replication.regionserver.HBaseInterClusterReplicationEndpoint
  
@@ -19069,6 +19075,8 @@
  
 createFlushWALEdit(RegionInfo,
 WALProtos.FlushDescriptor) - Static method in class 
org.apache.hadoop.hbase.wal.WALEdit
  
+createForTesting(RegionInfo,
 RegionState.State) - Static method in class 
org.apache.hadoop.hbase.master.RegionState
+ 
 createFromFileInfo(HFile.FileInfo)
 - Static method in class org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoderImpl
  
 createFromHFileLink(Configuration,
 FileSystem, Path, String) - Static method in class 
org.apache.hadoop.hbase.io.HFileLink
@@ -19410,6 +19418,10 @@
  
 createNamespaceTable(MasterServices)
 - Method in class org.apache.hadoop.hbase.master.TableNamespaceManager
  
+createNettyRpcServerPreambleHandler()
 - Method in class org.apache.hadoop.hbase.ipc.NettyRpcServer
+ 
+createNettyServerRpcConnection(Channel)
 - Method in class org.apache.hadoop.hbase.ipc.NettyRpcServerPreambleHandler
+ 
 createNewMeta(String)
 - Method in class org.apache.hadoop.hbase.util.HBaseFsck
 
 This borrows code from MasterFileSystem.bootstrap().
@@ -19467,14 +19479,14 @@
  
 createOrUpdate(NamespacesInstanceModel,
 UriInfo, Admin, boolean) - Method in class 
org.apache.hadoop.hbase.rest.NamespacesInstanceResource
  
-createOutput(FileSystem,
 Path, boolean, boolean, short, long, EventLoop, Class) - Static method in class 
org.apache.hadoop.hbase.io.asyncfs.AsyncFSOutputHelper
+createOutput(FileSystem,
 Path, boolean, boolean, short, long, EventLoopGrou

[39/51] [partial] hbase-site git commit: Published site at .

2017-11-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/713d773f/devapidocs/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutput.Callback.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutput.Callback.html b/devapidocs/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutput.Callback.html
index 21cebba..b946de1 100644
--- a/devapidocs/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutput.Callback.html
+++ b/devapidocs/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutput.Callback.html
@@ -107,7 +107,7 @@
 
 
 
-private static final class FanOutOneBlockAsyncDFSOutput.Callback
+private static final class FanOutOneBlockAsyncDFSOutput.Callback
 extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">Object
 
 
@@ -132,11 +132,11 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 ackedLength 
 
 
-private 
org.apache.hadoop.hbase.shaded.io.netty.util.concurrent.PromiseVoid>
-promise 
+private http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true";
 title="class or interface in java.util.concurrent">CompletableFutureLong>
+future 
 
 
-private http://docs.oracle.com/javase/8/docs/api/java/util/Set.html?is-external=true";
 title="class or interface in 
java.util">Set
+private http://docs.oracle.com/javase/8/docs/api/java/util/Set.html?is-external=true";
 title="class or interface in 
java.util">Set
 unfinishedReplicas 
 
 
@@ -154,7 +154,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 Constructor and Description
 
 
-Callback(org.apache.hadoop.hbase.shaded.io.netty.util.concurrent.PromiseVoid> promise,
+Callback(http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true";
 title="class or interface in java.util.concurrent">CompletableFutureLong> future,
 long ackedLength,
 http://docs.oracle.com/javase/8/docs/api/java/util/Collection.html?is-external=true";
 title="class or interface in 
java.util">Collection replicas) 
 
@@ -188,13 +188,13 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 Field Detail
-
+
 
 
 
 
-promise
-private 
final org.apache.hadoop.hbase.shaded.io.netty.util.concurrent.PromiseVoid> promise
+future
+private final http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true";
 title="class or interface in java.util.concurrent">CompletableFutureLong> future
 
 
 
@@ -203,7 +203,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 ackedLength
-private final long ackedLength
+private final long ackedLength
 
 
 
@@ -212,7 +212,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 unfinishedReplicas
-private final http://docs.oracle.com/javase/8/docs/api/java/util/Set.html?is-external=true";
 title="class or interface in 
java.util">Set
 unfinishedReplicas
+private final http://docs.oracle.com/javase/8/docs/api/java/util/Set.html?is-external=true";
 title="class or interface in 
java.util">Set
 unfinishedReplicas
 
 
 
@@ -223,13 +223,13 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 Constructor Detail
-
+
 
 
 
 
 Callback
-public Callback(org.apache.hadoop.hbase.shaded.io.netty.util.concurrent.PromiseVoid> promise,
+public Callback(http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true";
 title="class or interface in java.util.concurrent">CompletableFutureLong> futur

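The Callback rewrite above drops the shaded Netty Promise<Void> in favor of a JDK CompletableFuture<Long> that completes with the acked length once every replica has acknowledged. A minimal sketch of that completion pattern, with hypothetical names, not the HBase class:

// Sketch: complete a JDK future once all replicas have acked, the pattern
// the diff moves to (previously a Netty Promise<Void>). Illustrative only.
import java.util.Set;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentHashMap;

final class AckCallback {
  private final CompletableFuture<Long> future;   // completes with acked length
  private final long ackedLength;
  private final Set<String> unfinishedReplicas = ConcurrentHashMap.newKeySet();

  AckCallback(CompletableFuture<Long> future, long ackedLength, Iterable<String> replicas) {
    this.future = future;
    this.ackedLength = ackedLength;
    replicas.forEach(unfinishedReplicas::add);
  }

  void replicaAcked(String replica) {
    // Last replica to ack completes the future for any waiting caller.
    if (unfinishedReplicas.remove(replica) && unfinishedReplicas.isEmpty()) {
      future.complete(ackedLength);
    }
  }

  void replicaFailed(Throwable error) {
    future.completeExceptionally(error);
  }
}
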
[42/51] [partial] hbase-site git commit: Published site at .

2017-11-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/713d773f/devapidocs/org/apache/hadoop/hbase/HConstants.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/HConstants.html 
b/devapidocs/org/apache/hadoop/hbase/HConstants.html
index 3b14bfa..8bf6d22 100644
--- a/devapidocs/org/apache/hadoop/hbase/HConstants.html
+++ b/devapidocs/org/apache/hadoop/hbase/HConstants.html
@@ -451,447 +451,451 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 
+static int
+DEFAULT_HBASE_CLIENT_SERVERSIDE_RETRIES_MULTIPLIER 
+
+
 static double
 DEFAULT_HBASE_MASTER_BALANCER_MAX_RIT_PERCENT
 Default value for the max percent of regions in 
transition
 
 
-
+
 static int
 DEFAULT_HBASE_META_BLOCK_SIZE
 Default value of HBASE_META_BLOCK_SIZE.
 
 
-
+
 static int
 DEFAULT_HBASE_META_SCANNER_CACHING
 Default value of HBASE_META_SCANNER_CACHING.
 
 
-
+
 static int
 DEFAULT_HBASE_META_VERSIONS
 Default value of HBASE_META_VERSIONS.
 
 
-
+
 static int
 DEFAULT_HBASE_RPC_SHORTOPERATION_TIMEOUT
 Default value of HBASE_RPC_SHORTOPERATION_TIMEOUT_KEY
 
 
-
+
 static int
 DEFAULT_HBASE_RPC_TIMEOUT
 Default value of HBASE_RPC_TIMEOUT_KEY
 
 
-
+
 static int
 DEFAULT_HBASE_SERVER_PAUSE
 Default value of HBASE_SERVER_PAUSE.
 
 
-
+
 static long
 DEFAULT_HBASE_SERVER_SCANNER_MAX_RESULT_SIZE
 Maximum number of bytes returned when calling a scanner's 
next method.
 
 
-
+
 static int
 DEFAULT_HEALTH_FAILURE_THRESHOLD 
 
-
+
 static long
 DEFAULT_HEALTH_SCRIPT_TIMEOUT 
 
-
+
 static float
 DEFAULT_HEAP_OCCUPANCY_HIGH_WATERMARK 
 
-
+
 static float
 DEFAULT_HEAP_OCCUPANCY_LOW_WATERMARK 
 
-
+
 static http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String
 DEFAULT_HOST
 default host address
 
 
-
+
 static boolean
 DEFAULT_HREGION_EDITS_REPLAY_SKIP_ERRORS 
 
-
+
 static int
 DEFAULT_HREGION_MEMSTORE_BLOCK_MULTIPLIER
 Default value for 
hbase.hregion.memstore.block.multiplier
 
 
-
+
 static int
 DEFAULT_HSTORE_OPEN_AND_CLOSE_THREADS_MAX
 The default number for the max number of threads used for 
opening and
  closing stores or store files in parallel
 
 
-
+
 static int
 DEFAULT_MASTER_INFOPORT
 default port for master web api
 
 
-
+
 static int
 DEFAULT_MASTER_PORT
 default port that the master listens on
 
 
-
+
 static boolean
 DEFAULT_MASTER_TYPE_BACKUP
 by default every master is a possible primary master unless 
the conf explicitly overrides it
 
 
-
+
 static long
 DEFAULT_MAX_FILE_SIZE
 Default maximum file size
 
 
-
+
 static int
 DEFAULT_META_REPLICA_NUM 
 
-
+
 static double
 DEFAULT_REGION_SERVER_HANDLER_ABORT_ON_ERROR_PERCENT 
 
-
+
 static int
 DEFAULT_REGION_SERVER_HANDLER_COUNT 
 
-
+
 static int
 DEFAULT_REGION_SERVER_HIGH_PRIORITY_HANDLER_COUNT 
 
-
+
 static int
 DEFAULT_REGION_SERVER_REPLICATION_HANDLER_COUNT 
 
-
+
 static int
 DEFAULT_REGIONSERVER_INFOPORT
 default port for region server web api
 
 
-
+
 static long
 DEFAULT_REGIONSERVER_METRICS_PERIOD 
 
-
+
 static int
 DEFAULT_REGIONSERVER_PORT
 Default port region server listens on.
 
 
-
+
 static http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String
 DEFAULT_SNAPSHOT_RESTORE_FAILSAFE_NAME 
 
-
+
 static boolean
 DEFAULT_SNAPSHOT_RESTORE_TAKE_FAILSAFE_SNAPSHOT 
 
-
+
 static http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String
 DEFAULT_STATUS_MULTICAST_ADDRESS 
 
-
+
 static http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String
 DEFAULT_STATUS_MULTICAST_BIND_ADDRESS 
 
-
+
 static int
 DEFAULT_STATUS_MULTICAST_PORT 
 
-
+
 static http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String
 DEFAULT_TEMPORARY_HDFS_DIRECTORY 
 
-
+
 static int
 DEFAULT_THREAD_WAKE_FREQUENCY
 Default value for thread wake frequency
 
 
-
+
 static boolean
 DEFAULT_USE_META_REPLICAS 
 
-
+
 static int
 DEFAULT_VERSION_FILE_WRITE_ATTEMPTS
 Parameter name for how often we should try to write a 
version file, before failing
 
 
-
+
 static http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String
 DEFAULT_WAL_STORAGE_POLICY 
 
-
+
 static int
 DEFAULT_ZK_SESSION_TIMEOUT
 Default value for ZooKeeper session timeout
 
 
-
+
 static http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String
 DEFAULT_ZOOKEEPER_ZNODE_PARENT 
 
-
+
 static int
 DEFAULT_ZOOKEPER_CLIENT_PORT
 Default client port that the zookeeper listens on
 
 
-
+
 static int
 DEFAULT_ZOOKEPER_MAX_CLIENT_CNXNS
 Default limit on concurrent client-side zookeeper 
connections
 
 
-
+
 static int
 DELIMITER
 delimiter used be

[31/51] [partial] hbase-site git commit: Published site at .

2017-11-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/713d773f/devapidocs/org/apache/hadoop/hbase/master/assignment/RegionTransitionProcedure.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/master/assignment/RegionTransitionProcedure.html b/devapidocs/org/apache/hadoop/hbase/master/assignment/RegionTransitionProcedure.html
index bd4be7d..3cbdb37 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/assignment/RegionTransitionProcedure.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/assignment/RegionTransitionProcedure.html
@@ -123,23 +123,25 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Private
-public abstract class RegionTransitionProcedure
+public abstract class RegionTransitionProcedure
 extends Procedure
 implements TableProcedureInterface, RemoteProcedureDispatcher.RemoteProcedure
 Base class for the Assign and Unassign Procedure.
- There can only be one RegionTransitionProcedure per region running at a time
- since each procedure takes a lock on the region (see 
MasterProcedureScheduler).
+
+ Locking:
+ Takes exclusive lock on the region being assigned/unassigned. Thus, there can 
only be one
+ RegionTransitionProcedure per region running at a time (see 
MasterProcedureScheduler).
 
  This procedure is asynchronous and responds to external events.
  The AssignmentManager will notify this procedure when the RS completes
  the operation and reports the transitioned state
- (see the Assign and Unassign class for more detail).
+ (see the Assign and Unassign class for more detail).
 
  Procedures move from the REGION_TRANSITION_QUEUE state when they are
  first submitted, to the REGION_TRANSITION_DISPATCH state when the request
  to remote server is sent and the Procedure is suspended waiting on external
  event to be woken again. Once the external event is triggered, Procedure
- moves to the REGION_TRANSITION_FINISH state.
+ moves to the REGION_TRANSITION_FINISH state.
 
  NOTE: AssignProcedure and 
UnassignProcedure 
should not be thought of
  as being asymmetric, at least currently.
@@ -152,12 +154,13 @@ implements AssignProcedure.forceNewPlan.
 When
- the number of attempts reach hreshold configuration 
'hbase.assignment.maximum.attempts',
+ the number of attempts reaches threshold configuration 
'hbase.assignment.maximum.attempts',
  the procedure is aborted. For UnassignProcedure, 
similar re-attempts are
  intentionally not implemented. It is a 'one shot' procedure. See its class 
doc for how it
  handles failure.
  
  
+ 
 
 TODO: Considering it is a priority doing all we can to make a region available as soon as possible,
 re-attempting with any target makes sense if the specified target fails in case of
@@ -300,7 +303,7 @@ implements RegionStates.RegionStateNode regionNode) 
 
 
-RegionInfo
+protected RegionInfo
 getRegionInfo() 
 
 
@@ -318,7 +321,7 @@ implements getTableName() 
 
 
-protected 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RegionTransitionState
+(package private) 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RegionTransitionState
 getTransitionState() 
 
 
@@ -405,7 +408,7 @@ implements setRegionInfo(RegionInfo regionInfo) 
 
 
-protected void
+(package private) void
 setTransitionState(org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RegionTransitionState state) 
 
 
@@ -482,7 +485,7 @@ implements 
 
 LOG
-private static final org.apache.commons.logging.Log LOG
+private static final org.apache.commons.logging.Log LOG
 
 
 
@@ -491,7 +494,7 @@ implements 
 
 aborted
-protected final http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/AtomicBoolean.html?is-external=true";
 title="class or interface in java.util.concurrent.atomic">AtomicBoolean aborted
+protected final http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/AtomicBoolean.html?is-external=true";
 title="class or interface in java.util.concurrent.atomic">AtomicBoolean aborted
 
 
 
@@ -500,7 +503,7 @@ implements 
 
 transitionState
-private org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RegionTransitionState
 transitionState
+private org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RegionTransitionState
 transitionState
 
 
 
@@ -509,7 +512,7 @@ implements 
 
 regionInfo
-private RegionInfo regionInfo
+private RegionInfo regionInfo
 
 
 
@@ -518,7 +521,7 @@ implements 
 
 lock
-private volatile boolean lock
+private volatile boolean lock
 
 
 
@@ -535,7 +538,7 @@ implements 
 
 RegionTransitionProcedure
-public RegionTransitionProcedure()
+public RegionTransitionProcedure()
 
 
 
@@ -561,7 +564,7 @@ implements 
 
 getRegionInfo
-public RegionInfo getRegionInfo()
+protected RegionInfo getRegionInfo()
 
 
 
@@ -630,7 +633,7 @@ implements 
 
 setTransitionState
-protected void setTransitionState(org.apache.

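The lifecycle documented above (submit to REGION_TRANSITION_QUEUE, move to REGION_TRANSITION_DISPATCH and suspend until the regionserver reports back, then REGION_TRANSITION_FINISH, with retries capped by 'hbase.assignment.maximum.attempts') can be modeled as a small state machine. An illustrative sketch under those assumptions, not the real procedure code:

// Illustrative model of the transition lifecycle described in the javadoc
// above: QUEUE -> DISPATCH (suspend, await the RS report) -> FINISH, aborting
// after a bounded number of attempts. Not the HBase implementation.
public class RegionTransitionSketch {
  enum TransitionState { REGION_TRANSITION_QUEUE, REGION_TRANSITION_DISPATCH, REGION_TRANSITION_FINISH }

  private TransitionState state = TransitionState.REGION_TRANSITION_QUEUE;
  private int attempts = 0;
  private final int maxAttempts;  // e.g. 'hbase.assignment.maximum.attempts'

  RegionTransitionSketch(int maxAttempts) { this.maxAttempts = maxAttempts; }

  /** Called when the remote request is sent; the procedure then suspends. */
  void dispatched() {
    attempts++;
    if (attempts > maxAttempts) {
      throw new IllegalStateException("giving up after " + attempts + " attempts");
    }
    state = TransitionState.REGION_TRANSITION_DISPATCH;
  }

  /** Called when the regionserver reports the transitioned state. */
  void reportReceived() { state = TransitionState.REGION_TRANSITION_FINISH; }

  TransitionState state() { return state; }
}
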
[28/51] [partial] hbase-site git commit: Published site at .

2017-11-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/713d773f/devapidocs/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.html b/devapidocs/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.html
index cc13777..a2313bc 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.html
@@ -118,7 +118,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-public class RSProcedureDispatcher
+public class RSProcedureDispatcher
 extends RemoteProcedureDispatcher
 implements ServerListener
 A remote procedure dispatcher for regionservers.
@@ -148,11 +148,17 @@ implements 
 private class 
-RSProcedureDispatcher.CloseRegionRemoteCall 
+RSProcedureDispatcher.CloseRegionRemoteCall
+Compatibility class used by RSProcedureDispatcher.CompatRemoteProcedureResolver
 to close regions using old
+ AdminService#closeRegion(RpcController, CloseRegionRequest, 
RpcCallback) rpc.
+
 
 
 protected class 
-RSProcedureDispatcher.CompatRemoteProcedureResolver 
+RSProcedureDispatcher.CompatRemoteProcedureResolver
+Compatibility class to open and close regions using old 
endpoints (openRegion/closeRegion) in
+ AdminProtos.AdminService.
+
 
 
 protected class 
@@ -160,7 +166,10 @@ implements 
 private class 
-RSProcedureDispatcher.OpenRegionRemoteCall 
+RSProcedureDispatcher.OpenRegionRemoteCall
+Compatibility class used by RSProcedureDispatcher.CompatRemoteProcedureResolver
 to open regions using old
+ AdminService#openRegion(RpcController, OpenRegionRequest, 
RpcCallback) rpc.
+
 
 
 static class 
@@ -178,10 +187,6 @@ implements private static interface 
 RSProcedureDispatcher.RemoteProcedureResolver 
 
-
-static class 
-RSProcedureDispatcher.ServerOperation 
-
 
 
 
@@ -217,15 +222,19 @@ implements master 
 
 
+private MasterProcedureEnv
+procedureEnv 
+
+
 static http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String
 RS_RPC_STARTUP_WAIT_TIME_CONF_KEY 
 
-
+
 private static int
 RS_VERSION_WITH_EXEC_PROCS 
 
-
-protected long
+
+private long
 rsStartupWaitTime 
 
 
@@ -281,7 +290,7 @@ implements 
 protected void
 remoteDispatch(ServerName serverName,
-  http://docs.oracle.com/javase/8/docs/api/java/util/Set.html?is-external=true";
 title="class or interface in java.util">Set operations) 
+  http://docs.oracle.com/javase/8/docs/api/java/util/Set.html?is-external=true";
 title="class or interface in java.util">Set remoteProcedures) 
 
 
 void
@@ -298,8 +307,11 @@ implements 
 void
 splitAndResolveOperation(ServerName serverName,
-http://docs.oracle.com/javase/8/docs/api/java/util/Set.html?is-external=true";
 title="class or interface in java.util">Set operations,
-RSProcedureDispatcher.RemoteProcedureResolver resolver) 
+http://docs.oracle.com/javase/8/docs/api/java/util/Set.html?is-external=true";
 title="class or interface in java.util">Set remoteProcedures,
+RSProcedureDispatcher.RemoteProcedureResolver resolver)
+Fetches RemoteProcedureDispatcher.RemoteOperations
+ from the given remoteProcedures and groups them by class of the 
returned operation.
+
 
 
 boolean
@@ -351,7 +363,7 @@ implements 
 
 LOG
-private static final org.apache.commons.logging.Log LOG
+private static final org.apache.commons.logging.Log LOG
 
 
 
@@ -360,7 +372,7 @@ implements 
 
 RS_RPC_STARTUP_WAIT_TIME_CONF_KEY
-public static final http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String RS_RPC_STARTUP_WAIT_TIME_CONF_KEY
+public static final http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String RS_RPC_STARTUP_WAIT_TIME_CONF_KEY
 
 See Also:
 Constant
 Field Values
@@ -373,7 +385,7 @@ implements 
 
 DEFAULT_RS_RPC_STARTUP_WAIT_TIME
-private static final int DEFAULT_RS_RPC_STARTUP_WAIT_TIME
+private static final int DEFAULT_RS_RPC_STARTUP_WAIT_TIME
 
 See Also:
 Constant
 Field Values
@@ -386,7 +398,7 @@ implements 
 
 RS_VERSION_WITH_EXEC_PROCS
-private static final int RS_VERSION_WITH_EXEC_PROCS
+private static final int RS_VERSION_WITH_EXEC_PROCS
 
 See Also:
 Constant
 Field Values
@@ -399,16 +411,25 @@ implements 
 
 master
-protected final MasterServices master
+protected final MasterServices master
 
 
 
 
 
-
+
 
 rsStartupWaitTime
-protected final long rsStartupWaitTime
+private final long rsStartupWaitTime
+
+
+

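splitAndResolveOperation above is documented to fetch the RemoteOperations from the given remote procedures and group them by the class of the returned operation, so each bucket can be handed to the matching resolver. A hypothetical sketch of that grouping step (the operation types here are stand-ins, not the HBase classes):

// Hypothetical sketch: group remote operations by their concrete class, the
// documented behavior of splitAndResolveOperation above. Illustrative only.
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;

public class DispatchGrouping {
  interface RemoteOperation {}
  static final class OpenOp implements RemoteOperation {}
  static final class CloseOp implements RemoteOperation {}

  static Map<Class<?>, List<RemoteOperation>> groupByClass(Set<RemoteOperation> ops) {
    // Each bucket (e.g. all OpenOp, all CloseOp) goes to its own resolver.
    return ops.stream().collect(Collectors.groupingBy(op -> (Class<?>) op.getClass()));
  }
}
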
[26/51] [partial] hbase-site git commit: Published site at .

2017-11-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/713d773f/devapidocs/org/apache/hadoop/hbase/regionserver/wal/FSHLog.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/wal/FSHLog.html b/devapidocs/org/apache/hadoop/hbase/regionserver/wal/FSHLog.html
index 560e87a..f61a992 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/wal/FSHLog.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/wal/FSHLog.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":9,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":9,"i22":10,"i23":10,"i24":9};
+var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":9,"i16":10,"i17":10,"i18":10,"i19":10,"i20":9,"i21":10,"i22":10,"i23":9};
 var tabs = {65535:["t0","All Methods"],1:["t1","Static 
Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -400,39 +400,34 @@ extends 
 private SyncFuture
-publishSyncOnRingBuffer(long sequence) 
+publishSyncOnRingBuffer() 
 
 
 private SyncFuture
-publishSyncOnRingBuffer(long sequence,
-   org.apache.htrace.core.Span span) 
+publishSyncOnRingBuffer(long sequence) 
 
 
-private SyncFuture
-publishSyncOnRingBuffer(org.apache.htrace.core.Span span) 
-
-
 private void
 publishSyncThenBlockOnCompletion(org.apache.htrace.core.TraceScope scope) 
 
-
+
 private static void
 split(org.apache.hadoop.conf.Configuration conf,
  org.apache.hadoop.fs.Path p) 
 
-
+
 void
 sync()
 Sync what we have in the WAL.
 
 
-
+
 void
 sync(long txid)
 Sync the WAL if the txId was not already sync'd.
 
 
-
+
 private static void
 usage() 
 
@@ -442,7 +437,7 @@ extends AbstractFSWAL
-abortCacheFlush, append, atHeadOfRingBufferEventHandlerAppend, blockOnSync, close, completeCacheFlush, computeFilename, findRegionsToForceFlush, getCoprocessorHost, getCurrentFileName, getEarliestMemStoreSeqNum, getEarliestMemStoreSeqNum, getFilenum, getFileNumFromFileName, getFiles, getLogFileSize, getLogFileSizeIfBeingWritten, getNumLogFiles, getNumRolledLogFiles, getOldPath, getPreallocatedEventCount, getSyncFuture, getUnflushedEntriesCount, getWALArchivePath, isUnflushedEntries, postSync, registerWALActionsListener, replaceWriter, requestLogRoll, requestLogRoll, rollWriter, rollWriter, shutdown, stampSequenceIdAndPublishToRingBuffer, startCacheFlush, startCacheFlush, toString, unregisterWALActionsListener, updateStore
+abortCacheFlush, append, atHeadOfRingBufferEventHandlerAppend, blockOnSync, close, completeCacheFlush, computeFilename, findRegionsToForceFlush, getCoprocessorHost, getCurrentFileName, getEarliestMemStoreSeqNum, getEarliestMemStoreSeqNum, getFilenum, getFileNumFromFileName, getFiles, getLogFileSize, getLogFileSizeIfBeingWritten, getNumLogFiles, getNumRolledLogFiles, getOldPath, getPreallocatedEventCount, getSyncFuture, getUnflushedEntriesCount, getWALArchivePath, isUnflushedEntries, postSync, registerWALActionsListener, replaceWriter, requestLogRoll, requestLogRoll, rollWriter, rollWriter, shutdown, stampSequenceIdAndPublishToRingBuffer, startCacheFlush, startCacheFlush, toString, unregisterWALActionsListener, updateStore
 
 
 
@@ -581,7 +576,7 @@ extends 
 
 FIXED_OVERHEAD
-public static final long FIXED_OVERHEAD
+public static final long FIXED_OVERHEAD
 
 
 
@@ -856,41 +851,31 @@ extends 
-
-
-
-
-publishSyncOnRingBuffer
-private SyncFuture publishSyncOnRingBuffer(long sequence)
-
-
 
 
 
 
 
 getSequenceOnRingBuffer
-private long getSequenceOnRingBuffer()
+private long getSequenceOnRingBuffer()
 
 
-
+
 
 
 
 
 publishSyncOnRingBuffer
-private SyncFuture publishSyncOnRingBuffer(org.apache.htrace.core.Span span)
+private SyncFuture publishSyncOnRingBuffer()
 
 
-
+
 
 
 
 
 publishSyncOnRingBuffer
-private SyncFuture publishSyncOnRingBuffer(long sequence,
-   
org.apache.htrace.core.Span span)
+private SyncFuture publishSyncOnRingBuffer(long sequence)
 
 
 
@@ -899,7 +884,

[36/51] [partial] hbase-site git commit: Published site at .

2017-11-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/713d773f/devapidocs/org/apache/hadoop/hbase/ipc/package-summary.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/ipc/package-summary.html 
b/devapidocs/org/apache/hadoop/hbase/ipc/package-summary.html
index 7aad337..81d9def 100644
--- a/devapidocs/org/apache/hadoop/hbase/ipc/package-summary.html
+++ b/devapidocs/org/apache/hadoop/hbase/ipc/package-summary.html
@@ -397,171 +397,177 @@
 
 
 
+NettyRpcFrameDecoder
+
+Decoder for extracting frame (see the minimal decoder sketch after this table)
+
+
+
 NettyRpcServer
 
 An RPC server with Netty4 implementation.
 
 
-
+
 NettyRpcServerPreambleHandler
 
 Handle connection preamble.
 
 
-
+
 NettyRpcServerRequestDecoder
 
 Decoder for rpc request.
 
 
-
+
 NettyRpcServerResponseEncoder
 
 Encoder for RpcResponse.
 
 
-
+
 NettyServerCall
 
 Datastructure that holds all necessary to a method 
invocation and then afterward, carries the
  result.
 
 
-
+
 NettyServerRpcConnection
 
 RpcConnection implementation for netty rpc server.
 
 
-
+
 RpcClientFactory
 
 Factory to create a RpcClient
 
 
-
+
 RpcConnection
 
 Base class for ipc connection.
 
 
-
+
 RpcControllerFactory
 
 Factory to create a HBaseRpcController
 
 
-
+
 RpcExecutor
 
 Runs the CallRunners passed here via RpcExecutor.dispatch(CallRunner).
 
 
-
+
 RpcExecutor.CallPriorityComparator
 
 Comparator used by the "normal callQueue" if 
DEADLINE_CALL_QUEUE_CONF_KEY is set to true.
 
 
-
+
 RpcExecutor.QueueBalancer
  
 
-
+
 RpcExecutor.RandomQueueBalancer
 
 Queue balancer that just randomly selects a queue in the 
range [0, num queues).
 
 
-
+
 RpcScheduler
 
 An interface for RPC request scheduling algorithm.
 
 
-
+
 RpcScheduler.Context
 
 Exposes runtime information of a RpcServer 
that a RpcScheduler may need.
 
 
-
+
 RpcSchedulerContext
  
 
-
+
 RpcServer
 
 An RPC server that hosts protobuf described Services.
 
 
-
+
 RpcServer.BlockingServiceAndInterface
 
 Datastructure for passing a BlockingService 
and its associated class of
  protobuf service interface.
 
 
-
+
 RpcServerFactory
  
 
-
+
 RWQueueRpcExecutor
 
 RPC Executor that uses different queues for reads and 
writes.
 
 
-
+
 ServerCall
 
 Datastructure that holds all necessary to a method 
invocation and then afterward, carries
  the result.
 
 
-
+
 ServerRpcConnection
 
 Reads calls from a connection and queues them for 
handling.
 
 
-
+
 ServerRpcConnection.ByteBuffByteInput
  
 
-
+
 ServerRpcController
 
 Used for server-side protobuf RPC service invocations.
 
 
-
+
 SimpleRpcScheduler
 
 The default scheduler.
 
 
-
+
 SimpleRpcServer
 
 The RPC server with native java NIO implementation deriving 
from Hadoop to
  host protobuf described Services.
 
 
-
+
 SimpleRpcServerResponder
 
 Sends responses of RPC back to clients.
 
 
-
+
 SimpleServerCall
 
 Datastructure that holds all necessary to a method 
invocation and then afterward, carries the
  result.
 
 
-
+
 SimpleServerRpcConnection
 
 Reads calls from a connection and queues them for 
handling.
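
The new NettyRpcFrameDecoder listed above extends Netty's ByteToMessageDecoder
(see the package tree below). As a rough illustration of that decoder pattern,
here is a minimal length-prefixed frame decoder. It is a sketch against the
plain io.netty API; HBase itself uses the shaded package
org.apache.hadoop.hbase.shaded.io.netty and additionally enforces limits such
as a maximum frame size, so this is not the actual HBase class.

import java.util.List;

import io.netty.buffer.ByteBuf;
import io.netty.channel.ChannelHandlerContext;
import io.netty.handler.codec.ByteToMessageDecoder;

// Minimal sketch: frames are a 4-byte big-endian length followed by a body.
public class LengthPrefixedFrameDecoder extends ByteToMessageDecoder {
  @Override
  protected void decode(ChannelHandlerContext ctx, ByteBuf in, List<Object> out) {
    if (in.readableBytes() < 4) {
      return; // not even the length prefix has arrived yet
    }
    in.markReaderIndex();
    int frameLength = in.readInt();
    if (in.readableBytes() < frameLength) {
      in.resetReaderIndex(); // wait until the whole frame is buffered
      return;
    }
    // Emit exactly one complete frame; downstream handlers see whole messages.
    out.add(in.readRetainedSlice(frameLength));
  }
}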

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/713d773f/devapidocs/org/apache/hadoop/hbase/ipc/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/ipc/package-tree.html 
b/devapidocs/org/apache/hadoop/hbase/ipc/package-tree.html
index 9f3fe52..8aaf9f2 100644
--- a/devapidocs/org/apache/hadoop/hbase/ipc/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/ipc/package-tree.html
@@ -122,6 +122,11 @@
 
 org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelInboundHandlerAdapter
 (implements 
org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelInboundHandler)
 
+org.apache.hadoop.hbase.shaded.io.netty.handler.codec.ByteToMessageDecoder
+
+org.apache.hadoop.hbase.ipc.NettyRpcFrameDecoder
+
+
 org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelDuplexHandler
 (implements 
org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelOutboundHandler)
 
 org.apache.hadoop.hbase.ipc.BufferCallBeforeInitHandler
@@ -343,8 +348,8 @@
 
java.lang.Enum (implements java.lang.Comparable, java.io.Serializable)
 
-org.apache.hadoop.hbase.ipc.MetricsHBaseServerSourceFactoryImpl.SourceStorage
 org.apache.hadoop.hbase.ipc.CallEvent.Type
+org.apache.hadoop.hbase.ipc.MetricsHBaseServerSourceFactoryImpl.SourceStorage
 org.apache.hadoop.hbase.ipc.BufferCallBeforeInitHandler.BufferCallAction
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/713d773f/devapidocs/org/apache/hadoop/hbase/ipc/package-use.html
-

[27/51] [partial] hbase-site git commit: Published site at .

2017-11-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/713d773f/devapidocs/org/apache/hadoop/hbase/quotas/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/quotas/package-tree.html 
b/devapidocs/org/apache/hadoop/hbase/quotas/package-tree.html
index e571d5c..3803d9a 100644
--- a/devapidocs/org/apache/hadoop/hbase/quotas/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/quotas/package-tree.html
@@ -206,12 +206,12 @@
 
java.lang.Enum (implements java.lang.Comparable, java.io.Serializable)
 
-org.apache.hadoop.hbase.quotas.ThrottlingException.Type
 org.apache.hadoop.hbase.quotas.OperationQuota.OperationType
-org.apache.hadoop.hbase.quotas.SpaceViolationPolicy
-org.apache.hadoop.hbase.quotas.QuotaType
 org.apache.hadoop.hbase.quotas.QuotaScope
 org.apache.hadoop.hbase.quotas.ThrottleType
+org.apache.hadoop.hbase.quotas.QuotaType
+org.apache.hadoop.hbase.quotas.SpaceViolationPolicy
+org.apache.hadoop.hbase.quotas.ThrottlingException.Type
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/713d773f/devapidocs/org/apache/hadoop/hbase/regionserver/handler/RegionReplicaFlushHandler.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/handler/RegionReplicaFlushHandler.html
 
b/devapidocs/org/apache/hadoop/hbase/regionserver/handler/RegionReplicaFlushHandler.html
index b091edd..ca8621b 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/regionserver/handler/RegionReplicaFlushHandler.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/regionserver/handler/RegionReplicaFlushHandler.html
@@ -400,7 +400,7 @@ extends 
 
 triggerFlushInPrimaryRegion
-void triggerFlushInPrimaryRegion(HRegion region)
+void triggerFlushInPrimaryRegion(HRegion region)
                                  throws IOException,
                                         RuntimeException
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/713d773f/devapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html 
b/devapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html
index c99a865..85b1f93 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html
@@ -699,19 +699,19 @@
 
java.lang.Enum (implements java.lang.Comparable, java.io.Serializable)
 
-org.apache.hadoop.hbase.regionserver.MetricsRegionServerSourceFactoryImpl.FactoryStorage
-org.apache.hadoop.hbase.regionserver.BloomType
-org.apache.hadoop.hbase.regionserver.Region.Operation
 org.apache.hadoop.hbase.regionserver.HRegion.FlushResult.Result
-org.apache.hadoop.hbase.regionserver.ScannerContext.NextState
-org.apache.hadoop.hbase.regionserver.DefaultHeapMemoryTuner.StepDirection
-org.apache.hadoop.hbase.regionserver.FlushType
-org.apache.hadoop.hbase.regionserver.CompactingMemStore.IndexType
 org.apache.hadoop.hbase.regionserver.ScanType
+org.apache.hadoop.hbase.regionserver.DefaultHeapMemoryTuner.StepDirection
 org.apache.hadoop.hbase.regionserver.TimeRangeTracker.Type
+org.apache.hadoop.hbase.regionserver.ScannerContext.NextState
+org.apache.hadoop.hbase.regionserver.BloomType
+org.apache.hadoop.hbase.regionserver.MetricsRegionServerSourceFactoryImpl.FactoryStorage
+org.apache.hadoop.hbase.regionserver.Region.Operation
+org.apache.hadoop.hbase.regionserver.ScannerContext.LimitScope
+org.apache.hadoop.hbase.regionserver.FlushType
 org.apache.hadoop.hbase.regionserver.SplitLogWorker.TaskExecutor.Status
 org.apache.hadoop.hbase.regionserver.MemStoreCompactionStrategy.Action
-org.apache.hadoop.hbase.regionserver.ScannerContext.LimitScope
+org.apache.hadoop.hbase.regionserver.CompactingMemStore.IndexType
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/713d773f/devapidocs/

[13/51] [partial] hbase-site git commit: Published site at .

2017-11-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/713d773f/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.CancelOnClose.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.CancelOnClose.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.CancelOnClose.html
index 25e368d..d0f781f 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.CancelOnClose.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.CancelOnClose.html
@@ -25,798 +25,798 @@
 017 */
 018package 
org.apache.hadoop.hbase.io.asyncfs;
 019
-020import static 
org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelOption.CONNECT_TIMEOUT_MILLIS;
-021import static 
org.apache.hadoop.hbase.shaded.io.netty.handler.timeout.IdleState.READER_IDLE;
-022import static 
org.apache.hadoop.fs.CreateFlag.CREATE;
-023import static 
org.apache.hadoop.fs.CreateFlag.OVERWRITE;
-024import static 
org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createEncryptor;
-025import static 
org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.trySaslNegotiate;
+020import static 
org.apache.hadoop.fs.CreateFlag.CREATE;
+021import static 
org.apache.hadoop.fs.CreateFlag.OVERWRITE;
+022import static 
org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createEncryptor;
+023import static 
org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.trySaslNegotiate;
+024import static 
org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelOption.CONNECT_TIMEOUT_MILLIS;
+025import static 
org.apache.hadoop.hbase.shaded.io.netty.handler.timeout.IdleState.READER_IDLE;
 026import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY;
 027import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_USE_DN_HOSTNAME;
 028import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_USE_DN_HOSTNAME_DEFAULT;
 029import static 
org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage.PIPELINE_SETUP_CREATE;
 030
-031import 
org.apache.hadoop.hbase.shaded.com.google.common.base.Throwables;
-032import 
org.apache.hadoop.hbase.shaded.com.google.common.collect.ImmutableMap;
-033import 
com.google.protobuf.CodedOutputStream;
-034
-035import 
org.apache.hadoop.hbase.shaded.io.netty.bootstrap.Bootstrap;
-036import 
org.apache.hadoop.hbase.shaded.io.netty.buffer.ByteBuf;
-037import 
org.apache.hadoop.hbase.shaded.io.netty.buffer.ByteBufAllocator;
-038import 
org.apache.hadoop.hbase.shaded.io.netty.buffer.ByteBufOutputStream;
-039import 
org.apache.hadoop.hbase.shaded.io.netty.buffer.PooledByteBufAllocator;
-040import 
org.apache.hadoop.hbase.shaded.io.netty.channel.Channel;
-041import 
org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelFuture;
-042import 
org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelFutureListener;
-043import 
org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelHandler;
-044import 
org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelHandlerContext;
-045import 
org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelInitializer;
-046import 
org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelPipeline;
-047import 
org.apache.hadoop.hbase.shaded.io.netty.channel.EventLoop;
-048import 
org.apache.hadoop.hbase.shaded.io.netty.channel.SimpleChannelInboundHandler;
-049import 
org.apache.hadoop.hbase.shaded.io.netty.handler.codec.protobuf.ProtobufDecoder;
-050import 
org.apache.hadoop.hbase.shaded.io.netty.handler.codec.protobuf.ProtobufVarint32FrameDecoder;
-051import 
org.apache.hadoop.hbase.shaded.io.netty.handler.timeout.IdleStateEvent;
-052import 
org.apache.hadoop.hbase.shaded.io.netty.handler.timeout.IdleStateHandler;
-053import 
org.apache.hadoop.hbase.shaded.io.netty.util.concurrent.Future;
-054import 
org.apache.hadoop.hbase.shaded.io.netty.util.concurrent.FutureListener;
-055import 
org.apache.hadoop.hbase.shaded.io.netty.util.concurrent.Promise;
-056
-057import java.io.IOException;
-058import 
java.lang.reflect.InvocationTargetException;
-059import java.lang.reflect.Method;
-060import java.util.ArrayList;
-061import java.util.EnumSet;
-062import java.util.List;
-063import java.util.concurrent.TimeUnit;
-064
-065import org.apache.commons.logging.Log;
-066import 
org.apache.commons.logging.LogFactory;
-067import 
org.apache.hadoop.conf.Configuration;
-068import 
org.apache.hadoop.crypto.CryptoProtocolVersion;
-069import 
org.apache.hadoop.crypto.Encryptor;
-070import org.apache.hadoop.fs.CreateFlag;
-071import org.apache.hadoop.fs.FileSystem;
-072import 
org.apache.hadoop.fs.FileSystemLinkResolver;
-073import org.apache.hadoop.fs.Path;
-074import 
org.apache.hadoop.fs.UnresolvedLinkException;
-075import 
org.apache.hadoop.fs.permiss

[11/51] [partial] hbase-site git commit: Published site at .

2017-11-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/713d773f/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.DFSClientAdaptor.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.DFSClientAdaptor.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.DFSClientAdaptor.html
index 25e368d..d0f781f 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.DFSClientAdaptor.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.DFSClientAdaptor.html
@@ -25,798 +25,798 @@
 017 */
 018package 
org.apache.hadoop.hbase.io.asyncfs;
 019
-020import static 
org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelOption.CONNECT_TIMEOUT_MILLIS;
-021import static 
org.apache.hadoop.hbase.shaded.io.netty.handler.timeout.IdleState.READER_IDLE;
-022import static 
org.apache.hadoop.fs.CreateFlag.CREATE;
-023import static 
org.apache.hadoop.fs.CreateFlag.OVERWRITE;
-024import static 
org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createEncryptor;
-025import static 
org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.trySaslNegotiate;
+020import static 
org.apache.hadoop.fs.CreateFlag.CREATE;
+021import static 
org.apache.hadoop.fs.CreateFlag.OVERWRITE;
+022import static 
org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createEncryptor;
+023import static 
org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.trySaslNegotiate;
+024import static 
org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelOption.CONNECT_TIMEOUT_MILLIS;
+025import static 
org.apache.hadoop.hbase.shaded.io.netty.handler.timeout.IdleState.READER_IDLE;
 026import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY;
 027import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_USE_DN_HOSTNAME;
 028import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_USE_DN_HOSTNAME_DEFAULT;
 029import static 
org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage.PIPELINE_SETUP_CREATE;
 030
-031import 
org.apache.hadoop.hbase.shaded.com.google.common.base.Throwables;
-032import 
org.apache.hadoop.hbase.shaded.com.google.common.collect.ImmutableMap;
-033import 
com.google.protobuf.CodedOutputStream;
-034
-035import 
org.apache.hadoop.hbase.shaded.io.netty.bootstrap.Bootstrap;
-036import 
org.apache.hadoop.hbase.shaded.io.netty.buffer.ByteBuf;
-037import 
org.apache.hadoop.hbase.shaded.io.netty.buffer.ByteBufAllocator;
-038import 
org.apache.hadoop.hbase.shaded.io.netty.buffer.ByteBufOutputStream;
-039import 
org.apache.hadoop.hbase.shaded.io.netty.buffer.PooledByteBufAllocator;
-040import 
org.apache.hadoop.hbase.shaded.io.netty.channel.Channel;
-041import 
org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelFuture;
-042import 
org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelFutureListener;
-043import 
org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelHandler;
-044import 
org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelHandlerContext;
-045import 
org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelInitializer;
-046import 
org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelPipeline;
-047import 
org.apache.hadoop.hbase.shaded.io.netty.channel.EventLoop;
-048import 
org.apache.hadoop.hbase.shaded.io.netty.channel.SimpleChannelInboundHandler;
-049import 
org.apache.hadoop.hbase.shaded.io.netty.handler.codec.protobuf.ProtobufDecoder;
-050import 
org.apache.hadoop.hbase.shaded.io.netty.handler.codec.protobuf.ProtobufVarint32FrameDecoder;
-051import 
org.apache.hadoop.hbase.shaded.io.netty.handler.timeout.IdleStateEvent;
-052import 
org.apache.hadoop.hbase.shaded.io.netty.handler.timeout.IdleStateHandler;
-053import 
org.apache.hadoop.hbase.shaded.io.netty.util.concurrent.Future;
-054import 
org.apache.hadoop.hbase.shaded.io.netty.util.concurrent.FutureListener;
-055import 
org.apache.hadoop.hbase.shaded.io.netty.util.concurrent.Promise;
-056
-057import java.io.IOException;
-058import 
java.lang.reflect.InvocationTargetException;
-059import java.lang.reflect.Method;
-060import java.util.ArrayList;
-061import java.util.EnumSet;
-062import java.util.List;
-063import java.util.concurrent.TimeUnit;
-064
-065import org.apache.commons.logging.Log;
-066import 
org.apache.commons.logging.LogFactory;
-067import 
org.apache.hadoop.conf.Configuration;
-068import 
org.apache.hadoop.crypto.CryptoProtocolVersion;
-069import 
org.apache.hadoop.crypto.Encryptor;
-070import org.apache.hadoop.fs.CreateFlag;
-071import org.apache.hadoop.fs.FileSystem;
-072import 
org.apache.hadoop.fs.FileSystemLinkResolver;
-073import org.apache.hadoop.fs.Path;
-074import 
org.apache.hadoop.fs.UnresolvedLinkException;
-075import 
org.apache.ha

[17/51] [partial] hbase-site git commit: Published site at .

2017-11-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/713d773f/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutput.Callback.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutput.Callback.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutput.Callback.html
index de6cb11..dd54dd2 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutput.Callback.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutput.Callback.html
@@ -25,563 +25,558 @@
 017 */
 018package 
org.apache.hadoop.hbase.io.asyncfs;
 019
-020import static 
org.apache.hadoop.hbase.shaded.io.netty.handler.timeout.IdleState.READER_IDLE;
-021import static 
org.apache.hadoop.hbase.shaded.io.netty.handler.timeout.IdleState.WRITER_IDLE;
-022import static 
org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.HEART_BEAT_SEQNO;
-023import static 
org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.READ_TIMEOUT;
-024import static 
org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile;
-025import static 
org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.endFileLease;
-026import static 
org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.getStatus;
+020import static 
org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.HEART_BEAT_SEQNO;
+021import static 
org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.READ_TIMEOUT;
+022import static 
org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile;
+023import static 
org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.endFileLease;
+024import static 
org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.getStatus;
+025import static 
org.apache.hadoop.hbase.shaded.io.netty.handler.timeout.IdleState.READER_IDLE;
+026import static 
org.apache.hadoop.hbase.shaded.io.netty.handler.timeout.IdleState.WRITER_IDLE;
 027import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY;
 028
-029
-030import 
org.apache.hadoop.hbase.shaded.io.netty.buffer.ByteBuf;
-031import 
org.apache.hadoop.hbase.shaded.io.netty.buffer.ByteBufAllocator;
-032import 
org.apache.hadoop.hbase.shaded.io.netty.channel.Channel;
-033import 
org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelHandler.Sharable;
-034import 
org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelHandlerContext;
-035import 
org.apache.hadoop.hbase.shaded.io.netty.channel.EventLoop;
-036import 
org.apache.hadoop.hbase.shaded.io.netty.channel.SimpleChannelInboundHandler;
-037import 
org.apache.hadoop.hbase.shaded.io.netty.handler.codec.protobuf.ProtobufDecoder;
-038import 
org.apache.hadoop.hbase.shaded.io.netty.handler.codec.protobuf.ProtobufVarint32FrameDecoder;
-039import 
org.apache.hadoop.hbase.shaded.io.netty.handler.timeout.IdleStateEvent;
-040import 
org.apache.hadoop.hbase.shaded.io.netty.handler.timeout.IdleStateHandler;
-041import 
org.apache.hadoop.hbase.shaded.io.netty.util.concurrent.Future;
-042import 
org.apache.hadoop.hbase.shaded.io.netty.util.concurrent.Promise;
-043import 
org.apache.hadoop.hbase.shaded.io.netty.util.concurrent.PromiseCombiner;
-044
-045import java.io.IOException;
-046import java.nio.ByteBuffer;
-047import java.util.ArrayDeque;
-048import java.util.Collection;
-049import java.util.Collections;
-050import java.util.Deque;
-051import java.util.IdentityHashMap;
-052import java.util.List;
-053import java.util.Set;
-054import 
java.util.concurrent.CompletableFuture;
-055import java.util.concurrent.TimeUnit;
-056import java.util.function.Supplier;
-057
-058import 
org.apache.hadoop.conf.Configuration;
-059import 
org.apache.hadoop.crypto.Encryptor;
-060import org.apache.hadoop.fs.Path;
+029import java.io.IOException;
+030import java.io.InterruptedIOException;
+031import java.nio.ByteBuffer;
+032import java.util.Collection;
+033import java.util.Collections;
+034import java.util.Iterator;
+035import java.util.List;
+036import java.util.Set;
+037import 
java.util.concurrent.CompletableFuture;
+038import 
java.util.concurrent.ConcurrentHashMap;
+039import 
java.util.concurrent.ConcurrentLinkedDeque;
+040import 
java.util.concurrent.ExecutionException;
+041import java.util.concurrent.TimeUnit;
+042import java.util.function.Supplier;
+043
+044import 
org.apache.hadoop.conf.Configuration;
+045import 
org.apache.hadoop.crypto.Encryptor;
+046import org.apache.hadoop.fs.Path;
+047import 
org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.CancelOnClose;
+048import 
org.apache.hadoop.hbase.util.CancelableProgressable;
+049import 
org.apache.hadoop.hbase.util.FSUtils;
+050import 
org.apache.hadoop.hdfs.DFSClient;
+051import 
org.apache.hadoop.hdfs.Distribute

[23/51] [partial] hbase-site git commit: Published site at .

2017-11-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/713d773f/devapidocs/src-html/org/apache/hadoop/hbase/HConstants.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/HConstants.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/HConstants.html
index 11bfb15..915e78a 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/HConstants.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/HConstants.html
@@ -782,557 +782,562 @@
 774  /**
 775   * Default value of {@link 
#HBASE_CLIENT_RETRIES_NUMBER}.
 776   */
-777  public static final int 
DEFAULT_HBASE_CLIENT_RETRIES_NUMBER = 35;
+777  public static final int 
DEFAULT_HBASE_CLIENT_RETRIES_NUMBER = 10;
 778
-779  /**
-780   * Parameter name to set the default 
scanner caching for all clients.
-781   */
-782  public static final String 
HBASE_CLIENT_SCANNER_CACHING = "hbase.client.scanner.caching";
+779  public static final String 
HBASE_CLIENT_SERVERSIDE_RETRIES_MULTIPLIER =
+780  
"hbase.client.serverside.retries.multiplier";
+781
+782  public static final int 
DEFAULT_HBASE_CLIENT_SERVERSIDE_RETRIES_MULTIPLIER = 3;
 783
 784  /**
-785   * Default value for {@link 
#HBASE_CLIENT_SCANNER_CACHING}
+785   * Parameter name to set the default 
scanner caching for all clients.
 786   */
-787  public static final int 
DEFAULT_HBASE_CLIENT_SCANNER_CACHING = Integer.MAX_VALUE;
+787  public static final String 
HBASE_CLIENT_SCANNER_CACHING = "hbase.client.scanner.caching";
 788
 789  /**
-790   * Parameter name for number of rows 
that will be fetched when calling next on
-791   * a scanner if it is not served from 
memory. Higher caching values will
-792   * enable faster scanners but will eat 
up more memory and some calls of next
-793   * may take longer and longer times 
when the cache is empty.
-794   */
-795  public static final String 
HBASE_META_SCANNER_CACHING = "hbase.meta.scanner.caching";
-796
-797  /**
-798   * Default value of {@link 
#HBASE_META_SCANNER_CACHING}.
+790   * Default value for {@link 
#HBASE_CLIENT_SCANNER_CACHING}
+791   */
+792  public static final int 
DEFAULT_HBASE_CLIENT_SCANNER_CACHING = Integer.MAX_VALUE;
+793
+794  /**
+795   * Parameter name for number of rows 
that will be fetched when calling next on
+796   * a scanner if it is not served from 
memory. Higher caching values will
+797   * enable faster scanners but will eat 
up more memory and some calls of next
+798   * may take longer and longer times 
when the cache is empty.
 799   */
-800  public static final int 
DEFAULT_HBASE_META_SCANNER_CACHING = 100;
+800  public static final String 
HBASE_META_SCANNER_CACHING = "hbase.meta.scanner.caching";
 801
 802  /**
-803   * Parameter name for number of 
versions, kept by meta table.
+803   * Default value of {@link 
#HBASE_META_SCANNER_CACHING}.
 804   */
-805  public static final String 
HBASE_META_VERSIONS = "hbase.meta.versions";
+805  public static final int 
DEFAULT_HBASE_META_SCANNER_CACHING = 100;
 806
 807  /**
-808   * Default value of {@link 
#HBASE_META_VERSIONS}.
+808   * Parameter name for number of 
versions, kept by meta table.
 809   */
-810  public static final int 
DEFAULT_HBASE_META_VERSIONS = 3;
+810  public static final String 
HBASE_META_VERSIONS = "hbase.meta.versions";
 811
 812  /**
-813   * Parameter name for number of 
versions, kept by meta table.
+813   * Default value of {@link 
#HBASE_META_VERSIONS}.
 814   */
-815  public static final String 
HBASE_META_BLOCK_SIZE = "hbase.meta.blocksize";
+815  public static final int 
DEFAULT_HBASE_META_VERSIONS = 3;
 816
 817  /**
-818   * Default value of {@link 
#HBASE_META_BLOCK_SIZE}.
+818   * Parameter name for number of 
versions, kept by meta table.
 819   */
-820  public static final int 
DEFAULT_HBASE_META_BLOCK_SIZE = 8 * 1024;
+820  public static final String 
HBASE_META_BLOCK_SIZE = "hbase.meta.blocksize";
 821
 822  /**
-823   * Parameter name for unique identifier 
for this {@link org.apache.hadoop.conf.Configuration}
-824   * instance. If there are two or more 
{@link org.apache.hadoop.conf.Configuration} instances that,
-825   * for all intents and purposes, are 
the same except for their instance ids, then they will not be
-826   * able to share the same 
org.apache.hadoop.hbase.client.HConnection instance. On the other hand,
-827   * even if the instance ids are the 
same, it could result in non-shared
-828   * 
org.apache.hadoop.hbase.client.HConnection instances if some of the other 
connection parameters
-829   * differ.
-830   */
-831  public static final String 
HBASE_CLIENT_INSTANCE_ID = "hbase.client.instance.id";
-832
-833  /**
-834   * The client scanner timeout period in 
milliseconds.
+823   * Default value of {@link 
#HBASE_META_BLOCK_SIZE}.
+824   */
+825  public static final int 
DEFAULT_HBASE_META_BLOCK_SIZE = 8 * 1024;
+826
+827  /**
+828   * Parameter name for unique identifier 
for this {@link org.apache.hadoop.conf.Configuration}
+82
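
For scale, a minimal sketch of the retry budget implied by the new defaults in
this hunk (assuming, as the ConnectionUtils source in the next message shows,
that the server-side retry count is the client retry count times the
multiplier; key names as in the source above):

import org.apache.hadoop.conf.Configuration;

public class RetryBudget {
  public static void main(String[] args) {
    Configuration c = new Configuration();
    // Defaults as in the hunk above: 10 client retries, multiplier 3.
    int hcRetries = c.getInt("hbase.client.retries.number", 10);
    int multiplier = c.getInt("hbase.client.serverside.retries.multiplier", 3);
    // 10 * 3 = 30 server-side retries, versus 35 * 10 = 350 under the old
    // defaults shown on the '-' side of the hunk.
    System.out.println("server-side retries = " + (hcRetries * multiplier));
  }
}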

[20/51] [partial] hbase-site git commit: Published site at .

2017-11-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/713d773f/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionUtils.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionUtils.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionUtils.html
index 1bddf29..f667b93 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionUtils.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionUtils.html
@@ -124,380 +124,381 @@
 116  
HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
 117// Go big. Multiply by 10. If we 
can't get to meta after this many retries
 118// then something seriously wrong.
-119int serversideMultiplier = 
c.getInt("hbase.client.serverside.retries.multiplier", 10);
-120int retries = hcRetries * 
serversideMultiplier;
-121
c.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, retries);
-122log.info(sn + " server-side 
Connection retries=" + retries);
-123  }
-124
-125  /**
-126   * A ClusterConnection that will 
short-circuit RPC making direct invocations against the
-127   * localhost if the invocation target 
is 'this' server; save on network and protobuf
-128   * invocations.
-129   */
-130  // TODO This has to still do PB 
marshalling/unmarshalling stuff. Check how/whether we can avoid.
-131  @VisibleForTesting // Class is visible 
so can assert we are short-circuiting when expected.
-132  public static class 
ShortCircuitingClusterConnection extends ConnectionImplementation {
-133private final ServerName 
serverName;
-134private final 
AdminService.BlockingInterface localHostAdmin;
-135private final 
ClientService.BlockingInterface localHostClient;
-136
-137private 
ShortCircuitingClusterConnection(Configuration conf, ExecutorService pool, User 
user,
-138ServerName serverName, 
AdminService.BlockingInterface admin,
-139ClientService.BlockingInterface 
client)
-140throws IOException {
-141  super(conf, pool, user);
-142  this.serverName = serverName;
-143  this.localHostAdmin = admin;
-144  this.localHostClient = client;
-145}
-146
-147@Override
-148public AdminService.BlockingInterface 
getAdmin(ServerName sn) throws IOException {
-149  return serverName.equals(sn) ? 
this.localHostAdmin : super.getAdmin(sn);
-150}
-151
-152@Override
-153public 
ClientService.BlockingInterface getClient(ServerName sn) throws IOException {
-154  return serverName.equals(sn) ? 
this.localHostClient : super.getClient(sn);
-155}
-156
-157@Override
-158public MasterKeepAliveConnection 
getKeepAliveMasterService() throws MasterNotRunningException {
-159  if (this.localHostClient instanceof 
MasterService.BlockingInterface) {
-160return new 
ShortCircuitMasterConnection((MasterService.BlockingInterface)this.localHostClient);
-161  }
-162  return 
super.getKeepAliveMasterService();
-163}
-164  }
-165
-166  /**
-167   * Creates a short-circuit connection 
that can bypass the RPC layer (serialization,
-168   * deserialization, networking, etc..) 
when talking to a local server.
-169   * @param conf the current 
configuration
-170   * @param pool the thread pool to use 
for batch operations
-171   * @param user the user the connection 
is for
-172   * @param serverName the local server 
name
-173   * @param admin the admin interface of 
the local server
-174   * @param client the client interface 
of the local server
-175   * @return an short-circuit 
connection.
-176   * @throws IOException if IO failure 
occurred
-177   */
-178  public static ClusterConnection 
createShortCircuitConnection(final Configuration conf,
-179  ExecutorService pool, User user, 
final ServerName serverName,
-180  final 
AdminService.BlockingInterface admin, final ClientService.BlockingInterface 
client)
-181  throws IOException {
-182if (user == null) {
-183  user = 
UserProvider.instantiate(conf).getCurrent();
-184}
-185return new 
ShortCircuitingClusterConnection(conf, pool, user, serverName, admin, 
client);
-186  }
-187
-188  /**
-189   * Setup the connection class, so that 
it will not depend on master being online. Used for testing
-190   * @param conf configuration to set
-191   */
-192  @VisibleForTesting
-193  public static void 
setupMasterlessConnection(Configuration conf) {
-194
conf.set(ClusterConnection.HBASE_CLIENT_CONNECTION_IMPL, 
MasterlessConnection.class.getName());
-195  }
-196
-197  /**
-198   * Some tests shut down the master. But 
table availability is a master RPC which is performed on
-199   * region re-lookups.
-200   */
-201  static class MasterlessConnection 
extends ConnectionImplementation {
-202MasterlessConnection(Configuration 
conf, ExecutorService pool, User user) throws IOException {
-203  super(conf, pool, user);
-204}
-205
-206@Override
-207public boolea
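
The ShortCircuitingClusterConnection shown above bypasses the RPC stack
(serialization, networking) when the caller targets its own server. A toy
sketch of that dispatch decision; the interface and names here are
illustrative assumptions, not HBase API:

import java.util.function.Function;

interface AdminHandle { String status(); }

class ShortCircuit {
  private final String localServerName;
  private final AdminHandle localAdmin;
  private final Function<String, AdminHandle> rpcFactory;

  ShortCircuit(String localServerName, AdminHandle localAdmin,
      Function<String, AdminHandle> rpcFactory) {
    this.localServerName = localServerName;
    this.localAdmin = localAdmin;
    this.rpcFactory = rpcFactory;
  }

  // Same decision getAdmin(ServerName) makes above: a local target gets the
  // in-process handle, anything else goes through the normal RPC factory.
  AdminHandle getAdmin(String serverName) {
    return localServerName.equals(serverName) ? localAdmin : rpcFactory.apply(serverName);
  }
}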

[10/51] [partial] hbase-site git commit: Published site at .

2017-11-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/713d773f/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.FileCreator.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.FileCreator.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.FileCreator.html
index 25e368d..d0f781f 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.FileCreator.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.FileCreator.html
@@ -25,798 +25,798 @@
 017 */
 018package 
org.apache.hadoop.hbase.io.asyncfs;
 019
-020import static 
org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelOption.CONNECT_TIMEOUT_MILLIS;
-021import static 
org.apache.hadoop.hbase.shaded.io.netty.handler.timeout.IdleState.READER_IDLE;
-022import static 
org.apache.hadoop.fs.CreateFlag.CREATE;
-023import static 
org.apache.hadoop.fs.CreateFlag.OVERWRITE;
-024import static 
org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createEncryptor;
-025import static 
org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.trySaslNegotiate;
+020import static 
org.apache.hadoop.fs.CreateFlag.CREATE;
+021import static 
org.apache.hadoop.fs.CreateFlag.OVERWRITE;
+022import static 
org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createEncryptor;
+023import static 
org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.trySaslNegotiate;
+024import static 
org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelOption.CONNECT_TIMEOUT_MILLIS;
+025import static 
org.apache.hadoop.hbase.shaded.io.netty.handler.timeout.IdleState.READER_IDLE;
 026import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY;
 027import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_USE_DN_HOSTNAME;
 028import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_USE_DN_HOSTNAME_DEFAULT;
 029import static 
org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage.PIPELINE_SETUP_CREATE;
 030
-031import 
org.apache.hadoop.hbase.shaded.com.google.common.base.Throwables;
-032import 
org.apache.hadoop.hbase.shaded.com.google.common.collect.ImmutableMap;
-033import 
com.google.protobuf.CodedOutputStream;
-034
-035import 
org.apache.hadoop.hbase.shaded.io.netty.bootstrap.Bootstrap;
-036import 
org.apache.hadoop.hbase.shaded.io.netty.buffer.ByteBuf;
-037import 
org.apache.hadoop.hbase.shaded.io.netty.buffer.ByteBufAllocator;
-038import 
org.apache.hadoop.hbase.shaded.io.netty.buffer.ByteBufOutputStream;
-039import 
org.apache.hadoop.hbase.shaded.io.netty.buffer.PooledByteBufAllocator;
-040import 
org.apache.hadoop.hbase.shaded.io.netty.channel.Channel;
-041import 
org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelFuture;
-042import 
org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelFutureListener;
-043import 
org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelHandler;
-044import 
org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelHandlerContext;
-045import 
org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelInitializer;
-046import 
org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelPipeline;
-047import 
org.apache.hadoop.hbase.shaded.io.netty.channel.EventLoop;
-048import 
org.apache.hadoop.hbase.shaded.io.netty.channel.SimpleChannelInboundHandler;
-049import 
org.apache.hadoop.hbase.shaded.io.netty.handler.codec.protobuf.ProtobufDecoder;
-050import 
org.apache.hadoop.hbase.shaded.io.netty.handler.codec.protobuf.ProtobufVarint32FrameDecoder;
-051import 
org.apache.hadoop.hbase.shaded.io.netty.handler.timeout.IdleStateEvent;
-052import 
org.apache.hadoop.hbase.shaded.io.netty.handler.timeout.IdleStateHandler;
-053import 
org.apache.hadoop.hbase.shaded.io.netty.util.concurrent.Future;
-054import 
org.apache.hadoop.hbase.shaded.io.netty.util.concurrent.FutureListener;
-055import 
org.apache.hadoop.hbase.shaded.io.netty.util.concurrent.Promise;
-056
-057import java.io.IOException;
-058import 
java.lang.reflect.InvocationTargetException;
-059import java.lang.reflect.Method;
-060import java.util.ArrayList;
-061import java.util.EnumSet;
-062import java.util.List;
-063import java.util.concurrent.TimeUnit;
-064
-065import org.apache.commons.logging.Log;
-066import 
org.apache.commons.logging.LogFactory;
-067import 
org.apache.hadoop.conf.Configuration;
-068import 
org.apache.hadoop.crypto.CryptoProtocolVersion;
-069import 
org.apache.hadoop.crypto.Encryptor;
-070import org.apache.hadoop.fs.CreateFlag;
-071import org.apache.hadoop.fs.FileSystem;
-072import 
org.apache.hadoop.fs.FileSystemLinkResolver;
-073import org.apache.hadoop.fs.Path;
-074import 
org.apache.hadoop.fs.UnresolvedLinkException;
-075import 
org.apache.hadoop.fs.permission.FsPerm

[02/51] [partial] hbase-site git commit: Published site at .

2017-11-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/713d773f/devapidocs/src-html/org/apache/hadoop/hbase/master/RegionState.State.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/RegionState.State.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/RegionState.State.html
index 2daacb5..fb5cc60 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/RegionState.State.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/RegionState.State.html
@@ -25,413 +25,414 @@
 017 */
 018package org.apache.hadoop.hbase.master;
 019
-020import java.util.Date;
-021
-022import 
org.apache.hadoop.hbase.ServerName;
-023import 
org.apache.hadoop.hbase.client.RegionInfo;
-024import 
org.apache.yetus.audience.InterfaceAudience;
-025import 
org.apache.yetus.audience.InterfaceStability;
-026
-027import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-028import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos;
-029
-030/**
-031 * State of a Region while undergoing 
transitions.
-032 * This class is immutable.
-033 */
-034@InterfaceAudience.Private
-035public class RegionState {
-036
-037  @InterfaceAudience.Private
-038  @InterfaceStability.Evolving
-039  public enum State {
-040OFFLINE,// region is in an 
offline state
-041OPENING,// server has begun 
to open but not yet done
-042OPEN,   // server opened 
region and updated meta
-043CLOSING,// server has begun 
to close but not yet done
-044CLOSED, // server closed 
region and updated meta
-045SPLITTING,  // server started 
split of a region
-046SPLIT,  // server completed 
split of a region
-047FAILED_OPEN,// failed to open, 
and won't retry any more
-048FAILED_CLOSE,   // failed to close, 
and won't retry any more
-049MERGING,// server started 
merge a region
-050MERGED, // server completed 
merge a region
-051SPLITTING_NEW,  // new region to be 
created when RS splits a parent
-052// region but hasn't 
be created yet, or master doesn't
-053// know it's already 
created
-054MERGING_NEW;// new region to be 
created when RS merges two
-055// daughter regions 
but hasn't be created yet, or
-056// master doesn't 
know it's already created
-057
-058/**
-059 * Convert to protobuf 
ClusterStatusProtos.RegionState.State
-060 */
-061public 
ClusterStatusProtos.RegionState.State convert() {
-062  
ClusterStatusProtos.RegionState.State rs;
-063  switch (this) {
-064  case OFFLINE:
-065rs = 
ClusterStatusProtos.RegionState.State.OFFLINE;
-066break;
-067  case OPENING:
-068rs = 
ClusterStatusProtos.RegionState.State.OPENING;
-069break;
-070  case OPEN:
-071rs = 
ClusterStatusProtos.RegionState.State.OPEN;
-072break;
-073  case CLOSING:
-074rs = 
ClusterStatusProtos.RegionState.State.CLOSING;
-075break;
-076  case CLOSED:
-077rs = 
ClusterStatusProtos.RegionState.State.CLOSED;
-078break;
-079  case SPLITTING:
-080rs = 
ClusterStatusProtos.RegionState.State.SPLITTING;
-081break;
-082  case SPLIT:
-083rs = 
ClusterStatusProtos.RegionState.State.SPLIT;
-084break;
-085  case FAILED_OPEN:
-086rs = 
ClusterStatusProtos.RegionState.State.FAILED_OPEN;
-087break;
-088  case FAILED_CLOSE:
-089rs = 
ClusterStatusProtos.RegionState.State.FAILED_CLOSE;
-090break;
-091  case MERGING:
-092rs = 
ClusterStatusProtos.RegionState.State.MERGING;
-093break;
-094  case MERGED:
-095rs = 
ClusterStatusProtos.RegionState.State.MERGED;
-096break;
-097  case SPLITTING_NEW:
-098rs = 
ClusterStatusProtos.RegionState.State.SPLITTING_NEW;
-099break;
-100  case MERGING_NEW:
-101rs = 
ClusterStatusProtos.RegionState.State.MERGING_NEW;
-102break;
-103  default:
-104throw new 
IllegalStateException("");
-105  }
-106  return rs;
-107}
-108
-109/**
-110 * Convert a protobuf 
HBaseProtos.RegionState.State to a RegionState.State
-111 *
-112 * @return the RegionState.State
-113 */
-114public static State 
convert(ClusterStatusProtos.RegionState.State protoState) {
-115  State state;
-116  switch (protoState) {
-117  case OFFLINE:
-118state = OFFLINE;
-119break;
-120  case PENDING_OPEN:
-121  case OPENING:
-122state = OPENING;
-123break;
-124  case OPEN:
-125state = OPEN;
-126break;
-127  case PENDING_CLOSE:
-128  case CLOSING:
-129state = CLOSING;
-130break;
-131  case CLOSED:
-132state = CLOSED;
-133break;
-134  case SPLITTING:
-135 
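
A note on the convert() switches above: the protobuf enum carries states the
master-side enum collapses, e.g. both PENDING_OPEN and OPENING on the wire map
to OPENING in RegionState.State, so a plain Enum.valueOf round-trip is not
possible and an explicit switch is required. A self-contained toy of that
collapsing mapping (the enum names here are illustrative, not the real ones):

enum WireState { PENDING_OPEN, OPENING, OPEN }

enum LocalState { OPENING, OPEN }

class ConvertSketch {
  static LocalState fromWire(WireState w) {
    switch (w) {
      case PENDING_OPEN: // fall through: wire-only state collapses
      case OPENING:
        return LocalState.OPENING;
      case OPEN:
        return LocalState.OPEN;
      default:
        throw new IllegalStateException("unhandled: " + w);
    }
  }
}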

[19/51] [partial] hbase-site git commit: Published site at .

2017-11-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/713d773f/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/AsyncFSOutputHelper.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/AsyncFSOutputHelper.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/AsyncFSOutputHelper.html
index c6e457f..9b7087a 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/AsyncFSOutputHelper.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/AsyncFSOutputHelper.html
@@ -25,166 +25,161 @@
 017 */
 018package 
org.apache.hadoop.hbase.io.asyncfs;
 019
-020import 
org.apache.hadoop.hbase.shaded.com.google.common.base.Throwables;
-021import 
org.apache.hadoop.hbase.shaded.com.google.common.util.concurrent.ThreadFactoryBuilder;
-022
-023import 
org.apache.hadoop.hbase.shaded.io.netty.channel.Channel;
-024import 
org.apache.hadoop.hbase.shaded.io.netty.channel.EventLoop;
-025
-026import java.io.IOException;
-027import java.io.InterruptedIOException;
-028import java.nio.ByteBuffer;
-029import 
java.util.concurrent.CompletableFuture;
-030import 
java.util.concurrent.ExecutionException;
-031import 
java.util.concurrent.ExecutorService;
-032import java.util.concurrent.Executors;
-033
-034import 
org.apache.hadoop.fs.CommonConfigurationKeysPublic;
-035import 
org.apache.hadoop.fs.FSDataOutputStream;
-036import org.apache.hadoop.fs.FileSystem;
-037import org.apache.hadoop.fs.Path;
-038import 
org.apache.yetus.audience.InterfaceAudience;
-039import 
org.apache.hadoop.hbase.io.ByteArrayOutputStream;
-040import 
org.apache.hadoop.hbase.util.CancelableProgressable;
-041import 
org.apache.hadoop.hbase.util.CommonFSUtils;
-042import 
org.apache.hadoop.hdfs.DistributedFileSystem;
-043import 
org.apache.hadoop.hdfs.protocol.DatanodeInfo;
-044
-045/**
-046 * Helper class for creating 
AsyncFSOutput.
-047 */
-048@InterfaceAudience.Private
-049public final class AsyncFSOutputHelper 
{
-050
-051  private AsyncFSOutputHelper() {
-052  }
-053
-054  /**
-055   * Create {@link 
FanOutOneBlockAsyncDFSOutput} for {@link DistributedFileSystem}, and a simple
-056   * implementation for other {@link 
FileSystem} which wraps around a {@link FSDataOutputStream}.
-057   */
-058  public static AsyncFSOutput 
createOutput(FileSystem fs, Path f, boolean overwrite,
-059  boolean createParent, short 
replication, long blockSize, EventLoop eventLoop,
-060  Class 
channelClass)
-061  throws IOException, 
CommonFSUtils.StreamLacksCapabilityException {
-062if (fs instanceof 
DistributedFileSystem) {
-063  return 
FanOutOneBlockAsyncDFSOutputHelper.createOutput((DistributedFileSystem) fs, 
f,
-064overwrite, createParent, 
replication, blockSize, eventLoop, channelClass);
-065}
-066final FSDataOutputStream fsOut;
-067int bufferSize = 
fs.getConf().getInt(CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY,
-068  
CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT);
-069if (createParent) {
-070  fsOut = fs.create(f, overwrite, 
bufferSize, replication, blockSize, null);
-071} else {
-072  fsOut = fs.createNonRecursive(f, 
overwrite, bufferSize, replication, blockSize, null);
-073}
-074// After we create the stream but 
before we attempt to use it at all
-075// ensure that we can provide the 
level of data safety we're configured
-076// to provide.
-077if 
(!(CommonFSUtils.hasCapability(fsOut, "hflush") &&
-078
CommonFSUtils.hasCapability(fsOut, "hsync"))) {
-079  throw new 
CommonFSUtils.StreamLacksCapabilityException("hflush and hsync");
-080}
-081final ExecutorService flushExecutor 
=
-082
Executors.newSingleThreadExecutor(new ThreadFactoryBuilder().setDaemon(true)
-083
.setNameFormat("AsyncFSOutputFlusher-" + f.toString().replace("%", 
"%%")).build());
-084return new AsyncFSOutput() {
-085
-086  private final ByteArrayOutputStream 
out = new ByteArrayOutputStream();
-087
-088  @Override
-089  public void write(final byte[] b, 
final int off, final int len) {
-090if (eventLoop.inEventLoop()) {
-091  out.write(b, off, len);
-092} else {
-093  eventLoop.submit(() -> 
out.write(b, off, len)).syncUninterruptibly();
-094}
+020import java.io.IOException;
+021import java.io.InterruptedIOException;
+022import java.nio.ByteBuffer;
+023import 
java.util.concurrent.CompletableFuture;
+024import 
java.util.concurrent.ExecutionException;
+025import 
java.util.concurrent.ExecutorService;
+026import java.util.concurrent.Executors;
+027
+028import 
org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+029import 
org.apache.hadoop.fs.FSDataOutputStream;
+030import org.apache.hadoop.fs.FileSystem;
+031import org.apache.hadoop.fs.Path;
+032import 
org.apache.hadoop.hbase.io.ByteArrayOutputStream;
+033import 
org.apache.hadoop.hbase.util.Cancela
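
The helper above refuses to hand back a stream that cannot provide hflush and
hsync, rather than risk silent data loss. A minimal sketch of the same
durability guard using Hadoop's StreamCapabilities API directly (assumes
Hadoop 2.9+/3.x; the source above instead goes through
CommonFSUtils.hasCapability, and the path below is arbitrary):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class CapabilityGuard {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    try (FSDataOutputStream out = fs.create(new Path("/tmp/wal-probe"))) {
      // Check before first use, exactly as the helper above does.
      if (!(out.hasCapability("hflush") && out.hasCapability("hsync"))) {
        throw new IllegalStateException("stream lacks hflush/hsync");
      }
      out.write(new byte[] { 1 });
      out.hsync(); // durable on the datanodes before proceeding
    }
  }
}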

[18/51] [partial] hbase-site git commit: Published site at .

2017-11-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/713d773f/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutput.AckHandler.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutput.AckHandler.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutput.AckHandler.html
index de6cb11..dd54dd2 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutput.AckHandler.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutput.AckHandler.html
@@ -25,563 +25,558 @@
 017 */
 018package 
org.apache.hadoop.hbase.io.asyncfs;
 019
-020import static 
org.apache.hadoop.hbase.shaded.io.netty.handler.timeout.IdleState.READER_IDLE;
-021import static 
org.apache.hadoop.hbase.shaded.io.netty.handler.timeout.IdleState.WRITER_IDLE;
-022import static 
org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.HEART_BEAT_SEQNO;
-023import static 
org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.READ_TIMEOUT;
-024import static 
org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile;
-025import static 
org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.endFileLease;
-026import static 
org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.getStatus;
+020import static 
org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.HEART_BEAT_SEQNO;
+021import static 
org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.READ_TIMEOUT;
+022import static 
org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile;
+023import static 
org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.endFileLease;
+024import static 
org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.getStatus;
+025import static 
org.apache.hadoop.hbase.shaded.io.netty.handler.timeout.IdleState.READER_IDLE;
+026import static 
org.apache.hadoop.hbase.shaded.io.netty.handler.timeout.IdleState.WRITER_IDLE;
 027import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY;
 028
-029
-030import 
org.apache.hadoop.hbase.shaded.io.netty.buffer.ByteBuf;
-031import 
org.apache.hadoop.hbase.shaded.io.netty.buffer.ByteBufAllocator;
-032import 
org.apache.hadoop.hbase.shaded.io.netty.channel.Channel;
-033import 
org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelHandler.Sharable;
-034import 
org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelHandlerContext;
-035import 
org.apache.hadoop.hbase.shaded.io.netty.channel.EventLoop;
-036import 
org.apache.hadoop.hbase.shaded.io.netty.channel.SimpleChannelInboundHandler;
-037import 
org.apache.hadoop.hbase.shaded.io.netty.handler.codec.protobuf.ProtobufDecoder;
-038import 
org.apache.hadoop.hbase.shaded.io.netty.handler.codec.protobuf.ProtobufVarint32FrameDecoder;
-039import 
org.apache.hadoop.hbase.shaded.io.netty.handler.timeout.IdleStateEvent;
-040import 
org.apache.hadoop.hbase.shaded.io.netty.handler.timeout.IdleStateHandler;
-041import 
org.apache.hadoop.hbase.shaded.io.netty.util.concurrent.Future;
-042import 
org.apache.hadoop.hbase.shaded.io.netty.util.concurrent.Promise;
-043import 
org.apache.hadoop.hbase.shaded.io.netty.util.concurrent.PromiseCombiner;
-044
-045import java.io.IOException;
-046import java.nio.ByteBuffer;
-047import java.util.ArrayDeque;
-048import java.util.Collection;
-049import java.util.Collections;
-050import java.util.Deque;
-051import java.util.IdentityHashMap;
-052import java.util.List;
-053import java.util.Set;
-054import 
java.util.concurrent.CompletableFuture;
-055import java.util.concurrent.TimeUnit;
-056import java.util.function.Supplier;
-057
-058import 
org.apache.hadoop.conf.Configuration;
-059import 
org.apache.hadoop.crypto.Encryptor;
-060import org.apache.hadoop.fs.Path;
+029import java.io.IOException;
+030import java.io.InterruptedIOException;
+031import java.nio.ByteBuffer;
+032import java.util.Collection;
+033import java.util.Collections;
+034import java.util.Iterator;
+035import java.util.List;
+036import java.util.Set;
+037import 
java.util.concurrent.CompletableFuture;
+038import 
java.util.concurrent.ConcurrentHashMap;
+039import 
java.util.concurrent.ConcurrentLinkedDeque;
+040import 
java.util.concurrent.ExecutionException;
+041import java.util.concurrent.TimeUnit;
+042import java.util.function.Supplier;
+043
+044import 
org.apache.hadoop.conf.Configuration;
+045import 
org.apache.hadoop.crypto.Encryptor;
+046import org.apache.hadoop.fs.Path;
+047import 
org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.CancelOnClose;
+048import 
org.apache.hadoop.hbase.util.CancelableProgressable;
+049import 
org.apache.hadoop.hbase.util.FSUtils;
+050import 
org.apache.hadoop.hdfs.DFSClient;
+051import 
org.apache.hadoop.hdfs.

[06/51] [partial] hbase-site git commit: Published site at .

2017-11-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/713d773f/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.PipelineAckStatusGetter.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.PipelineAckStatusGetter.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.PipelineAckStatusGetter.html
index 25e368d..d0f781f 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.PipelineAckStatusGetter.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.PipelineAckStatusGetter.html
@@ -25,798 +25,798 @@
 017 */
 018package 
org.apache.hadoop.hbase.io.asyncfs;
 019
-020import static 
org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelOption.CONNECT_TIMEOUT_MILLIS;
-021import static 
org.apache.hadoop.hbase.shaded.io.netty.handler.timeout.IdleState.READER_IDLE;
-022import static 
org.apache.hadoop.fs.CreateFlag.CREATE;
-023import static 
org.apache.hadoop.fs.CreateFlag.OVERWRITE;
-024import static 
org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createEncryptor;
-025import static 
org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.trySaslNegotiate;
+020import static 
org.apache.hadoop.fs.CreateFlag.CREATE;
+021import static 
org.apache.hadoop.fs.CreateFlag.OVERWRITE;
+022import static 
org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createEncryptor;
+023import static 
org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.trySaslNegotiate;
+024import static 
org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelOption.CONNECT_TIMEOUT_MILLIS;
+025import static 
org.apache.hadoop.hbase.shaded.io.netty.handler.timeout.IdleState.READER_IDLE;
 026import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY;
 027import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_USE_DN_HOSTNAME;
 028import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_USE_DN_HOSTNAME_DEFAULT;
 029import static 
org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage.PIPELINE_SETUP_CREATE;
 030
-031import 
org.apache.hadoop.hbase.shaded.com.google.common.base.Throwables;
-032import 
org.apache.hadoop.hbase.shaded.com.google.common.collect.ImmutableMap;
-033import 
com.google.protobuf.CodedOutputStream;
-034
-035import 
org.apache.hadoop.hbase.shaded.io.netty.bootstrap.Bootstrap;
-036import 
org.apache.hadoop.hbase.shaded.io.netty.buffer.ByteBuf;
-037import 
org.apache.hadoop.hbase.shaded.io.netty.buffer.ByteBufAllocator;
-038import 
org.apache.hadoop.hbase.shaded.io.netty.buffer.ByteBufOutputStream;
-039import 
org.apache.hadoop.hbase.shaded.io.netty.buffer.PooledByteBufAllocator;
-040import 
org.apache.hadoop.hbase.shaded.io.netty.channel.Channel;
-041import 
org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelFuture;
-042import 
org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelFutureListener;
-043import 
org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelHandler;
-044import 
org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelHandlerContext;
-045import 
org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelInitializer;
-046import 
org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelPipeline;
-047import 
org.apache.hadoop.hbase.shaded.io.netty.channel.EventLoop;
-048import 
org.apache.hadoop.hbase.shaded.io.netty.channel.SimpleChannelInboundHandler;
-049import 
org.apache.hadoop.hbase.shaded.io.netty.handler.codec.protobuf.ProtobufDecoder;
-050import 
org.apache.hadoop.hbase.shaded.io.netty.handler.codec.protobuf.ProtobufVarint32FrameDecoder;
-051import 
org.apache.hadoop.hbase.shaded.io.netty.handler.timeout.IdleStateEvent;
-052import 
org.apache.hadoop.hbase.shaded.io.netty.handler.timeout.IdleStateHandler;
-053import 
org.apache.hadoop.hbase.shaded.io.netty.util.concurrent.Future;
-054import 
org.apache.hadoop.hbase.shaded.io.netty.util.concurrent.FutureListener;
-055import 
org.apache.hadoop.hbase.shaded.io.netty.util.concurrent.Promise;
-056
-057import java.io.IOException;
-058import 
java.lang.reflect.InvocationTargetException;
-059import java.lang.reflect.Method;
-060import java.util.ArrayList;
-061import java.util.EnumSet;
-062import java.util.List;
-063import java.util.concurrent.TimeUnit;
-064
-065import org.apache.commons.logging.Log;
-066import 
org.apache.commons.logging.LogFactory;
-067import 
org.apache.hadoop.conf.Configuration;
-068import 
org.apache.hadoop.crypto.CryptoProtocolVersion;
-069import 
org.apache.hadoop.crypto.Encryptor;
-070import org.apache.hadoop.fs.CreateFlag;
-071import org.apache.hadoop.fs.FileSystem;
-072import 
org.apache.hadoop.fs.FileSystemLinkResolver;
-073import org.apache.hadoop.fs.Path;
-074import 
org.apache.hadoop.fs.UnresolvedLinkE

[09/51] [partial] hbase-site git commit: Published site at .

2017-11-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/713d773f/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.LeaseManager.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.LeaseManager.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.LeaseManager.html
index 25e368d..d0f781f 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.LeaseManager.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.LeaseManager.html
@@ -25,798 +25,798 @@
 017 */
 018package 
org.apache.hadoop.hbase.io.asyncfs;
 019
-020import static 
org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelOption.CONNECT_TIMEOUT_MILLIS;
-021import static 
org.apache.hadoop.hbase.shaded.io.netty.handler.timeout.IdleState.READER_IDLE;
-022import static 
org.apache.hadoop.fs.CreateFlag.CREATE;
-023import static 
org.apache.hadoop.fs.CreateFlag.OVERWRITE;
-024import static 
org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createEncryptor;
-025import static 
org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.trySaslNegotiate;
+020import static 
org.apache.hadoop.fs.CreateFlag.CREATE;
+021import static 
org.apache.hadoop.fs.CreateFlag.OVERWRITE;
+022import static 
org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createEncryptor;
+023import static 
org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.trySaslNegotiate;
+024import static 
org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelOption.CONNECT_TIMEOUT_MILLIS;
+025import static 
org.apache.hadoop.hbase.shaded.io.netty.handler.timeout.IdleState.READER_IDLE;
 026import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY;
 027import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_USE_DN_HOSTNAME;
 028import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_USE_DN_HOSTNAME_DEFAULT;
 029import static 
org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage.PIPELINE_SETUP_CREATE;
 030
-031import 
org.apache.hadoop.hbase.shaded.com.google.common.base.Throwables;
-032import 
org.apache.hadoop.hbase.shaded.com.google.common.collect.ImmutableMap;
-033import 
com.google.protobuf.CodedOutputStream;
-034
-035import 
org.apache.hadoop.hbase.shaded.io.netty.bootstrap.Bootstrap;
-036import 
org.apache.hadoop.hbase.shaded.io.netty.buffer.ByteBuf;
-037import 
org.apache.hadoop.hbase.shaded.io.netty.buffer.ByteBufAllocator;
-038import 
org.apache.hadoop.hbase.shaded.io.netty.buffer.ByteBufOutputStream;
-039import 
org.apache.hadoop.hbase.shaded.io.netty.buffer.PooledByteBufAllocator;
-040import 
org.apache.hadoop.hbase.shaded.io.netty.channel.Channel;
-041import 
org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelFuture;
-042import 
org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelFutureListener;
-043import 
org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelHandler;
-044import 
org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelHandlerContext;
-045import 
org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelInitializer;
-046import 
org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelPipeline;
-047import 
org.apache.hadoop.hbase.shaded.io.netty.channel.EventLoop;
-048import 
org.apache.hadoop.hbase.shaded.io.netty.channel.SimpleChannelInboundHandler;
-049import 
org.apache.hadoop.hbase.shaded.io.netty.handler.codec.protobuf.ProtobufDecoder;
-050import 
org.apache.hadoop.hbase.shaded.io.netty.handler.codec.protobuf.ProtobufVarint32FrameDecoder;
-051import 
org.apache.hadoop.hbase.shaded.io.netty.handler.timeout.IdleStateEvent;
-052import 
org.apache.hadoop.hbase.shaded.io.netty.handler.timeout.IdleStateHandler;
-053import 
org.apache.hadoop.hbase.shaded.io.netty.util.concurrent.Future;
-054import 
org.apache.hadoop.hbase.shaded.io.netty.util.concurrent.FutureListener;
-055import 
org.apache.hadoop.hbase.shaded.io.netty.util.concurrent.Promise;
-056
-057import java.io.IOException;
-058import 
java.lang.reflect.InvocationTargetException;
-059import java.lang.reflect.Method;
-060import java.util.ArrayList;
-061import java.util.EnumSet;
-062import java.util.List;
-063import java.util.concurrent.TimeUnit;
-064
-065import org.apache.commons.logging.Log;
-066import 
org.apache.commons.logging.LogFactory;
-067import 
org.apache.hadoop.conf.Configuration;
-068import 
org.apache.hadoop.crypto.CryptoProtocolVersion;
-069import 
org.apache.hadoop.crypto.Encryptor;
-070import org.apache.hadoop.fs.CreateFlag;
-071import org.apache.hadoop.fs.FileSystem;
-072import 
org.apache.hadoop.fs.FileSystemLinkResolver;
-073import org.apache.hadoop.fs.Path;
-074import 
org.apache.hadoop.fs.UnresolvedLinkException;
-075import 
org.apache.hadoop.fs.permission.F

[04/51] [partial] hbase-site git commit: Published site at .

2017-11-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/713d773f/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.html
index 25e368d..d0f781f 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.html
@@ -25,798 +25,798 @@
 017 */
 018package 
org.apache.hadoop.hbase.io.asyncfs;
 019
-020import static 
org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelOption.CONNECT_TIMEOUT_MILLIS;
-021import static 
org.apache.hadoop.hbase.shaded.io.netty.handler.timeout.IdleState.READER_IDLE;
-022import static 
org.apache.hadoop.fs.CreateFlag.CREATE;
-023import static 
org.apache.hadoop.fs.CreateFlag.OVERWRITE;
-024import static 
org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createEncryptor;
-025import static 
org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.trySaslNegotiate;
+020import static 
org.apache.hadoop.fs.CreateFlag.CREATE;
+021import static 
org.apache.hadoop.fs.CreateFlag.OVERWRITE;
+022import static 
org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createEncryptor;
+023import static 
org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.trySaslNegotiate;
+024import static 
org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelOption.CONNECT_TIMEOUT_MILLIS;
+025import static 
org.apache.hadoop.hbase.shaded.io.netty.handler.timeout.IdleState.READER_IDLE;
 026import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY;
 027import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_USE_DN_HOSTNAME;
 028import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_USE_DN_HOSTNAME_DEFAULT;
 029import static 
org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage.PIPELINE_SETUP_CREATE;
 030
-031import 
org.apache.hadoop.hbase.shaded.com.google.common.base.Throwables;
-032import 
org.apache.hadoop.hbase.shaded.com.google.common.collect.ImmutableMap;
-033import 
com.google.protobuf.CodedOutputStream;
-034
-035import 
org.apache.hadoop.hbase.shaded.io.netty.bootstrap.Bootstrap;
-036import 
org.apache.hadoop.hbase.shaded.io.netty.buffer.ByteBuf;
-037import 
org.apache.hadoop.hbase.shaded.io.netty.buffer.ByteBufAllocator;
-038import 
org.apache.hadoop.hbase.shaded.io.netty.buffer.ByteBufOutputStream;
-039import 
org.apache.hadoop.hbase.shaded.io.netty.buffer.PooledByteBufAllocator;
-040import 
org.apache.hadoop.hbase.shaded.io.netty.channel.Channel;
-041import 
org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelFuture;
-042import 
org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelFutureListener;
-043import 
org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelHandler;
-044import 
org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelHandlerContext;
-045import 
org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelInitializer;
-046import 
org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelPipeline;
-047import 
org.apache.hadoop.hbase.shaded.io.netty.channel.EventLoop;
-048import 
org.apache.hadoop.hbase.shaded.io.netty.channel.SimpleChannelInboundHandler;
-049import 
org.apache.hadoop.hbase.shaded.io.netty.handler.codec.protobuf.ProtobufDecoder;
-050import 
org.apache.hadoop.hbase.shaded.io.netty.handler.codec.protobuf.ProtobufVarint32FrameDecoder;
-051import 
org.apache.hadoop.hbase.shaded.io.netty.handler.timeout.IdleStateEvent;
-052import 
org.apache.hadoop.hbase.shaded.io.netty.handler.timeout.IdleStateHandler;
-053import 
org.apache.hadoop.hbase.shaded.io.netty.util.concurrent.Future;
-054import 
org.apache.hadoop.hbase.shaded.io.netty.util.concurrent.FutureListener;
-055import 
org.apache.hadoop.hbase.shaded.io.netty.util.concurrent.Promise;
-056
-057import java.io.IOException;
-058import 
java.lang.reflect.InvocationTargetException;
-059import java.lang.reflect.Method;
-060import java.util.ArrayList;
-061import java.util.EnumSet;
-062import java.util.List;
-063import java.util.concurrent.TimeUnit;
-064
-065import org.apache.commons.logging.Log;
-066import 
org.apache.commons.logging.LogFactory;
-067import 
org.apache.hadoop.conf.Configuration;
-068import 
org.apache.hadoop.crypto.CryptoProtocolVersion;
-069import 
org.apache.hadoop.crypto.Encryptor;
-070import org.apache.hadoop.fs.CreateFlag;
-071import org.apache.hadoop.fs.FileSystem;
-072import 
org.apache.hadoop.fs.FileSystemLinkResolver;
-073import org.apache.hadoop.fs.Path;
-074import 
org.apache.hadoop.fs.UnresolvedLinkException;
-075import 
org.apache.hadoop.fs.permission.FsPermission;
-076import 
org.apache.yetus.audience.InterfaceAudie

hbase git commit: HBASE-17918 document serial replication

2017-11-30 Thread zhangduo
Repository: hbase
Updated Branches:
  refs/heads/branch-2 29079886c -> e20a7574d


HBASE-17918 document serial replication

Signed-off-by: zhangduo 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/e20a7574
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/e20a7574
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/e20a7574

Branch: refs/heads/branch-2
Commit: e20a7574d1015d3517e81c2be31092a89d365c2d
Parents: 2907988
Author: meiyi 
Authored: Thu Nov 30 21:27:39 2017 +0800
Committer: zhangduo 
Committed: Thu Nov 30 21:28:25 2017 +0800

--
 src/main/asciidoc/_chapters/ops_mgt.adoc | 41 ++-
 1 file changed, 40 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/e20a7574/src/main/asciidoc/_chapters/ops_mgt.adoc
--
diff --git a/src/main/asciidoc/_chapters/ops_mgt.adoc 
b/src/main/asciidoc/_chapters/ops_mgt.adoc
index 4092c4d..66f8d27 100644
--- a/src/main/asciidoc/_chapters/ops_mgt.adoc
+++ b/src/main/asciidoc/_chapters/ops_mgt.adoc
@@ -1323,9 +1323,11 @@ If a slave cluster does run out of room, or is 
inaccessible for other reasons, i
 .Consistency Across Replicated Clusters
 [WARNING]
 
-How your application builds on top of the HBase API matters when replication 
is in play. HBase's replication system provides at-least-once delivery of 
client edits for an enabled column family to each configured destination 
cluster. In the event of failure to reach a given destination, the replication 
system will retry sending edits in a way that might repeat a given message. 
Further more, there is not a guaranteed order of delivery for client edits. In 
the event of a RegionServer failing, recovery of the replication queue happens 
independent of recovery of the individual regions that server was previously 
handling. This means that it is possible for the not-yet-replicated edits to be 
serviced by a RegionServer that is currently slower to replicate than the one 
that handles edits from after the failure.
+How your application builds on top of the HBase API matters when replication 
is in play. HBase's replication system provides at-least-once delivery of 
client edits for an enabled column family to each configured destination 
cluster. In the event of failure to reach a given destination, the replication 
system will retry sending edits in a way that might repeat a given message. 
HBase provides two modes of replication: the original replication and serial 
replication. In the original mode, there is no guaranteed order of delivery 
for client edits. In the event of a RegionServer failing, recovery of the 
replication queue happens independently of recovery of the individual regions 
that server was previously handling. This means that it is possible for the 
not-yet-replicated edits to be serviced by a RegionServer that is currently 
slower to replicate than the one that handles edits from after the failure.
 
 The combination of these two properties (at-least-once delivery and the lack 
of message ordering) means that some destination clusters may end up in a 
different state if your application makes use of operations that are not 
idempotent, e.g. Increments.
+
+To solve the problem, HBase now supports serial replication, which sends 
edits to the destination cluster in the same order in which the client 
requests were received by the source cluster.
 
 
 .Terminology Changes
@@ -1366,6 +1368,9 @@ Instead of SQL statements, entire WALEdits (consisting of 
multiple cell inserts
 LOG.info("Replicating "+clusterId + " -> " + peerClusterId);
 
 
+.Serial Replication Configuration
+See <>
+
 .Cluster Management Commands
 add_peer  ::
   Adds a replication relationship between two clusters. +
@@ -1387,6 +1392,40 @@ enable_table_replication ::
 disable_table_replication ::
   Disable the table replication switch for all its column families.
 
+=== Serial Replication
+
+Note: this feature was introduced in HBase 1.5
+
+.Function of serial replication
+
+Serial replication pushes logs to the destination cluster in the same order 
in which they arrived at the source cluster.
+
+.Why is serial replication needed?
+In HBase replication, we push mutations to the destination cluster by 
reading the WAL on each region server. We keep a queue of WAL files so we can 
read them in order of creation time. However, when a region move or RS 
failure occurs in the source cluster, the hlog entries that were not pushed 
before the region move or RS failure will be pushed by the original RS (for a 
region move) or by another RS which takes over the remaining hlog of the dead 
RS (for an RS failure), while the new entries for the same region(s) will be 
pushed by the RS which now serves the region(s), but they push the hlog 
entries of a same re
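
The warning above can be made concrete with a small sketch. The following is
illustrative plain Java only, not HBase API: it models at-least-once delivery
by applying the same replicated edit twice, which shows why an idempotent Put
converges while an Increment diverges.

import java.util.HashMap;
import java.util.Map;

// Illustrative model of a destination cluster's state under at-least-once
// delivery; the row/column names are made up for the example.
public class AtLeastOnceDemo {
  public static void main(String[] args) {
    Map<String, Long> replica = new HashMap<>();

    // A Put is idempotent: a duplicate delivery leaves the same final state.
    replica.put("row1:cf:col", 42L);
    replica.put("row1:cf:col", 42L); // retried delivery, harmless
    System.out.println(replica.get("row1:cf:col")); // 42 either way

    // An Increment is not idempotent: a retried delivery is applied again.
    replica.merge("row2:cf:counter", 1L, Long::sum);
    replica.merge("row2:cf:counter", 1L, Long::sum); // retried delivery
    System.out.println(replica.get("row2:cf:counter")); // 2; the source saw 1
  }
}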

hbase git commit: HBASE-17918 document serial replication

2017-11-30 Thread zhangduo
Repository: hbase
Updated Branches:
  refs/heads/master 9692b61a0 -> 6a6409a30


HBASE-17918 document serial replication

Signed-off-by: zhangduo 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/6a6409a3
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/6a6409a3
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/6a6409a3

Branch: refs/heads/master
Commit: 6a6409a30aa634875467683203de0e21e0491986
Parents: 9692b61
Author: meiyi 
Authored: Thu Nov 30 21:27:39 2017 +0800
Committer: zhangduo 
Committed: Thu Nov 30 21:27:39 2017 +0800

--
 src/main/asciidoc/_chapters/ops_mgt.adoc | 41 ++-
 1 file changed, 40 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/6a6409a3/src/main/asciidoc/_chapters/ops_mgt.adoc
--
diff --git a/src/main/asciidoc/_chapters/ops_mgt.adoc 
b/src/main/asciidoc/_chapters/ops_mgt.adoc
index 2bb2510..d4478fa 100644
--- a/src/main/asciidoc/_chapters/ops_mgt.adoc
+++ b/src/main/asciidoc/_chapters/ops_mgt.adoc
@@ -1367,9 +1367,11 @@ If a slave cluster does run out of room, or is 
inaccessible for other reasons, i
 .Consistency Across Replicated Clusters
 [WARNING]
 
-How your application builds on top of the HBase API matters when replication 
is in play. HBase's replication system provides at-least-once delivery of 
client edits for an enabled column family to each configured destination 
cluster. In the event of failure to reach a given destination, the replication 
system will retry sending edits in a way that might repeat a given message. 
Further more, there is not a guaranteed order of delivery for client edits. In 
the event of a RegionServer failing, recovery of the replication queue happens 
independent of recovery of the individual regions that server was previously 
handling. This means that it is possible for the not-yet-replicated edits to be 
serviced by a RegionServer that is currently slower to replicate than the one 
that handles edits from after the failure.
+How your application builds on top of the HBase API matters when replication 
is in play. HBase's replication system provides at-least-once delivery of 
client edits for an enabled column family to each configured destination 
cluster. In the event of failure to reach a given destination, the replication 
system will retry sending edits in a way that might repeat a given message. 
HBase provides two modes of replication: the original replication and serial 
replication. In the original mode, there is no guaranteed order of delivery 
for client edits. In the event of a RegionServer failing, recovery of the 
replication queue happens independently of recovery of the individual regions 
that server was previously handling. This means that it is possible for the 
not-yet-replicated edits to be serviced by a RegionServer that is currently 
slower to replicate than the one that handles edits from after the failure.
 
 The combination of these two properties (at-least-once delivery and the lack 
of message ordering) means that some destination clusters may end up in a 
different state if your application makes use of operations that are not 
idempotent, e.g. Increments.
+
+To solve the problem, HBase now supports serial replication, which sends 
edits to the destination cluster in the same order in which the client 
requests were received by the source cluster.
 
 
 .Terminology Changes
@@ -1410,6 +1412,9 @@ Instead of SQL statements, entire WALEdits (consisting of 
multiple cell inserts
 LOG.info("Replicating "+clusterId + " -> " + peerClusterId);
 
 
+.Serial Replication Configuration
+See <>
+
 .Cluster Management Commands
 add_peer  ::
   Adds a replication relationship between two clusters. +
@@ -1431,6 +1436,40 @@ enable_table_replication ::
 disable_table_replication ::
   Disable the table replication switch for all its column families.
 
+=== Serial Replication
+
+Note: this feature was introduced in HBase 1.5
+
+.Function of serial replication
+
+Serial replication pushes logs to the destination cluster in the same order 
in which they arrived at the source cluster.
+
+.Why is serial replication needed?
+In HBase replication, we push mutations to the destination cluster by 
reading the WAL on each region server. We keep a queue of WAL files so we can 
read them in order of creation time. However, when a region move or RS 
failure occurs in the source cluster, the hlog entries that were not pushed 
before the region move or RS failure will be pushed by the original RS (for a 
region move) or by another RS which takes over the remaining hlog of the dead 
RS (for an RS failure), while the new entries for the same region(s) will be 
pushed by the RS which now serves the region(s), but they push the hlog 
entries of a same region
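
The configuration steps are cut off above, so the following Java sketch is a
best-effort reconstruction under stated assumptions: it assumes the 2.0-era
Admin API, and that the serial implementation of this period is switched on
per column family by setting the replication scope to 2 (the value of
HConstants.REPLICATION_SCOPE_SERIAL, if present in your version). Verify both
against your HBase release; table name "t1" and family "cf" are placeholders.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Hedged sketch only: flips one column family from normal replication
// (scope 1) to serial replication (scope 2, an assumed constant value).
public class EnableSerialReplication {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      HColumnDescriptor family = new HColumnDescriptor("cf");
      family.setScope(2); // assumed: HConstants.REPLICATION_SCOPE_SERIAL
      admin.modifyColumnFamily(TableName.valueOf("t1"), family);
    }
  }
}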

hbase git commit: HBASE-18112 Write RequestTooBigException back to client for NettyRpcServer

2017-11-30 Thread zhangduo
Repository: hbase
Updated Branches:
  refs/heads/branch-2 d32ba0977 -> 29079886c


HBASE-18112 Write RequestTooBigException back to client for NettyRpcServer

Signed-off-by: zhangduo 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/29079886
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/29079886
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/29079886

Branch: refs/heads/branch-2
Commit: 29079886caddc85f6953cd07f1afaded5086f7db
Parents: d32ba09
Author: Toshihiro Suzuki 
Authored: Thu Nov 30 16:58:52 2017 +0800
Committer: zhangduo 
Committed: Thu Nov 30 17:07:22 2017 +0800

--
 .../hadoop/hbase/ipc/NettyRpcFrameDecoder.java  | 237 +++
 .../apache/hadoop/hbase/ipc/NettyRpcServer.java |  13 +-
 .../ipc/NettyRpcServerPreambleHandler.java  |   9 +-
 .../hadoop/hbase/ipc/AbstractTestIPC.java   | 106 -
 .../hadoop/hbase/ipc/TestBlockingIPC.java   |  47 
 .../apache/hadoop/hbase/ipc/TestNettyIPC.java   |  63 -
 6 files changed, 399 insertions(+), 76 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/29079886/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcFrameDecoder.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcFrameDecoder.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcFrameDecoder.java
new file mode 100644
index 000..d600712
--- /dev/null
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcFrameDecoder.java
@@ -0,0 +1,237 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.ipc;
+
+import java.io.IOException;
+import java.util.List;
+
+import org.apache.hadoop.hbase.DoNotRetryIOException;
+import org.apache.hadoop.hbase.client.VersionInfoUtil;
+import org.apache.hadoop.hbase.exceptions.RequestTooBigException;
+import org.apache.yetus.audience.InterfaceAudience;
+
+import org.apache.hadoop.hbase.shaded.io.netty.buffer.ByteBuf;
+import org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelFutureListener;
+import org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelHandlerContext;
+import 
org.apache.hadoop.hbase.shaded.io.netty.handler.codec.ByteToMessageDecoder;
+import 
org.apache.hadoop.hbase.shaded.io.netty.handler.codec.CorruptedFrameException;
+import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos;
+
+
+/**
+ * Decoder for extracting frames
+ *
+ * @since 2.0.0
+ */
+@InterfaceAudience.Private
+public class NettyRpcFrameDecoder extends ByteToMessageDecoder {
+
+  private static int FRAME_LENGTH_FIELD_LENGTH = 4;
+
+  private final int maxFrameLength;
+  private boolean requestTooBig;
+  private String requestTooBigMessage;
+
+  public NettyRpcFrameDecoder(int maxFrameLength) {
+this.maxFrameLength = maxFrameLength;
+  }
+
+  private NettyServerRpcConnection connection;
+
+  void setConnection(NettyServerRpcConnection connection) {
+this.connection = connection;
+  }
+
+  @Override
+  protected void decode(ChannelHandlerContext ctx, ByteBuf in, List<Object> 
out)
+throws Exception {
+if (requestTooBig) {
+  handleTooBigRequest(in);
+  return;
+}
+
+if (in.readableBytes() < FRAME_LENGTH_FIELD_LENGTH) {
+  return;
+}
+
+long frameLength = in.getUnsignedInt(in.readerIndex());
+
+if (frameLength < 0) {
+  throw new IOException("negative frame length field: " + frameLength);
+}
+
+if (frameLength > maxFrameLength) {
+  requestTooBig = true;
+  requestTooBigMessage =
+"RPC data length of " + frameLength + " received from " + 
connection.getHostAddress()
+  + " is greater than max allowed " + 
connection.rpcServer.maxRequestSize + ". Set \""
+  + SimpleRpcServer.MAX_REQUEST_SIZE
+  + "\" on server to override this limit (not recommended)";
+
+  NettyRpcServer.LOG.warn(requestTooBigMessage);
+
+  if (conn
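
The decode() method above is length-field framing with an early size guard:
peek at the 4-byte unsigned length prefix, reject the frame before buffering
it if it exceeds the maximum, otherwise wait until the whole frame arrives. A
minimal self-contained sketch of that guard in plain java.nio follows; the
class and method names are illustrative, not the HBase API.

import java.nio.ByteBuffer;

// Peeks at a 4-byte unsigned length prefix and fails fast on oversized
// frames, mirroring the check NettyRpcFrameDecoder performs on netty ByteBufs.
final class FrameLengthGuard {
  private final long maxFrameLength;

  FrameLengthGuard(long maxFrameLength) {
    this.maxFrameLength = maxFrameLength;
  }

  /** Returns the frame length, or -1 if the length field is incomplete. */
  long peekFrameLength(ByteBuffer in) {
    if (in.remaining() < 4) {
      return -1; // need more bytes, as decode() does by returning early
    }
    long frameLength = Integer.toUnsignedLong(in.getInt(in.position()));
    if (frameLength > maxFrameLength) {
      throw new IllegalStateException("RPC data length of " + frameLength
          + " is greater than max allowed " + maxFrameLength);
    }
    return frameLength;
  }

  public static void main(String[] args) {
    ByteBuffer buf = ByteBuffer.allocate(8);
    buf.putInt(1024); // length prefix
    buf.flip();
    System.out.println(new FrameLengthGuard(64 * 1024).peekFrameLength(buf));
  }
}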

hbase git commit: HBASE-18112 Write RequestTooBigException back to client for NettyRpcServer

2017-11-30 Thread zhangduo
Repository: hbase
Updated Branches:
  refs/heads/master ddbff4fd8 -> 9692b61a0


HBASE-18112 Write RequestTooBigException back to client for NettyRpcServer

Signed-off-by: zhangduo 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/9692b61a
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/9692b61a
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/9692b61a

Branch: refs/heads/master
Commit: 9692b61a0ab572d0d37e01bd262700995e675975
Parents: ddbff4f
Author: Toshihiro Suzuki 
Authored: Thu Nov 30 16:58:52 2017 +0800
Committer: zhangduo 
Committed: Thu Nov 30 17:06:34 2017 +0800

--
 .../hadoop/hbase/ipc/NettyRpcFrameDecoder.java  | 237 +++
 .../apache/hadoop/hbase/ipc/NettyRpcServer.java |  13 +-
 .../ipc/NettyRpcServerPreambleHandler.java  |   9 +-
 .../hadoop/hbase/ipc/AbstractTestIPC.java   | 106 -
 .../hadoop/hbase/ipc/TestBlockingIPC.java   |  47 
 .../apache/hadoop/hbase/ipc/TestNettyIPC.java   |  63 -
 6 files changed, 399 insertions(+), 76 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/9692b61a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcFrameDecoder.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcFrameDecoder.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcFrameDecoder.java
new file mode 100644
index 000..d600712
--- /dev/null
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcFrameDecoder.java
@@ -0,0 +1,237 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.ipc;
+
+import java.io.IOException;
+import java.util.List;
+
+import org.apache.hadoop.hbase.DoNotRetryIOException;
+import org.apache.hadoop.hbase.client.VersionInfoUtil;
+import org.apache.hadoop.hbase.exceptions.RequestTooBigException;
+import org.apache.yetus.audience.InterfaceAudience;
+
+import org.apache.hadoop.hbase.shaded.io.netty.buffer.ByteBuf;
+import org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelFutureListener;
+import org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelHandlerContext;
+import 
org.apache.hadoop.hbase.shaded.io.netty.handler.codec.ByteToMessageDecoder;
+import 
org.apache.hadoop.hbase.shaded.io.netty.handler.codec.CorruptedFrameException;
+import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos;
+
+
+/**
+ * Decoder for extracting frames
+ *
+ * @since 2.0.0
+ */
+@InterfaceAudience.Private
+public class NettyRpcFrameDecoder extends ByteToMessageDecoder {
+
+  private static int FRAME_LENGTH_FIELD_LENGTH = 4;
+
+  private final int maxFrameLength;
+  private boolean requestTooBig;
+  private String requestTooBigMessage;
+
+  public NettyRpcFrameDecoder(int maxFrameLength) {
+this.maxFrameLength = maxFrameLength;
+  }
+
+  private NettyServerRpcConnection connection;
+
+  void setConnection(NettyServerRpcConnection connection) {
+this.connection = connection;
+  }
+
+  @Override
+  protected void decode(ChannelHandlerContext ctx, ByteBuf in, List<Object> 
out)
+throws Exception {
+if (requestTooBig) {
+  handleTooBigRequest(in);
+  return;
+}
+
+if (in.readableBytes() < FRAME_LENGTH_FIELD_LENGTH) {
+  return;
+}
+
+long frameLength = in.getUnsignedInt(in.readerIndex());
+
+if (frameLength < 0) {
+  throw new IOException("negative frame length field: " + frameLength);
+}
+
+if (frameLength > maxFrameLength) {
+  requestTooBig = true;
+  requestTooBigMessage =
+"RPC data length of " + frameLength + " received from " + 
connection.getHostAddress()
+  + " is greater than max allowed " + 
connection.rpcServer.maxRequestSize + ". Set \""
+  + SimpleRpcServer.MAX_REQUEST_SIZE
+  + "\" on server to override this limit (not recommended)";
+
+  NettyRpcServer.LOG.warn(requestTooBigMessage);
+
+  if (connecti
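
Once decode() sets requestTooBig, the connection cannot simply drop the frame:
the remaining payload bytes are already in flight and must be consumed off the
wire before the next request can be framed, which is what the
handleTooBigRequest(in) call at the top of decode() is for. A hedged sketch of
the draining half of that idea follows (illustrative only; the real method
works on netty ByteBufs and also answers the client with
RequestTooBigException, per this commit's title).

import java.nio.ByteBuffer;

// Discards an oversized frame's payload as it streams in, tracking how many
// bytes remain; names are illustrative, not the HBase implementation.
final class OversizedFrameDrainer {
  private long remaining; // payload bytes still to discard

  OversizedFrameDrainer(long frameLength) {
    this.remaining = frameLength;
  }

  /** Skips buffered bytes; returns true once the whole frame is drained. */
  boolean drain(ByteBuffer in) {
    int toSkip = (int) Math.min(remaining, in.remaining());
    in.position(in.position() + toSkip); // drop the bytes unread
    remaining -= toSkip;
    return remaining == 0;
  }

  public static void main(String[] args) {
    OversizedFrameDrainer drainer = new OversizedFrameDrainer(10);
    System.out.println(drainer.drain(ByteBuffer.allocate(6))); // false
    System.out.println(drainer.drain(ByteBuffer.allocate(6))); // true
  }
}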

hbase git commit: HBASE-19386 Enable Arm64 unaligned support for HBase

2017-11-30 Thread tedyu
Repository: hbase
Updated Branches:
  refs/heads/master 9434d52c1 -> ddbff4fd8


HBASE-19386 Enable Arm64 unaligned support for HBase

On Arm64, java.nio.Bits.unaligned() wrongly returns false due to a JDK bug.
This causes HBase's UnsafeAvailChecker to wrongly return false on Arm64,
which also makes the FuzzyRowFilter unit test fail.
Fix it by hard-coding Arm64 unaligned support to enabled.

Jira: HBASE-19386

Change-Id: I3ab821dacbe42b18cd515080da1fa3dc1f1e1d28
Signed-off-by: Yuqi Gu 
Signed-off-by: tedyu 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/ddbff4fd
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/ddbff4fd
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/ddbff4fd

Branch: refs/heads/master
Commit: ddbff4fd8700b17229d0192353c109b3ef1c5858
Parents: 9434d52
Author: Yuqi Gu 
Authored: Thu Nov 16 08:17:15 2017 +
Committer: tedyu 
Committed: Thu Nov 30 00:42:21 2017 -0800

--
 .../main/java/org/apache/hadoop/hbase/util/UnsafeAvailChecker.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/ddbff4fd/hbase-common/src/main/java/org/apache/hadoop/hbase/util/UnsafeAvailChecker.java
--
diff --git 
a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/UnsafeAvailChecker.java
 
b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/UnsafeAvailChecker.java
index f6744d7..8fe7044 100644
--- 
a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/UnsafeAvailChecker.java
+++ 
b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/UnsafeAvailChecker.java
@@ -52,7 +52,7 @@ public class UnsafeAvailChecker {
 // When Unsafe itself is not available/accessible consider unaligned as 
false.
 if (avail) {
   String arch = System.getProperty("os.arch");
-  if ("ppc64".equals(arch) || "ppc64le".equals(arch)) {
+  if ("ppc64".equals(arch) || "ppc64le".equals(arch) || 
"aarch64".equals(arch)) {
 // java.nio.Bits.unaligned() wrongly returns false on ppc 
(JDK-8165231),
 unaligned = true;
   } else {
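
The fix works by short-circuiting the JDK's unaligned-access probe on
architectures where it is known to give the wrong answer. A minimal sketch of
that override pattern follows; it is illustrative only, and the real logic
lives in UnsafeAvailChecker as shown in the diff above.

import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

// Forces the unaligned-access flag to true on architectures where
// java.nio.Bits.unaligned() wrongly reports false: ppc64/ppc64le per
// JDK-8165231, and aarch64 per this commit.
public class UnalignedOverride {
  private static final Set<String> FORCED = new HashSet<>(
      Arrays.asList("ppc64", "ppc64le", "aarch64"));

  static boolean unaligned(boolean jdkProbeResult) {
    return FORCED.contains(System.getProperty("os.arch")) || jdkProbeResult;
  }

  public static void main(String[] args) {
    System.out.println(unaligned(false)); // true only on the listed arches
  }
}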



hbase git commit: HBASE-19386 Enable Arm64 unaligned support for HBase

2017-11-30 Thread tedyu
Repository: hbase
Updated Branches:
  refs/heads/branch-2 e2e08866f -> d32ba0977


HBASE-19386 Enable Arm64 unaligned support for HBase

On Arm64, java.nio.Bits.unaligned() wrongly returns false due to a JDK bug.
This causes HBase's UnsafeAvailChecker to wrongly return false on Arm64,
which also makes the FuzzyRowFilter unit test fail.
Fix it by hard-coding Arm64 unaligned support to enabled.

Jira: HBASE-19386

Change-Id: I3ab821dacbe42b18cd515080da1fa3dc1f1e1d28
Signed-off-by: Yuqi Gu 
Signed-off-by: tedyu 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/d32ba097
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/d32ba097
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/d32ba097

Branch: refs/heads/branch-2
Commit: d32ba0977f6f499faf5373273513429798051a14
Parents: e2e0886
Author: Yuqi Gu 
Authored: Thu Nov 16 08:17:15 2017 +
Committer: tedyu 
Committed: Thu Nov 30 00:42:58 2017 -0800

--
 .../main/java/org/apache/hadoop/hbase/util/UnsafeAvailChecker.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/d32ba097/hbase-common/src/main/java/org/apache/hadoop/hbase/util/UnsafeAvailChecker.java
--
diff --git 
a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/UnsafeAvailChecker.java
 
b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/UnsafeAvailChecker.java
index f6744d7..8fe7044 100644
--- 
a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/UnsafeAvailChecker.java
+++ 
b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/UnsafeAvailChecker.java
@@ -52,7 +52,7 @@ public class UnsafeAvailChecker {
 // When Unsafe itself is not available/accessible consider unaligned as 
false.
 if (avail) {
   String arch = System.getProperty("os.arch");
-  if ("ppc64".equals(arch) || "ppc64le".equals(arch)) {
+  if ("ppc64".equals(arch) || "ppc64le".equals(arch) || 
"aarch64".equals(arch)) {
 // java.nio.Bits.unaligned() wrongly returns false on ppc 
(JDK-8165231),
 unaligned = true;
   } else {