hbase git commit: HBASE-19346 Use EventLoopGroup to create AsyncFSOutput

2017-11-29 Thread zhangduo
Repository: hbase
Updated Branches:
  refs/heads/branch-2 0e6f1a024 -> e2e08866f


HBASE-19346 Use EventLoopGroup to create AsyncFSOutput


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/e2e08866
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/e2e08866
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/e2e08866

Branch: refs/heads/branch-2
Commit: e2e08866f6da2a6c992be226e187c26053b2f68b
Parents: 0e6f1a0
Author: zhangduo 
Authored: Tue Nov 28 17:56:13 2017 +0800
Committer: zhangduo 
Committed: Thu Nov 30 15:23:22 2017 +0800

--
 .../hadoop/hbase/io/asyncfs/AsyncFSOutput.java  |   2 +
 .../hbase/io/asyncfs/AsyncFSOutputHelper.java   |  59 ++-
 .../asyncfs/FanOutOneBlockAsyncDFSOutput.java   | 419 +--
 .../FanOutOneBlockAsyncDFSOutputHelper.java |  80 ++--
 .../hbase/io/asyncfs/SendBufSizePredictor.java  |  57 +++
 .../hadoop/hbase/wal/AsyncFSWALProvider.java|  23 +-
 .../TestFanOutOneBlockAsyncDFSOutput.java   |  68 +--
 .../hbase/io/asyncfs/TestLocalAsyncOutput.java  |   2 +-
 .../TestSaslFanOutOneBlockAsyncDFSOutput.java   |  64 ++-
 .../io/asyncfs/TestSendBufSizePredictor.java|  44 ++
 10 files changed, 456 insertions(+), 362 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/e2e08866/hbase-server/src/main/java/org/apache/hadoop/hbase/io/asyncfs/AsyncFSOutput.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/asyncfs/AsyncFSOutput.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/asyncfs/AsyncFSOutput.java
index 68adca9..bfe66de 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/asyncfs/AsyncFSOutput.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/asyncfs/AsyncFSOutput.java
@@ -28,6 +28,8 @@ import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 
 /**
  * Interface for asynchronous filesystem output stream.
+ * 
+ * The implementation is not required to be thread safe.
  */
 @InterfaceAudience.Private
 public interface AsyncFSOutput extends Closeable {

http://git-wip-us.apache.org/repos/asf/hbase/blob/e2e08866/hbase-server/src/main/java/org/apache/hadoop/hbase/io/asyncfs/AsyncFSOutputHelper.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/asyncfs/AsyncFSOutputHelper.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/asyncfs/AsyncFSOutputHelper.java
index 1f5462f..6a7e4fa 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/asyncfs/AsyncFSOutputHelper.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/asyncfs/AsyncFSOutputHelper.java
@@ -17,12 +17,6 @@
  */
 package org.apache.hadoop.hbase.io.asyncfs;
 
-import org.apache.hadoop.hbase.shaded.com.google.common.base.Throwables;
-import 
org.apache.hadoop.hbase.shaded.com.google.common.util.concurrent.ThreadFactoryBuilder;
-
-import org.apache.hadoop.hbase.shaded.io.netty.channel.Channel;
-import org.apache.hadoop.hbase.shaded.io.netty.channel.EventLoop;
-
 import java.io.IOException;
 import java.io.InterruptedIOException;
 import java.nio.ByteBuffer;
@@ -35,12 +29,17 @@ import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.hbase.io.ByteArrayOutputStream;
 import org.apache.hadoop.hbase.util.CancelableProgressable;
 import org.apache.hadoop.hbase.util.CommonFSUtils;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.yetus.audience.InterfaceAudience;
+
+import org.apache.hadoop.hbase.shaded.com.google.common.base.Throwables;
+import 
org.apache.hadoop.hbase.shaded.com.google.common.util.concurrent.ThreadFactoryBuilder;
+import org.apache.hadoop.hbase.shaded.io.netty.channel.Channel;
+import org.apache.hadoop.hbase.shaded.io.netty.channel.EventLoopGroup;
 
 /**
  * Helper class for creating AsyncFSOutput.
@@ -56,12 +55,12 @@ public final class AsyncFSOutputHelper {
* implementation for other {@link FileSystem} which wraps around a {@link 
FSDataOutputStream}.
*/
   public static AsyncFSOutput createOutput(FileSystem fs, Path f, boolean 
overwrite,
-  boolean createParent, short replication, long blockSize, EventLoop 
eventLoop,
+  boolean createParent, short replication, long blockSize, EventLoopGroup 
eventLoopGroup,
   Class channelClass)
-  throws IOException, CommonFSUtils.StreamLacksCapabilityException {
+  throws IOException, 

hbase git commit: HBASE-19346 Use EventLoopGroup to create AsyncFSOutput

2017-11-29 Thread zhangduo
Repository: hbase
Updated Branches:
  refs/heads/master 91e75b2a2 -> 9434d52c1


HBASE-19346 Use EventLoopGroup to create AsyncFSOutput


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/9434d52c
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/9434d52c
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/9434d52c

Branch: refs/heads/master
Commit: 9434d52c190386f15188e0473ce005e96bf78413
Parents: 91e75b2
Author: zhangduo 
Authored: Tue Nov 28 17:56:13 2017 +0800
Committer: zhangduo 
Committed: Thu Nov 30 15:22:23 2017 +0800

--
 .../hadoop/hbase/io/asyncfs/AsyncFSOutput.java  |   2 +
 .../hbase/io/asyncfs/AsyncFSOutputHelper.java   |  59 ++-
 .../asyncfs/FanOutOneBlockAsyncDFSOutput.java   | 419 +--
 .../FanOutOneBlockAsyncDFSOutputHelper.java |  80 ++--
 .../hbase/io/asyncfs/SendBufSizePredictor.java  |  57 +++
 .../hadoop/hbase/wal/AsyncFSWALProvider.java|  23 +-
 .../TestFanOutOneBlockAsyncDFSOutput.java   |  68 +--
 .../hbase/io/asyncfs/TestLocalAsyncOutput.java  |   2 +-
 .../TestSaslFanOutOneBlockAsyncDFSOutput.java   |  64 ++-
 .../io/asyncfs/TestSendBufSizePredictor.java|  44 ++
 10 files changed, 456 insertions(+), 362 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/9434d52c/hbase-server/src/main/java/org/apache/hadoop/hbase/io/asyncfs/AsyncFSOutput.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/asyncfs/AsyncFSOutput.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/asyncfs/AsyncFSOutput.java
index 68adca9..bfe66de 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/asyncfs/AsyncFSOutput.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/asyncfs/AsyncFSOutput.java
@@ -28,6 +28,8 @@ import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 
 /**
  * Interface for asynchronous filesystem output stream.
+ * 
+ * The implementation is not required to be thread safe.
  */
 @InterfaceAudience.Private
 public interface AsyncFSOutput extends Closeable {

http://git-wip-us.apache.org/repos/asf/hbase/blob/9434d52c/hbase-server/src/main/java/org/apache/hadoop/hbase/io/asyncfs/AsyncFSOutputHelper.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/asyncfs/AsyncFSOutputHelper.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/asyncfs/AsyncFSOutputHelper.java
index 1f5462f..6a7e4fa 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/asyncfs/AsyncFSOutputHelper.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/asyncfs/AsyncFSOutputHelper.java
@@ -17,12 +17,6 @@
  */
 package org.apache.hadoop.hbase.io.asyncfs;
 
-import org.apache.hadoop.hbase.shaded.com.google.common.base.Throwables;
-import 
org.apache.hadoop.hbase.shaded.com.google.common.util.concurrent.ThreadFactoryBuilder;
-
-import org.apache.hadoop.hbase.shaded.io.netty.channel.Channel;
-import org.apache.hadoop.hbase.shaded.io.netty.channel.EventLoop;
-
 import java.io.IOException;
 import java.io.InterruptedIOException;
 import java.nio.ByteBuffer;
@@ -35,12 +29,17 @@ import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.hbase.io.ByteArrayOutputStream;
 import org.apache.hadoop.hbase.util.CancelableProgressable;
 import org.apache.hadoop.hbase.util.CommonFSUtils;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.yetus.audience.InterfaceAudience;
+
+import org.apache.hadoop.hbase.shaded.com.google.common.base.Throwables;
+import 
org.apache.hadoop.hbase.shaded.com.google.common.util.concurrent.ThreadFactoryBuilder;
+import org.apache.hadoop.hbase.shaded.io.netty.channel.Channel;
+import org.apache.hadoop.hbase.shaded.io.netty.channel.EventLoopGroup;
 
 /**
  * Helper class for creating AsyncFSOutput.
@@ -56,12 +55,12 @@ public final class AsyncFSOutputHelper {
* implementation for other {@link FileSystem} which wraps around a {@link 
FSDataOutputStream}.
*/
   public static AsyncFSOutput createOutput(FileSystem fs, Path f, boolean 
overwrite,
-  boolean createParent, short replication, long blockSize, EventLoop 
eventLoop,
+  boolean createParent, short replication, long blockSize, EventLoopGroup 
eventLoopGroup,
   Class channelClass)
-  throws IOException, CommonFSUtils.StreamLacksCapabilityException {
+  throws IOException, 

hbase git commit: HBASE-19385 [1.3] TestReplicator failed 1.3 nightly

2017-11-29 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/master 81b95afbe -> 91e75b2a2


HBASE-19385 [1.3] TestReplicator failed 1.3 nightly


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/91e75b2a
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/91e75b2a
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/91e75b2a

Branch: refs/heads/master
Commit: 91e75b2a2f4b61599de7bd5a727192b480f48de9
Parents: 81b95af
Author: Michael Stack 
Authored: Wed Nov 29 23:06:31 2017 -0800
Committer: Michael Stack 
Committed: Wed Nov 29 23:14:22 2017 -0800

--
 .../replication/regionserver/TestReplicator.java | 19 +--
 1 file changed, 13 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/91e75b2a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicator.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicator.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicator.java
index 6149721..4a46074 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicator.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicator.java
@@ -22,6 +22,7 @@ import static org.junit.Assert.assertEquals;
 import java.io.IOException;
 import java.util.List;
 import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -99,6 +100,7 @@ public class TestReplicator extends TestReplicationBase {
   Waiter.waitFor(conf1, 6, new Waiter.ExplainingPredicate() 
{
 @Override
 public boolean evaluate() throws Exception {
+  LOG.info("Count=" + ReplicationEndpointForTest.getBatchCount());
   return ReplicationEndpointForTest.getBatchCount() >= NUM_ROWS;
 }
 
@@ -180,7 +182,7 @@ public class TestReplicator extends TestReplicationBase {
 
   public static class ReplicationEndpointForTest extends 
HBaseInterClusterReplicationEndpoint {
 
-private static int batchCount;
+private static AtomicInteger batchCount = new AtomicInteger(0);
 private static int entriesCount;
 private static final Object latch = new Object();
 private static AtomicBoolean useLatch = new AtomicBoolean(false);
@@ -199,17 +201,20 @@ public class TestReplicator extends TestReplicationBase {
 public static void await() throws InterruptedException {
   if (useLatch.get()) {
 LOG.info("Waiting on latch");
-latch.wait();
+synchronized(latch) {
+  latch.wait();
+}
 LOG.info("Waited on latch, now proceeding");
   }
 }
 
 public static int getBatchCount() {
-  return batchCount;
+  return batchCount.get();
 }
 
 public static void setBatchCount(int i) {
-  batchCount = i;
+  LOG.info("SetBatchCount=" + i + ", old=" + getBatchCount());
+  batchCount.set(i);
 }
 
 public static int getEntriesCount() {
@@ -217,6 +222,7 @@ public class TestReplicator extends TestReplicationBase {
 }
 
 public static void setEntriesCount(int i) {
+  LOG.info("SetEntriesCount=" + i);
   entriesCount = i;
 }
 
@@ -242,8 +248,9 @@ public class TestReplicator extends TestReplicationBase {
   super.replicateEntries(rrs, entries, replicationClusterId, 
baseNamespaceDir,
 hfileArchiveDir);
   entriesCount += entries.size();
-  batchCount++;
-  LOG.info("Completed replicating batch " + 
System.identityHashCode(entries));
+  int count = batchCount.incrementAndGet();
+  LOG.info("Completed replicating batch " + 
System.identityHashCode(entries) +
+  " count=" + count);
 } catch (IOException e) {
   LOG.info("Failed to replicate batch " + 
System.identityHashCode(entries), e);
   throw e;



hbase git commit: HBASE-19385 [1.3] TestReplicator failed 1.3 nightly

2017-11-29 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/branch-2 fd9c4322c -> 0e6f1a024


HBASE-19385 [1.3] TestReplicator failed 1.3 nightly


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/0e6f1a02
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/0e6f1a02
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/0e6f1a02

Branch: refs/heads/branch-2
Commit: 0e6f1a0240460e18acd8e515f7f5caf1e233489a
Parents: fd9c432
Author: Michael Stack 
Authored: Wed Nov 29 23:06:31 2017 -0800
Committer: Michael Stack 
Committed: Wed Nov 29 23:13:25 2017 -0800

--
 .../replication/regionserver/TestReplicator.java | 19 +--
 1 file changed, 13 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/0e6f1a02/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicator.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicator.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicator.java
index 6149721..4a46074 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicator.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicator.java
@@ -22,6 +22,7 @@ import static org.junit.Assert.assertEquals;
 import java.io.IOException;
 import java.util.List;
 import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -99,6 +100,7 @@ public class TestReplicator extends TestReplicationBase {
   Waiter.waitFor(conf1, 6, new Waiter.ExplainingPredicate() 
{
 @Override
 public boolean evaluate() throws Exception {
+  LOG.info("Count=" + ReplicationEndpointForTest.getBatchCount());
   return ReplicationEndpointForTest.getBatchCount() >= NUM_ROWS;
 }
 
@@ -180,7 +182,7 @@ public class TestReplicator extends TestReplicationBase {
 
   public static class ReplicationEndpointForTest extends 
HBaseInterClusterReplicationEndpoint {
 
-private static int batchCount;
+private static AtomicInteger batchCount = new AtomicInteger(0);
 private static int entriesCount;
 private static final Object latch = new Object();
 private static AtomicBoolean useLatch = new AtomicBoolean(false);
@@ -199,17 +201,20 @@ public class TestReplicator extends TestReplicationBase {
 public static void await() throws InterruptedException {
   if (useLatch.get()) {
 LOG.info("Waiting on latch");
-latch.wait();
+synchronized(latch) {
+  latch.wait();
+}
 LOG.info("Waited on latch, now proceeding");
   }
 }
 
 public static int getBatchCount() {
-  return batchCount;
+  return batchCount.get();
 }
 
 public static void setBatchCount(int i) {
-  batchCount = i;
+  LOG.info("SetBatchCount=" + i + ", old=" + getBatchCount());
+  batchCount.set(i);
 }
 
 public static int getEntriesCount() {
@@ -217,6 +222,7 @@ public class TestReplicator extends TestReplicationBase {
 }
 
 public static void setEntriesCount(int i) {
+  LOG.info("SetEntriesCount=" + i);
   entriesCount = i;
 }
 
@@ -242,8 +248,9 @@ public class TestReplicator extends TestReplicationBase {
   super.replicateEntries(rrs, entries, replicationClusterId, 
baseNamespaceDir,
 hfileArchiveDir);
   entriesCount += entries.size();
-  batchCount++;
-  LOG.info("Completed replicating batch " + 
System.identityHashCode(entries));
+  int count = batchCount.incrementAndGet();
+  LOG.info("Completed replicating batch " + 
System.identityHashCode(entries) +
+  " count=" + count);
 } catch (IOException e) {
   LOG.info("Failed to replicate batch " + 
System.identityHashCode(entries), e);
   throw e;



hbase git commit: HBASE-19385 [1.3] TestReplicator failed 1.3 nightly

2017-11-29 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/branch-1.3 6891e8195 -> 04f1029c0


HBASE-19385 [1.3] TestReplicator failed 1.3 nightly


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/04f1029c
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/04f1029c
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/04f1029c

Branch: refs/heads/branch-1.3
Commit: 04f1029c03cca0c3303595fec5d654a304db2c03
Parents: 6891e81
Author: Michael Stack 
Authored: Wed Nov 29 23:06:31 2017 -0800
Committer: Michael Stack 
Committed: Wed Nov 29 23:10:46 2017 -0800

--
 .../replication/regionserver/TestReplicator.java | 19 +--
 1 file changed, 13 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/04f1029c/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicator.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicator.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicator.java
index 6d15a1b..b328b70 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicator.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicator.java
@@ -22,6 +22,7 @@ import static org.junit.Assert.assertEquals;
 import java.io.IOException;
 import java.util.List;
 import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -95,6 +96,7 @@ public class TestReplicator extends TestReplicationBase {
   Waiter.waitFor(conf1, 6, new Waiter.ExplainingPredicate() 
{
 @Override
 public boolean evaluate() throws Exception {
+  LOG.info("Count=" + ReplicationEndpointForTest.getBatchCount());
   return ReplicationEndpointForTest.getBatchCount() >= NUM_ROWS;
 }
 
@@ -176,7 +178,7 @@ public class TestReplicator extends TestReplicationBase {
 
   public static class ReplicationEndpointForTest extends 
HBaseInterClusterReplicationEndpoint {
 
-private static int batchCount;
+private static AtomicInteger batchCount = new AtomicInteger(0);
 private static int entriesCount;
 private static final Object latch = new Object();
 private static AtomicBoolean useLatch = new AtomicBoolean(false);
@@ -195,17 +197,20 @@ public class TestReplicator extends TestReplicationBase {
 public static void await() throws InterruptedException {
   if (useLatch.get()) {
 LOG.info("Waiting on latch");
-latch.wait();
+synchronized(latch) {
+  latch.wait();
+}
 LOG.info("Waited on latch, now proceeding");
   }
 }
 
 public static int getBatchCount() {
-  return batchCount;
+  return batchCount.get();
 }
 
 public static void setBatchCount(int i) {
-  batchCount = i;
+  LOG.info("SetBatchCount=" + i + ", old=" + getBatchCount());
+  batchCount.set(i);
 }
 
 public static int getEntriesCount() {
@@ -213,6 +218,7 @@ public class TestReplicator extends TestReplicationBase {
 }
 
 public static void setEntriesCount(int i) {
+  LOG.info("SetEntriesCount=" + i);
   entriesCount = i;
 }
 
@@ -238,8 +244,9 @@ public class TestReplicator extends TestReplicationBase {
   super.replicateEntries(rrs, entries, replicationClusterId, 
baseNamespaceDir,
 hfileArchiveDir);
   entriesCount += entries.size();
-  batchCount++;
-  LOG.info("Completed replicating batch " + 
System.identityHashCode(entries));
+  int count = batchCount.incrementAndGet();
+  LOG.info("Completed replicating batch " + 
System.identityHashCode(entries) +
+  " count=" + count);
 } catch (IOException e) {
   LOG.info("Failed to replicate batch " + 
System.identityHashCode(entries), e);
   throw e;



hbase git commit: HBASE-19385 [1.3] TestReplicator failed 1.3 nightly

2017-11-29 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/branch-1 fb3fee341 -> 1e0067304


HBASE-19385 [1.3] TestReplicator failed 1.3 nightly


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/1e006730
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/1e006730
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/1e006730

Branch: refs/heads/branch-1
Commit: 1e00673047339ad2723f86abc812025f6bfef2bd
Parents: fb3fee3
Author: Michael Stack 
Authored: Wed Nov 29 23:06:31 2017 -0800
Committer: Michael Stack 
Committed: Wed Nov 29 23:12:51 2017 -0800

--
 .../replication/regionserver/TestReplicator.java | 19 +--
 1 file changed, 13 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/1e006730/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicator.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicator.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicator.java
index dbe7031..4b5d331 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicator.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicator.java
@@ -22,6 +22,7 @@ import static org.junit.Assert.assertEquals;
 import java.io.IOException;
 import java.util.List;
 import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -97,6 +98,7 @@ public class TestReplicator extends TestReplicationBase {
   Waiter.waitFor(conf1, 6, new Waiter.ExplainingPredicate() 
{
 @Override
 public boolean evaluate() throws Exception {
+  LOG.info("Count=" + ReplicationEndpointForTest.getBatchCount());
   return ReplicationEndpointForTest.getBatchCount() >= NUM_ROWS;
 }
 
@@ -178,7 +180,7 @@ public class TestReplicator extends TestReplicationBase {
 
   public static class ReplicationEndpointForTest extends 
HBaseInterClusterReplicationEndpoint {
 
-private static int batchCount;
+private static AtomicInteger batchCount = new AtomicInteger(0);
 private static int entriesCount;
 private static final Object latch = new Object();
 private static AtomicBoolean useLatch = new AtomicBoolean(false);
@@ -197,17 +199,20 @@ public class TestReplicator extends TestReplicationBase {
 public static void await() throws InterruptedException {
   if (useLatch.get()) {
 LOG.info("Waiting on latch");
-latch.wait();
+synchronized(latch) {
+  latch.wait();
+}
 LOG.info("Waited on latch, now proceeding");
   }
 }
 
 public static int getBatchCount() {
-  return batchCount;
+  return batchCount.get();
 }
 
 public static void setBatchCount(int i) {
-  batchCount = i;
+  LOG.info("SetBatchCount=" + i + ", old=" + getBatchCount());
+  batchCount.set(i);
 }
 
 public static int getEntriesCount() {
@@ -215,6 +220,7 @@ public class TestReplicator extends TestReplicationBase {
 }
 
 public static void setEntriesCount(int i) {
+  LOG.info("SetEntriesCount=" + i);
   entriesCount = i;
 }
 
@@ -240,8 +246,9 @@ public class TestReplicator extends TestReplicationBase {
   super.replicateEntries(rrs, entries, replicationClusterId, 
baseNamespaceDir,
 hfileArchiveDir);
   entriesCount += entries.size();
-  batchCount++;
-  LOG.info("Completed replicating batch " + 
System.identityHashCode(entries));
+  int count = batchCount.incrementAndGet();
+  LOG.info("Completed replicating batch " + 
System.identityHashCode(entries) +
+  " count=" + count);
 } catch (IOException e) {
   LOG.info("Failed to replicate batch " + 
System.identityHashCode(entries), e);
   throw e;



hbase git commit: HBASE-19383 [1.2] java.lang.AssertionError: expected:<2> but was:<1> at org.apache.hadoop.hbase.TestChoreService.testTriggerNowFailsWhenNotScheduled(TestChoreService.java:707)

2017-11-29 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/branch-1.3 9f102b293 -> 6891e8195


HBASE-19383 [1.2] java.lang.AssertionError: expected:<2> but was:<1> at
org.apache.hadoop.hbase.TestChoreService.testTriggerNowFailsWhenNotScheduled(TestChoreService.java:707)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/6891e819
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/6891e819
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/6891e819

Branch: refs/heads/branch-1.3
Commit: 6891e81955c322cc680c897bd296f1bbe01f668c
Parents: 9f102b2
Author: Michael Stack 
Authored: Wed Nov 29 20:43:54 2017 -0800
Committer: Michael Stack 
Committed: Wed Nov 29 23:09:12 2017 -0800

--
 .../apache/hadoop/hbase/TestChoreService.java   | 29 
 1 file changed, 29 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/6891e819/hbase-common/src/test/java/org/apache/hadoop/hbase/TestChoreService.java
--
diff --git 
a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestChoreService.java 
b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestChoreService.java
index 06ce6d0..2712b57 100644
--- a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestChoreService.java
+++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestChoreService.java
@@ -689,35 +689,6 @@ public class TestChoreService {
   }
 
   @Test (timeout=2)
-  public void testTriggerNowFailsWhenNotScheduled() throws 
InterruptedException {
-final int period = 100;
-// Small sleep time buffer to allow CountingChore to complete
-final int sleep = 5;
-ChoreService service = new 
ChoreService("testTriggerNowFailsWhenNotScheduled");
-CountingChore chore = new CountingChore("dn", period);
-
-try {
-  assertFalse(chore.triggerNow());
-  assertTrue(chore.getCountOfChoreCalls() == 0);
-
-  service.scheduleChore(chore);
-  Thread.sleep(sleep);
-  assertEquals(1, chore.getCountOfChoreCalls());
-  Thread.sleep(period);
-  assertEquals(2, chore.getCountOfChoreCalls());
-  assertTrue(chore.triggerNow());
-  Thread.sleep(sleep);
-  assertTrue(chore.triggerNow());
-  Thread.sleep(sleep);
-  assertTrue(chore.triggerNow());
-  Thread.sleep(sleep);
-  assertEquals(5, chore.getCountOfChoreCalls());
-} finally {
-  shutdownService(service);
-}
-  }
-
-  @Test (timeout=2)
   public void testStopperForScheduledChores() throws InterruptedException {
 ChoreService service = new ChoreService("testStopperForScheduledChores");
 Stoppable stopperForGroup1 = new SampleStopper();



hbase git commit: HBASE-19382 Update report-flakies.py script to handle yetus builds.

2017-11-29 Thread appy
Repository: hbase
Updated Branches:
  refs/heads/branch-2 4f4aac77e -> fd9c4322c


HBASE-19382 Update report-flakies.py script to handle yetus builds.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/fd9c4322
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/fd9c4322
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/fd9c4322

Branch: refs/heads/branch-2
Commit: fd9c4322c59fb1e0f6b186ea25c6e34e6b46b6f4
Parents: 4f4aac7
Author: Apekshit Sharma 
Authored: Wed Nov 29 17:53:16 2017 -0800
Committer: Apekshit Sharma 
Committed: Wed Nov 29 23:02:36 2017 -0800

--
 dev-support/findHangingTests.py |  5 +++--
 dev-support/report-flakies.py   | 34 ++
 2 files changed, 29 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/fd9c4322/dev-support/findHangingTests.py
--
diff --git a/dev-support/findHangingTests.py b/dev-support/findHangingTests.py
index e7bf906..328516e 100755
--- a/dev-support/findHangingTests.py
+++ b/dev-support/findHangingTests.py
@@ -75,8 +75,9 @@ def get_bad_tests(console_url):
 if "FAILURE!" in line:
 failed_tests_set.add(test_case)
 if test_case not in hanging_tests_set:
-print  ("ERROR! No test '{}' found in hanging_tests. Might get 
wrong results "
-"for this test.".format(test_case))
+print ("ERROR! No test '{}' found in hanging_tests. Might get 
wrong results "
+   "for this test. This may also happen if maven is set to 
retry failing "
+   "tests.".format(test_case))
 else:
 hanging_tests_set.remove(test_case)
 result3 = re.match("^\\s+(\\w*).*\\sTestTimedOut", line)

http://git-wip-us.apache.org/repos/asf/hbase/blob/fd9c4322/dev-support/report-flakies.py
--
diff --git a/dev-support/report-flakies.py b/dev-support/report-flakies.py
index a28c3fb..201980d 100755
--- a/dev-support/report-flakies.py
+++ b/dev-support/report-flakies.py
@@ -51,6 +51,9 @@ parser.add_argument('--max-builds', metavar='n', 
action='append', type=int,
 help='The maximum number of builds to use (if available on 
jenkins). Specify '
  '0 to analyze all builds. Not required, but if 
specified, number of uses '
  'should be same as that of --urls since the values 
are matched.')
+parser.add_argument('--is-yetus', metavar='True/False', action='append', 
choices=['True', 'False'],
+help='True, if build is yetus style i.e. look for maven 
output in artifacts; '
+ 'False, if maven output is in /consoleText 
itself.')
 parser.add_argument(
 "--mvn", action="store_true",
 help="Writes two strings for including/excluding these flaky tests using 
maven flags. These "
@@ -66,18 +69,29 @@ if args.verbose:
 logger.setLevel(logging.INFO)
 
 
-def get_bad_tests(build_url):
+def get_bad_tests(build_url, is_yetus):
 """
-Given url of an executed build, analyzes its console text, and returns
+Given url of an executed build, analyzes its maven output, and returns
 [list of all tests, list of timeout tests, list of failed tests].
-Returns None if can't get console text or if there is any other error.
+Returns None if can't get maven output from the build or if there is any 
other error.
 """
 logger.info("Analyzing %s", build_url)
 response = requests.get(build_url + "/api/json").json()
 if response["building"]:
 logger.info("Skipping this build since it is in progress.")
 return {}
-console_url = build_url + "/consoleText"
+console_url = None
+if is_yetus:
+for artifact in response["artifacts"]:
+if artifact["fileName"] == "patch-unit-root.txt":
+console_url = build_url + "/artifact/" + 
artifact["relativePath"]
+break
+if console_url is None:
+logger.info("Can't find 'patch-unit-root.txt' artifact for Yetus 
build %s\n. Ignoring "
+"this build.", build_url)
+return
+else:
+console_url = build_url + "/consoleText"
 build_result = findHangingTests.get_bad_tests(console_url)
 if not build_result:
 logger.info("Ignoring build %s", build_url)
@@ -93,6 +107,7 @@ def expand_multi_config_projects(cli_args):
 job_urls = cli_args.urls
 excluded_builds_arg = cli_args.excluded_builds
 max_builds_arg = cli_args.max_builds
+is_yetus_arg = cli_args.is_yetus
 if excluded_builds_arg is not None and 

hbase git commit: HBASE-19367 Refactoring in RegionStates, and RSProcedureDispatcher

2017-11-29 Thread appy
Repository: hbase
Updated Branches:
  refs/heads/branch-2 941970999 -> 4f4aac77e


HBASE-19367 Refactoring in RegionStates, and RSProcedureDispatcher

- Adding javadoc comments
- Bug: ServerStateNode#regions is HashSet but there's no synchronization to 
prevent concurrent addRegion/removeRegion. Let's use concurrent set instead.
- Use getRegionsInTransitionCount() directly instead of 
getRegionsInTransition().size() because the latter copies everything into a new 
array - what a waste for just the size.
- There's mixed use of getRegionNode and getRegionStateNode for same return 
type - RegionStateNode. Changing everything to getRegionStateNode. Similarly 
rename other *RegionNode() fns to *RegionStateNode().
- RegionStateNode#transitionState() return value is useless since it always 
returns its first param.
- Other minor improvements


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/4f4aac77
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/4f4aac77
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/4f4aac77

Branch: refs/heads/branch-2
Commit: 4f4aac77e1f2ff3b552bfa70ec64a1fc45110b6d
Parents: 9419709
Author: Apekshit Sharma 
Authored: Mon Nov 27 16:17:39 2017 -0800
Committer: Apekshit Sharma 
Committed: Wed Nov 29 22:42:39 2017 -0800

--
 .../apache/hadoop/hbase/master/RegionState.java |  9 +-
 .../hbase/client/TestRegionInfoDisplay.java |  5 +-
 .../procedure2/RemoteProcedureDispatcher.java   |  4 +-
 .../hbase/rsgroup/TestRSGroupsOfflineMode.java  |  2 +-
 .../org/apache/hadoop/hbase/master/HMaster.java |  2 +-
 .../master/assignment/AssignProcedure.java  |  2 +-
 .../master/assignment/AssignmentManager.java| 64 ++---
 .../assignment/MergeTableRegionsProcedure.java  | 10 +-
 .../master/assignment/RegionStateStore.java | 21 ++---
 .../hbase/master/assignment/RegionStates.java   | 95 +--
 .../assignment/RegionTransitionProcedure.java   | 35 ---
 .../assignment/SplitTableRegionProcedure.java   |  4 +-
 .../master/procedure/RSProcedureDispatcher.java | 98 +---
 .../master/TestMasterBalanceThrottling.java |  7 +-
 .../hadoop/hbase/master/TestRegionState.java|  2 +-
 .../master/assignment/MockMasterServices.java   |  5 +-
 .../assignment/TestAssignmentManager.java   |  6 +-
 .../master/assignment/TestRegionStates.java |  8 +-
 .../hbase/regionserver/TestHRegionInfo.java |  2 +-
 .../hbase/zookeeper/MetaTableLocator.java   |  5 +-
 20 files changed, 195 insertions(+), 191 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/4f4aac77/hbase-client/src/main/java/org/apache/hadoop/hbase/master/RegionState.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/master/RegionState.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/master/RegionState.java
index 7598067..55a68fc 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/master/RegionState.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/master/RegionState.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hbase.master;
 
+import com.google.common.annotations.VisibleForTesting;
 import java.util.Date;
 
 import org.apache.hadoop.hbase.ServerName;
@@ -169,12 +170,12 @@ public class RegionState {
   // The duration of region in transition
   private long ritDuration;
 
-  public RegionState(RegionInfo region, State state) {
-this(region, state, System.currentTimeMillis(), null);
+  @VisibleForTesting
+  public static RegionState createForTesting(RegionInfo region, State state) {
+return new RegionState(region, state, System.currentTimeMillis(), null);
   }
 
-  public RegionState(RegionInfo region,
-  State state, ServerName serverName) {
+  public RegionState(RegionInfo region, State state, ServerName serverName) {
 this(region, state, System.currentTimeMillis(), serverName);
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/4f4aac77/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRegionInfoDisplay.java
--
diff --git 
a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRegionInfoDisplay.java
 
b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRegionInfoDisplay.java
index 978e8c8..6d8c84c 100644
--- 
a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRegionInfoDisplay.java
+++ 
b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRegionInfoDisplay.java
@@ -21,9 +21,6 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.CategoryBasedTimeout;
 import org.apache.hadoop.hbase.HRegionInfo;
 import 

hbase git commit: HBASE-19367 Refactoring in RegionStates, and RSProcedureDispatcher

2017-11-29 Thread appy
Repository: hbase
Updated Branches:
  refs/heads/master 5b7f9c253 -> 81b95afbe


HBASE-19367 Refactoring in RegionStates, and RSProcedureDispatcher

- Adding javadoc comments
- Bug: ServerStateNode#regions is HashSet but there's no synchronization to 
prevent concurrent addRegion/removeRegion. Let's use concurrent set instead.
- Use getRegionsInTransitionCount() directly instead of 
getRegionsInTransition().size() because the latter copies everything into a new 
array - what a waste for just the size.
- There's mixed use of getRegionNode and getRegionStateNode for same return 
type - RegionStateNode. Changing everything to getRegionStateNode. Similarly 
rename other *RegionNode() fns to *RegionStateNode().
- RegionStateNode#transitionState() return value is useless since it always 
returns its first param.
- Other minor improvements


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/81b95afb
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/81b95afb
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/81b95afb

Branch: refs/heads/master
Commit: 81b95afbee2da5efed4c323ca73144f9bf71df0e
Parents: 5b7f9c2
Author: Apekshit Sharma 
Authored: Mon Nov 27 16:17:39 2017 -0800
Committer: Apekshit Sharma 
Committed: Wed Nov 29 22:40:11 2017 -0800

--
 .../apache/hadoop/hbase/master/RegionState.java |  9 +-
 .../hbase/client/TestRegionInfoDisplay.java |  5 +-
 .../procedure2/RemoteProcedureDispatcher.java   |  4 +-
 .../hbase/rsgroup/TestRSGroupsOfflineMode.java  |  2 +-
 .../org/apache/hadoop/hbase/master/HMaster.java |  2 +-
 .../master/assignment/AssignProcedure.java  |  2 +-
 .../master/assignment/AssignmentManager.java| 64 ++---
 .../assignment/MergeTableRegionsProcedure.java  | 10 +-
 .../master/assignment/RegionStateStore.java | 21 ++---
 .../hbase/master/assignment/RegionStates.java   | 95 +--
 .../assignment/RegionTransitionProcedure.java   | 35 ---
 .../assignment/SplitTableRegionProcedure.java   |  4 +-
 .../master/procedure/RSProcedureDispatcher.java | 98 +---
 .../master/TestMasterBalanceThrottling.java |  7 +-
 .../hadoop/hbase/master/TestRegionState.java|  2 +-
 .../master/assignment/MockMasterServices.java   |  5 +-
 .../assignment/TestAssignmentManager.java   |  6 +-
 .../master/assignment/TestRegionStates.java |  8 +-
 .../hbase/regionserver/TestHRegionInfo.java |  2 +-
 .../hbase/zookeeper/MetaTableLocator.java   |  5 +-
 20 files changed, 195 insertions(+), 191 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/81b95afb/hbase-client/src/main/java/org/apache/hadoop/hbase/master/RegionState.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/master/RegionState.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/master/RegionState.java
index 7598067..55a68fc 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/master/RegionState.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/master/RegionState.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hbase.master;
 
+import com.google.common.annotations.VisibleForTesting;
 import java.util.Date;
 
 import org.apache.hadoop.hbase.ServerName;
@@ -169,12 +170,12 @@ public class RegionState {
   // The duration of region in transition
   private long ritDuration;
 
-  public RegionState(RegionInfo region, State state) {
-this(region, state, System.currentTimeMillis(), null);
+  @VisibleForTesting
+  public static RegionState createForTesting(RegionInfo region, State state) {
+return new RegionState(region, state, System.currentTimeMillis(), null);
   }
 
-  public RegionState(RegionInfo region,
-  State state, ServerName serverName) {
+  public RegionState(RegionInfo region, State state, ServerName serverName) {
 this(region, state, System.currentTimeMillis(), serverName);
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/81b95afb/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRegionInfoDisplay.java
--
diff --git 
a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRegionInfoDisplay.java
 
b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRegionInfoDisplay.java
index 978e8c8..6d8c84c 100644
--- 
a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRegionInfoDisplay.java
+++ 
b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRegionInfoDisplay.java
@@ -21,9 +21,6 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.CategoryBasedTimeout;
 import org.apache.hadoop.hbase.HRegionInfo;
 import 

hbase git commit: HBASE-19382 Update report-flakies.py script to handle yetus builds.

2017-11-29 Thread appy
Repository: hbase
Updated Branches:
  refs/heads/master 79a89beb2 -> 5b7f9c253


HBASE-19382 Update report-flakies.py script to handle yetus builds.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/5b7f9c25
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/5b7f9c25
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/5b7f9c25

Branch: refs/heads/master
Commit: 5b7f9c253583dec22d2121824753a2efbae7bf01
Parents: 79a89be
Author: Apekshit Sharma 
Authored: Wed Nov 29 17:53:16 2017 -0800
Committer: Apekshit Sharma 
Committed: Wed Nov 29 22:21:10 2017 -0800

--
 dev-support/findHangingTests.py |  5 +++--
 dev-support/report-flakies.py   | 34 ++
 2 files changed, 29 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/5b7f9c25/dev-support/findHangingTests.py
--
diff --git a/dev-support/findHangingTests.py b/dev-support/findHangingTests.py
index e7bf906..328516e 100755
--- a/dev-support/findHangingTests.py
+++ b/dev-support/findHangingTests.py
@@ -75,8 +75,9 @@ def get_bad_tests(console_url):
 if "FAILURE!" in line:
 failed_tests_set.add(test_case)
 if test_case not in hanging_tests_set:
-print  ("ERROR! No test '{}' found in hanging_tests. Might get 
wrong results "
-"for this test.".format(test_case))
+print ("ERROR! No test '{}' found in hanging_tests. Might get 
wrong results "
+   "for this test. This may also happen if maven is set to 
retry failing "
+   "tests.".format(test_case))
 else:
 hanging_tests_set.remove(test_case)
 result3 = re.match("^\\s+(\\w*).*\\sTestTimedOut", line)

http://git-wip-us.apache.org/repos/asf/hbase/blob/5b7f9c25/dev-support/report-flakies.py
--
diff --git a/dev-support/report-flakies.py b/dev-support/report-flakies.py
index a28c3fb..201980d 100755
--- a/dev-support/report-flakies.py
+++ b/dev-support/report-flakies.py
@@ -51,6 +51,9 @@ parser.add_argument('--max-builds', metavar='n', 
action='append', type=int,
 help='The maximum number of builds to use (if available on 
jenkins). Specify '
  '0 to analyze all builds. Not required, but if 
specified, number of uses '
  'should be same as that of --urls since the values 
are matched.')
+parser.add_argument('--is-yetus', metavar='True/False', action='append', 
choices=['True', 'False'],
+help='True, if build is yetus style i.e. look for maven 
output in artifacts; '
+ 'False, if maven output is in /consoleText 
itself.')
 parser.add_argument(
 "--mvn", action="store_true",
 help="Writes two strings for including/excluding these flaky tests using 
maven flags. These "
@@ -66,18 +69,29 @@ if args.verbose:
 logger.setLevel(logging.INFO)
 
 
-def get_bad_tests(build_url):
+def get_bad_tests(build_url, is_yetus):
 """
-Given url of an executed build, analyzes its console text, and returns
+Given url of an executed build, analyzes its maven output, and returns
 [list of all tests, list of timeout tests, list of failed tests].
-Returns None if can't get console text or if there is any other error.
+Returns None if can't get maven output from the build or if there is any 
other error.
 """
 logger.info("Analyzing %s", build_url)
 response = requests.get(build_url + "/api/json").json()
 if response["building"]:
 logger.info("Skipping this build since it is in progress.")
 return {}
-console_url = build_url + "/consoleText"
+console_url = None
+if is_yetus:
+for artifact in response["artifacts"]:
+if artifact["fileName"] == "patch-unit-root.txt":
+console_url = build_url + "/artifact/" + 
artifact["relativePath"]
+break
+if console_url is None:
+logger.info("Can't find 'patch-unit-root.txt' artifact for Yetus 
build %s\n. Ignoring "
+"this build.", build_url)
+return
+else:
+console_url = build_url + "/consoleText"
 build_result = findHangingTests.get_bad_tests(console_url)
 if not build_result:
 logger.info("Ignoring build %s", build_url)
@@ -93,6 +107,7 @@ def expand_multi_config_projects(cli_args):
 job_urls = cli_args.urls
 excluded_builds_arg = cli_args.excluded_builds
 max_builds_arg = cli_args.max_builds
+is_yetus_arg = cli_args.is_yetus
 if excluded_builds_arg is not None and 

hbase git commit: HBASE-19383 [1.2] java.lang.AssertionError: expected:<2> but was:<1> at org.apache.hadoop.hbase.TestChoreService.testTriggerNowFailsWhenNotScheduled(TestChoreService.java:707)

2017-11-29 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/master abb535eef -> 79a89beb2


HBASE-19383 [1.2] java.lang.AssertionError: expected:<2> but was:<1> at
org.apache.hadoop.hbase.TestChoreService.testTriggerNowFailsWhenNotScheduled(TestChoreService.java:707)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/79a89beb
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/79a89beb
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/79a89beb

Branch: refs/heads/master
Commit: 79a89beb2edfc7d56a60ee49a5752f63f23ad8bd
Parents: abb535e
Author: Michael Stack 
Authored: Wed Nov 29 20:43:54 2017 -0800
Committer: Michael Stack 
Committed: Wed Nov 29 20:46:18 2017 -0800

--
 .../apache/hadoop/hbase/TestChoreService.java   | 29 
 1 file changed, 29 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/79a89beb/hbase-common/src/test/java/org/apache/hadoop/hbase/TestChoreService.java
--
diff --git 
a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestChoreService.java 
b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestChoreService.java
index e5546f6..826c8db 100644
--- a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestChoreService.java
+++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestChoreService.java
@@ -689,35 +689,6 @@ public class TestChoreService {
   }
 
   @Test (timeout=2)
-  public void testTriggerNowFailsWhenNotScheduled() throws 
InterruptedException {
-final int period = 100;
-// Small sleep time buffer to allow CountingChore to complete
-final int sleep = 5;
-ChoreService service = new 
ChoreService("testTriggerNowFailsWhenNotScheduled");
-CountingChore chore = new CountingChore("dn", period);
-
-try {
-  assertFalse(chore.triggerNow());
-  assertTrue(chore.getCountOfChoreCalls() == 0);
-
-  service.scheduleChore(chore);
-  Thread.sleep(sleep);
-  assertEquals(1, chore.getCountOfChoreCalls());
-  Thread.sleep(period);
-  assertEquals(2, chore.getCountOfChoreCalls());
-  assertTrue(chore.triggerNow());
-  Thread.sleep(sleep);
-  assertTrue(chore.triggerNow());
-  Thread.sleep(sleep);
-  assertTrue(chore.triggerNow());
-  Thread.sleep(sleep);
-  assertEquals(5, chore.getCountOfChoreCalls());
-} finally {
-  shutdownService(service);
-}
-  }
-
-  @Test (timeout=2)
   public void testStopperForScheduledChores() throws InterruptedException {
 ChoreService service = new ChoreService("testStopperForScheduledChores");
 Stoppable stopperForGroup1 = new SampleStopper();



hbase git commit: HBASE-19383 [1.2] java.lang.AssertionError: expected:<2> but was:<1> at org.apache.hadoop.hbase.TestChoreService.testTriggerNowFailsWhenNotScheduled(TestChoreService.java:707)

2017-11-29 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/branch-1 cf34adaf5 -> fb3fee341


HBASE-19383 [1.2] java.lang.AssertionError: expected:<2> but was:<1> at
org.apache.hadoop.hbase.TestChoreService.testTriggerNowFailsWhenNotScheduled(TestChoreService.java:707)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/fb3fee34
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/fb3fee34
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/fb3fee34

Branch: refs/heads/branch-1
Commit: fb3fee341455ce24fe276aafe3dea58f2a834037
Parents: cf34ada
Author: Michael Stack 
Authored: Wed Nov 29 20:43:54 2017 -0800
Committer: Michael Stack 
Committed: Wed Nov 29 20:45:31 2017 -0800

--
 .../apache/hadoop/hbase/TestChoreService.java   | 29 
 1 file changed, 29 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/fb3fee34/hbase-common/src/test/java/org/apache/hadoop/hbase/TestChoreService.java
--
diff --git 
a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestChoreService.java 
b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestChoreService.java
index abca0d7..aea689f 100644
--- a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestChoreService.java
+++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestChoreService.java
@@ -689,35 +689,6 @@ public class TestChoreService {
   }
 
   @Test (timeout=2)
-  public void testTriggerNowFailsWhenNotScheduled() throws 
InterruptedException {
-final int period = 100;
-// Small sleep time buffer to allow CountingChore to complete
-final int sleep = 5;
-ChoreService service = new 
ChoreService("testTriggerNowFailsWhenNotScheduled");
-CountingChore chore = new CountingChore("dn", period);
-
-try {
-  assertFalse(chore.triggerNow());
-  assertTrue(chore.getCountOfChoreCalls() == 0);
-
-  service.scheduleChore(chore);
-  Thread.sleep(sleep);
-  assertEquals(1, chore.getCountOfChoreCalls());
-  Thread.sleep(period);
-  assertEquals(2, chore.getCountOfChoreCalls());
-  assertTrue(chore.triggerNow());
-  Thread.sleep(sleep);
-  assertTrue(chore.triggerNow());
-  Thread.sleep(sleep);
-  assertTrue(chore.triggerNow());
-  Thread.sleep(sleep);
-  assertEquals(5, chore.getCountOfChoreCalls());
-} finally {
-  shutdownService(service);
-}
-  }
-
-  @Test (timeout=2)
   public void testStopperForScheduledChores() throws InterruptedException {
 ChoreService service = new ChoreService("testStopperForScheduledChores");
 Stoppable stopperForGroup1 = new SampleStopper();



hbase git commit: HBASE-19383 [1.2] java.lang.AssertionError: expected:<2> but was:<1> at org.apache.hadoop.hbase.TestChoreService.testTriggerNowFailsWhenNotScheduled(TestChoreService.java:707)

2017-11-29 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/branch-2 9e7ab9ffe -> 941970999


HBASE-19383 [1.2] java.lang.AssertionError: expected:<2> but was:<1> at
org.apache.hadoop.hbase.TestChoreService.testTriggerNowFailsWhenNotScheduled(TestChoreService.java:707)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/94197099
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/94197099
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/94197099

Branch: refs/heads/branch-2
Commit: 94197099952cd09cd36103d79a9dd4e34c191556
Parents: 9e7ab9f
Author: Michael Stack 
Authored: Wed Nov 29 20:43:54 2017 -0800
Committer: Michael Stack 
Committed: Wed Nov 29 20:45:58 2017 -0800

--
 .../apache/hadoop/hbase/TestChoreService.java   | 29 
 1 file changed, 29 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/94197099/hbase-common/src/test/java/org/apache/hadoop/hbase/TestChoreService.java
--
diff --git 
a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestChoreService.java 
b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestChoreService.java
index e5546f6..826c8db 100644
--- a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestChoreService.java
+++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestChoreService.java
@@ -689,35 +689,6 @@ public class TestChoreService {
   }
 
   @Test (timeout=2)
-  public void testTriggerNowFailsWhenNotScheduled() throws 
InterruptedException {
-final int period = 100;
-// Small sleep time buffer to allow CountingChore to complete
-final int sleep = 5;
-ChoreService service = new 
ChoreService("testTriggerNowFailsWhenNotScheduled");
-CountingChore chore = new CountingChore("dn", period);
-
-try {
-  assertFalse(chore.triggerNow());
-  assertTrue(chore.getCountOfChoreCalls() == 0);
-
-  service.scheduleChore(chore);
-  Thread.sleep(sleep);
-  assertEquals(1, chore.getCountOfChoreCalls());
-  Thread.sleep(period);
-  assertEquals(2, chore.getCountOfChoreCalls());
-  assertTrue(chore.triggerNow());
-  Thread.sleep(sleep);
-  assertTrue(chore.triggerNow());
-  Thread.sleep(sleep);
-  assertTrue(chore.triggerNow());
-  Thread.sleep(sleep);
-  assertEquals(5, chore.getCountOfChoreCalls());
-} finally {
-  shutdownService(service);
-}
-  }
-
-  @Test (timeout=2)
   public void testStopperForScheduledChores() throws InterruptedException {
 ChoreService service = new ChoreService("testStopperForScheduledChores");
 Stoppable stopperForGroup1 = new SampleStopper();



hbase git commit: HBASE-19383 [1.2] java.lang.AssertionError: expected:<2> but was:<1> at org.apache.hadoop.hbase.TestChoreService.testTriggerNowFailsWhenNotScheduled(TestChoreService.java:707)

2017-11-29 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/branch-1.2 45e856bc8 -> 93b380dd9


HBASE-19383 [1.2] java.lang.AssertionError: expected:<2> but was:<1> at
org.apache.hadoop.hbase.TestChoreService.testTriggerNowFailsWhenNotScheduled(TestChoreService.java:707)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/93b380dd
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/93b380dd
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/93b380dd

Branch: refs/heads/branch-1.2
Commit: 93b380dd9638b46e7abd37082a1e5f64f78586fc
Parents: 45e856b
Author: Michael Stack 
Authored: Wed Nov 29 20:43:54 2017 -0800
Committer: Michael Stack 
Committed: Wed Nov 29 20:43:54 2017 -0800

--
 .../apache/hadoop/hbase/TestChoreService.java   | 29 
 1 file changed, 29 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/93b380dd/hbase-common/src/test/java/org/apache/hadoop/hbase/TestChoreService.java
--
diff --git 
a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestChoreService.java 
b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestChoreService.java
index cf68601..5367b4a 100644
--- a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestChoreService.java
+++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestChoreService.java
@@ -689,35 +689,6 @@ public class TestChoreService {
   }
 
   @Test (timeout=2)
-  public void testTriggerNowFailsWhenNotScheduled() throws 
InterruptedException {
-final int period = 100;
-// Small sleep time buffer to allow CountingChore to complete
-final int sleep = 5;
-ChoreService service = new 
ChoreService("testTriggerNowFailsWhenNotScheduled");
-CountingChore chore = new CountingChore("dn", period);
-
-try {
-  assertFalse(chore.triggerNow());
-  assertTrue(chore.getCountOfChoreCalls() == 0);
-
-  service.scheduleChore(chore);
-  Thread.sleep(sleep);
-  assertEquals(1, chore.getCountOfChoreCalls());
-  Thread.sleep(period);
-  assertEquals(2, chore.getCountOfChoreCalls());
-  assertTrue(chore.triggerNow());
-  Thread.sleep(sleep);
-  assertTrue(chore.triggerNow());
-  Thread.sleep(sleep);
-  assertTrue(chore.triggerNow());
-  Thread.sleep(sleep);
-  assertEquals(5, chore.getCountOfChoreCalls());
-} finally {
-  shutdownService(service);
-}
-  }
-
-  @Test (timeout=2)
   public void testStopperForScheduledChores() throws InterruptedException {
 ChoreService service = 
ChoreService.getInstance("testStopperForScheduledChores");
 Stoppable stopperForGroup1 = new SampleStopper();



[2/2] hbase git commit: HBASE-19379 TestEndToEndSplitTransaction fails with NPE

2017-11-29 Thread apurtell
HBASE-19379 TestEndToEndSplitTransaction fails with NPE


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/8492952a
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/8492952a
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/8492952a

Branch: refs/heads/branch-1.4
Commit: 8492952a9dcfe9cf2e6c697e512cb2107b8934fb
Parents: ef12ee4
Author: Andrew Purtell 
Authored: Wed Nov 29 19:11:05 2017 -0800
Committer: Andrew Purtell 
Committed: Wed Nov 29 19:14:40 2017 -0800

--
 .../java/org/apache/hadoop/hbase/HRegionLocation.java   | 12 +++-
 1 file changed, 11 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/8492952a/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionLocation.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionLocation.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionLocation.java
index 373e76b..2b354f8 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionLocation.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionLocation.java
@@ -83,6 +83,9 @@ public class HRegionLocation implements 
Comparable {
*/
   @Override
   public int hashCode() {
+if (serverName == null) {
+  return System.identityHashCode(this);
+}
 return this.serverName.hashCode();
   }
 
@@ -116,6 +119,13 @@ public class HRegionLocation implements 
Comparable {
 
   @Override
   public int compareTo(HRegionLocation o) {
-return serverName.compareTo(o.getServerName());
+if (serverName == null) {
+  if (o.serverName != null) {
+return 1;
+  }
+  return 0;
+} else {
+  return serverName.compareTo(o.getServerName());
+}
   }
 }



[1/2] hbase git commit: HBASE-19379 TestEndToEndSplitTransaction fails with NPE

2017-11-29 Thread apurtell
Repository: hbase
Updated Branches:
  refs/heads/branch-1 0b704d481 -> cf34adaf5
  refs/heads/branch-1.4 ef12ee480 -> 8492952a9


HBASE-19379 TestEndToEndSplitTransaction fails with NPE


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/cf34adaf
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/cf34adaf
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/cf34adaf

Branch: refs/heads/branch-1
Commit: cf34adaf5ef3ad6b89d57e1a6adb874fbe1cfc68
Parents: 0b704d4
Author: Andrew Purtell 
Authored: Wed Nov 29 19:11:05 2017 -0800
Committer: Andrew Purtell 
Committed: Wed Nov 29 19:11:05 2017 -0800

--
 .../java/org/apache/hadoop/hbase/HRegionLocation.java   | 12 +++-
 1 file changed, 11 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/cf34adaf/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionLocation.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionLocation.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionLocation.java
index 373e76b..2b354f8 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionLocation.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionLocation.java
@@ -83,6 +83,9 @@ public class HRegionLocation implements 
Comparable {
*/
   @Override
   public int hashCode() {
+if (serverName == null) {
+  return System.identityHashCode(this);
+}
 return this.serverName.hashCode();
   }
 
@@ -116,6 +119,13 @@ public class HRegionLocation implements 
Comparable {
 
   @Override
   public int compareTo(HRegionLocation o) {
-return serverName.compareTo(o.getServerName());
+if (serverName == null) {
+  if (o.serverName != null) {
+return 1;
+  }
+  return 0;
+} else {
+  return serverName.compareTo(o.getServerName());
+}
   }
 }



hbase git commit: Revert "HBASE-19379 TestEndToEndSplitTransaction fails with NPE"

2017-11-29 Thread apurtell
Repository: hbase
Updated Branches:
  refs/heads/branch-1 f3614f20c -> 0b704d481


Revert "HBASE-19379 TestEndToEndSplitTransaction fails with NPE"

This reverts commit f3614f20c00a455dd59d6ca46abaa00123b946f9.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/0b704d48
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/0b704d48
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/0b704d48

Branch: refs/heads/branch-1
Commit: 0b704d4815892963e7355aa7a587825943b107a0
Parents: f3614f2
Author: Andrew Purtell 
Authored: Wed Nov 29 19:06:29 2017 -0800
Committer: Andrew Purtell 
Committed: Wed Nov 29 19:06:29 2017 -0800

--
 .../apache/hadoop/hbase/HRegionLocation.java| 73 +---
 1 file changed, 18 insertions(+), 55 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/0b704d48/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionLocation.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionLocation.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionLocation.java
index ff78ddc..373e76b 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionLocation.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionLocation.java
@@ -61,71 +61,29 @@ public class HRegionLocation implements 
Comparable {
 + ", hostname=" + this.serverName + ", seqNum=" + seqNum;
   }
 
+  /**
+   * @see java.lang.Object#equals(java.lang.Object)
+   */
   @Override
-  public int hashCode() {
-final int prime = 31;
-int result = 1;
-result = prime * result + ((regionInfo == null) ? 0 : 
regionInfo.hashCode());
-result = prime * result + (int) (seqNum ^ (seqNum >>> 32));
-result = prime * result + ((serverName == null) ? 0 : 
serverName.hashCode());
-return result;
-  }
-
-  @Override
-  public boolean equals(Object obj) {
-if (this == obj) {
+  public boolean equals(Object o) {
+if (this == o) {
   return true;
 }
-if (obj == null) {
-  return false;
-}
-if (getClass() != obj.getClass()) {
-  return false;
-}
-HRegionLocation other = (HRegionLocation) obj;
-if (regionInfo == null) {
-  if (other.regionInfo != null) {
-return false;
-  }
-} else if (!regionInfo.equals(other.regionInfo)) {
+if (o == null) {
   return false;
 }
-if (seqNum != other.seqNum) {
+if (!(o instanceof HRegionLocation)) {
   return false;
 }
-if (serverName == null) {
-  if (other.serverName != null) {
-return false;
-  }
-} else if (!serverName.equals(other.serverName)) {
-  return false;
-}
-return true;
+return this.compareTo((HRegionLocation)o) == 0;
   }
 
+  /**
+   * @see java.lang.Object#hashCode()
+   */
   @Override
-  public int compareTo(HRegionLocation other) {
-if (regionInfo == null) {
-  if (other.regionInfo != null) {
-return 1;
-  }
-} else {
-  int compare = regionInfo.compareTo(other.regionInfo);
-  if (compare != 0) {
-return compare;
-  }
-}
-if (serverName == null) {
-  if (other.serverName != null) {
-return 1;
-  }
-} else {
-  int compare = serverName.compareTo(other.serverName);
-  if (compare != 0) {
-return compare;
-  }
-}
-return Long.compare(seqNum, other.seqNum);
+  public int hashCode() {
+return this.serverName.hashCode();
   }
 
   /** @return HRegionInfo */
@@ -155,4 +113,9 @@ public class HRegionLocation implements 
Comparable {
   public ServerName getServerName() {
 return serverName;
   }
+
+  @Override
+  public int compareTo(HRegionLocation o) {
+return serverName.compareTo(o.getServerName());
+  }
 }



hbase git commit: Revert "HBASE-19379 TestEndToEndSplitTransaction fails with NPE"

2017-11-29 Thread apurtell
Repository: hbase
Updated Branches:
  refs/heads/branch-1.4 ea8123e81 -> ef12ee480


Revert "HBASE-19379 TestEndToEndSplitTransaction fails with NPE"

This reverts commit 39da0d44e0c286d8a4129daf9ed079722b8a8c0c.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/ef12ee48
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/ef12ee48
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/ef12ee48

Branch: refs/heads/branch-1.4
Commit: ef12ee48045ad39b5cb99b9460f02d5c2a98fa57
Parents: ea8123e
Author: Andrew Purtell 
Authored: Wed Nov 29 19:06:04 2017 -0800
Committer: Andrew Purtell 
Committed: Wed Nov 29 19:06:04 2017 -0800

--
 .../apache/hadoop/hbase/HRegionLocation.java| 73 +---
 1 file changed, 18 insertions(+), 55 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/ef12ee48/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionLocation.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionLocation.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionLocation.java
index ff78ddc..373e76b 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionLocation.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionLocation.java
@@ -61,71 +61,29 @@ public class HRegionLocation implements 
Comparable {
 + ", hostname=" + this.serverName + ", seqNum=" + seqNum;
   }
 
+  /**
+   * @see java.lang.Object#equals(java.lang.Object)
+   */
   @Override
-  public int hashCode() {
-final int prime = 31;
-int result = 1;
-result = prime * result + ((regionInfo == null) ? 0 : 
regionInfo.hashCode());
-result = prime * result + (int) (seqNum ^ (seqNum >>> 32));
-result = prime * result + ((serverName == null) ? 0 : 
serverName.hashCode());
-return result;
-  }
-
-  @Override
-  public boolean equals(Object obj) {
-if (this == obj) {
+  public boolean equals(Object o) {
+if (this == o) {
   return true;
 }
-if (obj == null) {
-  return false;
-}
-if (getClass() != obj.getClass()) {
-  return false;
-}
-HRegionLocation other = (HRegionLocation) obj;
-if (regionInfo == null) {
-  if (other.regionInfo != null) {
-return false;
-  }
-} else if (!regionInfo.equals(other.regionInfo)) {
+if (o == null) {
   return false;
 }
-if (seqNum != other.seqNum) {
+if (!(o instanceof HRegionLocation)) {
   return false;
 }
-if (serverName == null) {
-  if (other.serverName != null) {
-return false;
-  }
-} else if (!serverName.equals(other.serverName)) {
-  return false;
-}
-return true;
+return this.compareTo((HRegionLocation)o) == 0;
   }
 
+  /**
+   * @see java.lang.Object#hashCode()
+   */
   @Override
-  public int compareTo(HRegionLocation other) {
-if (regionInfo == null) {
-  if (other.regionInfo != null) {
-return 1;
-  }
-} else {
-  int compare = regionInfo.compareTo(other.regionInfo);
-  if (compare != 0) {
-return compare;
-  }
-}
-if (serverName == null) {
-  if (other.serverName != null) {
-return 1;
-  }
-} else {
-  int compare = serverName.compareTo(other.serverName);
-  if (compare != 0) {
-return compare;
-  }
-}
-return Long.compare(seqNum, other.seqNum);
+  public int hashCode() {
+return this.serverName.hashCode();
   }
 
   /** @return HRegionInfo */
@@ -155,4 +113,9 @@ public class HRegionLocation implements 
Comparable {
   public ServerName getServerName() {
 return serverName;
   }
+
+  @Override
+  public int compareTo(HRegionLocation o) {
+return serverName.compareTo(o.getServerName());
+  }
 }



[2/3] hbase git commit: HBASE-19379 TestEndToEndSplitTransaction fails with NPE

2017-11-29 Thread apurtell
HBASE-19379 TestEndToEndSplitTransaction fails with NPE


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/39da0d44
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/39da0d44
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/39da0d44

Branch: refs/heads/branch-1.4
Commit: 39da0d44e0c286d8a4129daf9ed079722b8a8c0c
Parents: fb070f1
Author: Andrew Purtell 
Authored: Wed Nov 29 16:37:07 2017 -0800
Committer: Andrew Purtell 
Committed: Wed Nov 29 17:24:54 2017 -0800

--
 .../apache/hadoop/hbase/HRegionLocation.java| 73 +++-
 1 file changed, 55 insertions(+), 18 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/39da0d44/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionLocation.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionLocation.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionLocation.java
index 373e76b..ff78ddc 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionLocation.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionLocation.java
@@ -61,29 +61,71 @@ public class HRegionLocation implements 
Comparable {
 + ", hostname=" + this.serverName + ", seqNum=" + seqNum;
   }
 
-  /**
-   * @see java.lang.Object#equals(java.lang.Object)
-   */
   @Override
-  public boolean equals(Object o) {
-if (this == o) {
+  public int hashCode() {
+final int prime = 31;
+int result = 1;
+result = prime * result + ((regionInfo == null) ? 0 : 
regionInfo.hashCode());
+result = prime * result + (int) (seqNum ^ (seqNum >>> 32));
+result = prime * result + ((serverName == null) ? 0 : 
serverName.hashCode());
+return result;
+  }
+
+  @Override
+  public boolean equals(Object obj) {
+if (this == obj) {
   return true;
 }
-if (o == null) {
+if (obj == null) {
+  return false;
+}
+if (getClass() != obj.getClass()) {
+  return false;
+}
+HRegionLocation other = (HRegionLocation) obj;
+if (regionInfo == null) {
+  if (other.regionInfo != null) {
+return false;
+  }
+} else if (!regionInfo.equals(other.regionInfo)) {
   return false;
 }
-if (!(o instanceof HRegionLocation)) {
+if (seqNum != other.seqNum) {
   return false;
 }
-return this.compareTo((HRegionLocation)o) == 0;
+if (serverName == null) {
+  if (other.serverName != null) {
+return false;
+  }
+} else if (!serverName.equals(other.serverName)) {
+  return false;
+}
+return true;
   }
 
-  /**
-   * @see java.lang.Object#hashCode()
-   */
   @Override
-  public int hashCode() {
-return this.serverName.hashCode();
+  public int compareTo(HRegionLocation other) {
+if (regionInfo == null) {
+  if (other.regionInfo != null) {
+return 1;
+  }
+} else {
+  int compare = regionInfo.compareTo(other.regionInfo);
+  if (compare != 0) {
+return compare;
+  }
+}
+if (serverName == null) {
+  if (other.serverName != null) {
+return 1;
+  }
+} else {
+  int compare = serverName.compareTo(other.serverName);
+  if (compare != 0) {
+return compare;
+  }
+}
+return Long.compare(seqNum, other.seqNum);
   }
 
   /** @return HRegionInfo */
@@ -113,9 +155,4 @@ public class HRegionLocation implements 
Comparable {
   public ServerName getServerName() {
 return serverName;
   }
-
-  @Override
-  public int compareTo(HRegionLocation o) {
-return serverName.compareTo(o.getServerName());
-  }
 }



[3/3] hbase git commit: HBASE-19381 TestGlobalThrottler doesn't make progress

2017-11-29 Thread apurtell
HBASE-19381 TestGlobalThrottler doesn't make progress

Revert "HBASE-17314 Limit total buffered size for all replication sources"

This reverts commit 29e390c80895af54206d6a14eac50ca2859cf2b7.

Conflicts:

hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java

hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSource.java

hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestGlobalThrottler.java


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/ea8123e8
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/ea8123e8
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/ea8123e8

Branch: refs/heads/branch-1.4
Commit: ea8123e81cb4b0e2d89fb672b5bfe67557852ec0
Parents: 39da0d4
Author: Andrew Purtell 
Authored: Wed Nov 29 17:21:42 2017 -0800
Committer: Andrew Purtell 
Committed: Wed Nov 29 17:26:39 2017 -0800

--
 .../org/apache/hadoop/hbase/HConstants.java |   9 -
 .../hbase/regionserver/HRegionServer.java   |   1 -
 .../regionserver/ReplicationSource.java |  13 +-
 .../regionserver/ReplicationSourceManager.java  |   8 -
 .../ReplicationSourceWALReaderThread.java   |  34 +---
 .../replication/TestReplicationSource.java  |  12 +-
 .../regionserver/TestGlobalThrottler.java   | 187 ---
 .../regionserver/TestWALEntryStream.java|   3 -
 8 files changed, 5 insertions(+), 262 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/ea8123e8/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
--
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java 
b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
index 8242a17..2de16f7 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
@@ -895,15 +895,6 @@ public final class HConstants {
   /** Replication cluster id of source cluster which uniquely identifies 
itself with peer cluster */
   public static final String REPLICATION_CLUSTER_ID = 
"hbase.replication.cluster.id";
   /**
-   * Max total size of buffered entries in all replication peers. It will 
prevent server getting
-   * OOM if there are many peers. Default value is 256MB which is four times 
to default
-   * replication.source.size.capacity.
-   */
-  public static final String REPLICATION_SOURCE_TOTAL_BUFFER_KEY = 
"replication.total.buffer.quota";
-  public static final int REPLICATION_SOURCE_TOTAL_BUFFER_DFAULT = 256 * 1024 
* 1024;
-
-
-  /**
* Directory where the source cluster file system client configuration are 
placed which is used by
* sink cluster to copy HFiles from source cluster file system
*/

http://git-wip-us.apache.org/repos/asf/hbase/blob/ea8123e8/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index 4853b2b..b156256 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -2291,7 +2291,6 @@ public class HRegionServer extends HasThread implements
* @return Return the object that implements the replication
* source service.
*/
-  @VisibleForTesting
   public ReplicationSourceService getReplicationSourceService() {
 return replicationSourceHandler;
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/ea8123e8/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
index add1043..776814f 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
@@ -72,10 +72,6 @@ import org.apache.hadoop.hbase.util.Threads;
 import org.apache.hadoop.hbase.wal.DefaultWALProvider;
 import org.apache.hadoop.hbase.wal.WAL.Entry;
 
-import com.google.common.collect.Lists;
-import com.google.common.util.concurrent.ListenableFuture;
-import 

[1/3] hbase git commit: HBASE-19379 TestEndToEndSplitTransaction fails with NPE

2017-11-29 Thread apurtell
Repository: hbase
Updated Branches:
  refs/heads/branch-1 4c413e0c5 -> f3614f20c
  refs/heads/branch-1.4 fb070f1d4 -> ea8123e81


HBASE-19379 TestEndToEndSplitTransaction fails with NPE


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/f3614f20
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/f3614f20
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/f3614f20

Branch: refs/heads/branch-1
Commit: f3614f20c00a455dd59d6ca46abaa00123b946f9
Parents: 4c413e0
Author: Andrew Purtell 
Authored: Wed Nov 29 16:37:07 2017 -0800
Committer: Andrew Purtell 
Committed: Wed Nov 29 17:10:52 2017 -0800

--
 .../apache/hadoop/hbase/HRegionLocation.java| 73 +++-
 1 file changed, 55 insertions(+), 18 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/f3614f20/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionLocation.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionLocation.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionLocation.java
index 373e76b..ff78ddc 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionLocation.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionLocation.java
@@ -61,29 +61,71 @@ public class HRegionLocation implements 
Comparable {
 + ", hostname=" + this.serverName + ", seqNum=" + seqNum;
   }
 
-  /**
-   * @see java.lang.Object#equals(java.lang.Object)
-   */
   @Override
-  public boolean equals(Object o) {
-if (this == o) {
+  public int hashCode() {
+final int prime = 31;
+int result = 1;
+result = prime * result + ((regionInfo == null) ? 0 : 
regionInfo.hashCode());
+result = prime * result + (int) (seqNum ^ (seqNum >>> 32));
+result = prime * result + ((serverName == null) ? 0 : 
serverName.hashCode());
+return result;
+  }
+
+  @Override
+  public boolean equals(Object obj) {
+if (this == obj) {
   return true;
 }
-if (o == null) {
+if (obj == null) {
+  return false;
+}
+if (getClass() != obj.getClass()) {
+  return false;
+}
+HRegionLocation other = (HRegionLocation) obj;
+if (regionInfo == null) {
+  if (other.regionInfo != null) {
+return false;
+  }
+} else if (!regionInfo.equals(other.regionInfo)) {
   return false;
 }
-if (!(o instanceof HRegionLocation)) {
+if (seqNum != other.seqNum) {
   return false;
 }
-return this.compareTo((HRegionLocation)o) == 0;
+if (serverName == null) {
+  if (other.serverName != null) {
+return false;
+  }
+} else if (!serverName.equals(other.serverName)) {
+  return false;
+}
+return true;
   }
 
-  /**
-   * @see java.lang.Object#hashCode()
-   */
   @Override
-  public int hashCode() {
-return this.serverName.hashCode();
+  public int compareTo(HRegionLocation other) {
+if (regionInfo == null) {
+  if (other.regionInfo != null) {
+return 1;
+  }
+} else {
+  int compare = regionInfo.compareTo(other.regionInfo);
+  if (compare != 0) {
+return compare;
+  }
+}
+if (serverName == null) {
+  if (other.serverName != null) {
+return 1;
+  }
+} else {
+  int compare = serverName.compareTo(other.serverName);
+  if (compare != 0) {
+return compare;
+  }
+}
+return Long.compare(seqNum, other.seqNum);
   }
 
   /** @return HRegionInfo */
@@ -113,9 +155,4 @@ public class HRegionLocation implements 
Comparable {
   public ServerName getServerName() {
 return serverName;
   }
-
-  @Override
-  public int compareTo(HRegionLocation o) {
-return serverName.compareTo(o.getServerName());
-  }
 }



hbase git commit: HBASE-19376 Fix more binary compatibility problems with branch-1.4 / branch-1

2017-11-29 Thread apurtell
Repository: hbase
Updated Branches:
  refs/heads/branch-1.4 74e313396 -> fb070f1d4


HBASE-19376 Fix more binary compatibility problems with branch-1.4 / branch-1


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/fb070f1d
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/fb070f1d
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/fb070f1d

Branch: refs/heads/branch-1.4
Commit: fb070f1d4aa72c9aeeac1da1a8760ca6303a13ed
Parents: 74e3133
Author: Andrew Purtell 
Authored: Wed Nov 29 11:43:07 2017 -0800
Committer: Andrew Purtell 
Committed: Wed Nov 29 14:07:41 2017 -0800

--
 .../org/apache/hadoop/hbase/ClusterStatus.java  | 25 +++-
 .../org/apache/hadoop/hbase/client/Result.java  | 15 +
 .../apache/hadoop/hbase/filter/FilterList.java  |  9 +++
 .../hbase/mapreduce/TableInputFormatBase.java   | 62 +++-
 .../hbase/master/ClusterStatusPublisher.java|  7 +--
 .../hbase/master/snapshot/SnapshotManager.java  | 13 +++-
 6 files changed, 120 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/fb070f1d/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java
index 10637d6..2eb1162 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java
@@ -20,12 +20,15 @@
 package org.apache.hadoop.hbase;
 
 import com.google.common.base.Objects;
+import com.google.common.collect.Sets;
+
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
+import java.util.List;
 import java.util.Map;
 import java.util.Set;
 
@@ -41,7 +44,6 @@ import 
org.apache.hadoop.hbase.protobuf.generated.FSProtos.HBaseVersionFileConte
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier;
 import 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
-import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.io.VersionedWritable;
 
 
@@ -100,6 +102,26 @@ public class ClusterStatus extends VersionedWritable {
 super();
   }
 
+  @Deprecated
+  public ClusterStatus(String hbaseVersion, String clusterid, List 
deadServers,
+  ServerName master) {
+this(hbaseVersion, clusterid, new HashMap(), 
deadServers, master,
+  new ArrayList(), new HashSet(), new String[0], 
null);
+  }
+
+  @Deprecated
+  public ClusterStatus(final String hbaseVersion, final String clusterid,
+  final Map servers,
+  final Collection deadServers,
+  final ServerName master,
+  final Collection backupMasters,
+  final Map rit,
+  final String[ ] masterCoprocessors,
+  final Boolean balancerOn) {
+this(hbaseVersion, clusterid, servers, deadServers, master, backupMasters, 
Sets.newHashSet(rit.values()),
+  masterCoprocessors, balancerOn);
+  }
+
   public ClusterStatus(final String hbaseVersion, final String clusterid,
   final Map servers,
   final Collection deadServers,
@@ -109,7 +131,6 @@ public class ClusterStatus extends VersionedWritable {
   final String[] masterCoprocessors,
   final Boolean balancerOn) {
 this.hbaseVersion = hbaseVersion;
-
 this.liveServers = servers;
 this.deadServers = deadServers;
 this.master = master;

http://git-wip-us.apache.org/repos/asf/hbase/blob/fb070f1d/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java
index ffd7695..d14171f 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java
@@ -869,6 +869,21 @@ public class Result implements CellScannable, CellScanner {
* @return The complete result that is formed by combining all of the 
partial results together
* @throws IOException A complete result cannot be formed because the 
results in the partial list
*   come from different rows
+   * @deprecated
+   */
+  @Deprecated
+  public static Result createCompleteResult(List partialResults)
+  throws IOException {
+

hbase git commit: HBASE-19376 Fix more binary compatibility problems with branch-1.4 / branch-1

2017-11-29 Thread apurtell
Repository: hbase
Updated Branches:
  refs/heads/branch-1 36a702996 -> 4c413e0c5


HBASE-19376 Fix more binary compatibility problems with branch-1.4 / branch-1


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/4c413e0c
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/4c413e0c
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/4c413e0c

Branch: refs/heads/branch-1
Commit: 4c413e0c50777e1d0cbe72f8f081da96063913c0
Parents: 36a7029
Author: Andrew Purtell 
Authored: Wed Nov 29 11:43:07 2017 -0800
Committer: Andrew Purtell 
Committed: Wed Nov 29 14:07:36 2017 -0800

--
 .../org/apache/hadoop/hbase/ClusterStatus.java  | 25 +++-
 .../org/apache/hadoop/hbase/client/Result.java  | 15 +
 .../apache/hadoop/hbase/filter/FilterList.java  |  9 +++
 .../hbase/mapreduce/TableInputFormatBase.java   | 62 +++-
 .../hbase/master/ClusterStatusPublisher.java|  7 +--
 .../hbase/master/snapshot/SnapshotManager.java  | 13 +++-
 6 files changed, 120 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/4c413e0c/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java
index 10637d6..2eb1162 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java
@@ -20,12 +20,15 @@
 package org.apache.hadoop.hbase;
 
 import com.google.common.base.Objects;
+import com.google.common.collect.Sets;
+
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
+import java.util.List;
 import java.util.Map;
 import java.util.Set;
 
@@ -41,7 +44,6 @@ import 
org.apache.hadoop.hbase.protobuf.generated.FSProtos.HBaseVersionFileConte
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier;
 import 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
-import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.io.VersionedWritable;
 
 
@@ -100,6 +102,26 @@ public class ClusterStatus extends VersionedWritable {
 super();
   }
 
+  @Deprecated
+  public ClusterStatus(String hbaseVersion, String clusterid, List 
deadServers,
+  ServerName master) {
+this(hbaseVersion, clusterid, new HashMap(), 
deadServers, master,
+  new ArrayList(), new HashSet(), new String[0], 
null);
+  }
+
+  @Deprecated
+  public ClusterStatus(final String hbaseVersion, final String clusterid,
+  final Map servers,
+  final Collection deadServers,
+  final ServerName master,
+  final Collection backupMasters,
+  final Map rit,
+  final String[ ] masterCoprocessors,
+  final Boolean balancerOn) {
+this(hbaseVersion, clusterid, servers, deadServers, master, backupMasters, 
Sets.newHashSet(rit.values()),
+  masterCoprocessors, balancerOn);
+  }
+
   public ClusterStatus(final String hbaseVersion, final String clusterid,
   final Map servers,
   final Collection deadServers,
@@ -109,7 +131,6 @@ public class ClusterStatus extends VersionedWritable {
   final String[] masterCoprocessors,
   final Boolean balancerOn) {
 this.hbaseVersion = hbaseVersion;
-
 this.liveServers = servers;
 this.deadServers = deadServers;
 this.master = master;

http://git-wip-us.apache.org/repos/asf/hbase/blob/4c413e0c/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java
index ffd7695..d14171f 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java
@@ -869,6 +869,21 @@ public class Result implements CellScannable, CellScanner {
* @return The complete result that is formed by combining all of the 
partial results together
* @throws IOException A complete result cannot be formed because the 
results in the partial list
*   come from different rows
+   * @deprecated
+   */
+  @Deprecated
+  public static Result createCompleteResult(List partialResults)
+  throws IOException {
+return 

hbase git commit: HBASE-18233 We shouldn't wait for readlock in doMiniBatchMutation in case of deadlock (Allan Yang)

2017-11-29 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/branch-1.3 9519efa43 -> 9f102b293


HBASE-18233 We shouldn't wait for readlock in doMiniBatchMutation in case of 
deadlock (Allan Yang)

This patch plus a sorting of the batch (HBASE-17924) fixes a regression
in Increment/CheckAndPut-style operations.

Signed-off-by: Yu Li 
Signed-off-by: Allan Yang 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/9f102b29
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/9f102b29
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/9f102b29

Branch: refs/heads/branch-1.3
Commit: 9f102b293f1f211aff84af3c09873414767c5805
Parents: 9519efa
Author: Michael Stack 
Authored: Tue Nov 28 09:14:58 2017 -0800
Committer: Michael Stack 
Committed: Wed Nov 29 13:32:52 2017 -0800

--
 .../hadoop/hbase/regionserver/HRegion.java  |  82 ---
 .../hadoop/hbase/regionserver/Region.java   |   4 +-
 .../hadoop/hbase/client/TestMultiParallel.java  | 141 +++
 .../hbase/regionserver/TestAtomicOperation.java |   5 +-
 4 files changed, 212 insertions(+), 20 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/9f102b29/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index a7ed011..b25e11b 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -3135,18 +3135,29 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
   continue;
 }
 
+
+//HBASE-18233
 // If we haven't got any rows in our batch, we should block to
-// get the next one.
+// get the next one's read lock. We need at least one row to mutate.
+// If we have got rows, do not block when lock is not available,
+// so that we can fail fast and go on with the rows with locks in
+// the batch. By doing this, we can reduce contention and prevent
+// possible deadlocks.
+// The unfinished rows in the batch will be detected in batchMutate,
+// and it will try to finish them by calling doMiniBatchMutation again.
+boolean shouldBlock = numReadyToWrite == 0;
 RowLock rowLock = null;
 try {
-  rowLock = getRowLockInternal(mutation.getRow(), true);
+  rowLock = getRowLockInternal(mutation.getRow(), true, shouldBlock);
 } catch (IOException ioe) {
   LOG.warn("Failed getting lock in batch put, row="
 + Bytes.toStringBinary(mutation.getRow()), ioe);
 }
 if (rowLock == null) {
-  // We failed to grab another lock
-  break; // stop acquiring more rows for this batch
+  // We failed to grab another lock. Stop acquiring more rows for this
+  // batch and go on with the gotten ones
+  break;
+
 } else {
   acquiredRowLocks.add(rowLock);
 }
@@ -3242,7 +3253,7 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
   checkAndPrepareMutation(cpMutation, isInReplay, cpFamilyMap, 
now);
 
   // Acquire row locks. If not, the whole batch will fail.
-  acquiredRowLocks.add(getRowLockInternal(cpMutation.getRow(), 
true));
+  acquiredRowLocks.add(getRowLockInternal(cpMutation.getRow(), 
true, true));
 
   if (cpMutation.getDurability() == Durability.SKIP_WAL) {
 recordMutationWithoutWal(cpFamilyMap);
@@ -3537,7 +3548,7 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
   get.addColumn(family, qualifier);
   checkRow(row, "checkAndMutate");
   // Lock row - note that doBatchMutate will relock this row if called
-  RowLock rowLock = getRowLockInternal(get.getRow(), false);
+  RowLock rowLock = getRowLockInternal(get.getRow());
   // wait for all previous transactions to complete (with lock held)
   mvcc.await();
   try {
@@ -3647,7 +3658,7 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
   get.addColumn(family, qualifier);
   checkRow(row, "checkAndRowMutate");
   // Lock row - note that doBatchMutate will relock this row if called
-  RowLock rowLock = getRowLockInternal(get.getRow(), false);
+  RowLock rowLock = getRowLockInternal(get.getRow());
   // wait for all previous transactions to complete (with lock 

hbase git commit: HBASE-19354 [branch-1] Build using a jdk that is beyond ubuntu trusty's openjdk-151

2017-11-29 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/branch-1 dce6285c2 -> 36a702996


HBASE-19354 [branch-1] Build using a jdk that is beyond ubuntu trusty's 
openjdk-151

Amend our Dockerfile so it gets jdks from azul repo.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/36a70299
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/36a70299
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/36a70299

Branch: refs/heads/branch-1
Commit: 36a702996184b817d9203f84c04c9f8f4a7baa56
Parents: dce6285
Author: Michael Stack 
Authored: Mon Nov 27 15:27:32 2017 -0800
Committer: Michael Stack 
Committed: Wed Nov 29 13:27:50 2017 -0800

--
 dev-support/docker/Dockerfile | 42 +-
 1 file changed, 28 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/36a70299/dev-support/docker/Dockerfile
--
diff --git a/dev-support/docker/Dockerfile b/dev-support/docker/Dockerfile
index 2605e2c..f706e4f 100644
--- a/dev-support/docker/Dockerfile
+++ b/dev-support/docker/Dockerfile
@@ -63,29 +63,43 @@ RUN apt-get -q update && apt-get -q install 
--no-install-recommends -y \
 rsync \
 snappy \
 zlib1g-dev \
-wget \
-openjdk-7-jdk
+wget
+
+
+# Apps that require Java.
+# Maven and ant depend on ubuntu trusty's headless jdk7. The install of
+# maven and ant will pull down this jdk even though we don't want it.
+# Do the maven and ant install here rather than later where the jdk7
+# will overwrite the jdk7 we actually want to use. See next section on jdks.
+###
+RUN apt-get -q update && apt-get -q install --no-install-recommends -y \
+ant \
+maven
 
 ###
-# OpenJDK 8
+# Install jdk7 and jdk8.
 ###
-
+# The jdks in ubuntu trusty don't work. HDFS hangs on openjdk-7 151.
+# See HBASE-19204. So, we use the azul jdks because they are available, and
+# later versions of openjdk (openjdk-7 161). Below we add the azul repo and
+# then install its jdks. We then move aside the headless jdk7 added above
+# when we added maven and ant and rename the azul jvms as
+# though they were from openjdk (otherwise yetus won't set JAVA_HOME;
+# it does find /usr/lib/jvm/ -name java-* -type d so a symlink to the zulu jvms
+# won't work).
 RUN echo "dot_style = mega" > "/root/.wgetrc"
 RUN echo "quiet = on" >> "/root/.wgetrc"
-
-RUN apt-get -q update && apt-get -q install --no-install-recommends -y 
software-properties-common
-RUN add-apt-repository -y ppa:openjdk-r/ppa
+RUN apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 
0x219BD9C9
+RUN apt-get -q update && apt-get -q install --no-install-recommends -y 
software-properties-common python-software-properties
+RUN apt-add-repository 'deb http://repos.azulsystems.com/ubuntu stable main'
 RUN apt-get -q update
-RUN apt-get -q install --no-install-recommends -y openjdk-8-jdk
+RUN apt-get -q install --no-install-recommends -y zulu-8 zulu-7
 RUN update-alternatives --config java
 RUN update-alternatives --config javac
+RUN mv /usr/lib/jvm/java-7-openjdk-amd64 
/usr/lib/jvm/moved.java-7-openjdk-amd64
+RUN mv /usr/lib/jvm/zulu-7-amd64 /usr/lib/jvm/java-7-openjdk-amd64 
+RUN mv /usr/lib/jvm/zulu-8-amd64 /usr/lib/jvm/java-8-openjdk-amd64 
 
-
-# Apps that require Java
-###
-RUN apt-get -q update && apt-get -q install --no-install-recommends -y \
-ant \
-maven
 
 # Fixing the Apache commons / Maven dependency problem under Ubuntu:
 # See http://wiki.apache.org/commons/VfsProblems



hbase git commit: HBASE-19354 [branch-1] Build using a jdk that is beyond ubuntu trusty's openjdk-151

2017-11-29 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/branch-1.4 148080e52 -> 74e313396


HBASE-19354 [branch-1] Build using a jdk that is beyond ubuntu trusty's 
openjdk-151

Amend our DockerFile so it gets jdks from azul repo.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/74e31339
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/74e31339
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/74e31339

Branch: refs/heads/branch-1.4
Commit: 74e313396e4a574cdd7f3a0867411a155d6e2696
Parents: 148080e
Author: Michael Stack 
Authored: Mon Nov 27 15:27:32 2017 -0800
Committer: Michael Stack 
Committed: Wed Nov 29 13:27:22 2017 -0800

--
 dev-support/docker/Dockerfile | 42 +-
 1 file changed, 28 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/74e31339/dev-support/docker/Dockerfile
--
diff --git a/dev-support/docker/Dockerfile b/dev-support/docker/Dockerfile
index 2605e2c..f706e4f 100644
--- a/dev-support/docker/Dockerfile
+++ b/dev-support/docker/Dockerfile
@@ -63,29 +63,43 @@ RUN apt-get -q update && apt-get -q install 
--no-install-recommends -y \
 rsync \
 snappy \
 zlib1g-dev \
-wget \
-openjdk-7-jdk
+wget
+
+
+# Apps that require Java.
+# Maven and ant depend on ubuntu trusty's headless jdk7. The install of
+# maven and ant will pull down this jdk even though we don't want it.
+# Do the maven and ant install here rather than later where the jdk7
+# will overwrite the jdk7 we actually want to use. See next section on jdks.
+###
+RUN apt-get -q update && apt-get -q install --no-install-recommends -y \
+ant \
+maven
 
 ###
-# OpenJDK 8
+# Install jdk7 and jdk8.
 ###
-
+# The jdks in ubuntu trusty don't work. HDFS hangs on openjdk-7 151.
+# See HBASE-19204. So, we use the azul jdks because they are available, and
+# later versions of openjdk (openjdk-7 161). Below we add the azul repo and
+# then install its jdks. We then move aside the headless jdk7 added above
+# when we added maven and ant and rename the azul jvms as
+# though they were from openjdk (otherwise yetus won't set JAVA_HOME;
+# it does find /usr/lib/jvm/ -name java-* -type d so a symlink to the zulu jvms
+# won't work).
 RUN echo "dot_style = mega" > "/root/.wgetrc"
 RUN echo "quiet = on" >> "/root/.wgetrc"
-
-RUN apt-get -q update && apt-get -q install --no-install-recommends -y 
software-properties-common
-RUN add-apt-repository -y ppa:openjdk-r/ppa
+RUN apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 
0x219BD9C9
+RUN apt-get -q update && apt-get -q install --no-install-recommends -y 
software-properties-common python-software-properties
+RUN apt-add-repository 'deb http://repos.azulsystems.com/ubuntu stable main'
 RUN apt-get -q update
-RUN apt-get -q install --no-install-recommends -y openjdk-8-jdk
+RUN apt-get -q install --no-install-recommends -y zulu-8 zulu-7
 RUN update-alternatives --config java
 RUN update-alternatives --config javac
+RUN mv /usr/lib/jvm/java-7-openjdk-amd64 
/usr/lib/jvm/moved.java-7-openjdk-amd64
+RUN mv /usr/lib/jvm/zulu-7-amd64 /usr/lib/jvm/java-7-openjdk-amd64 
+RUN mv /usr/lib/jvm/zulu-8-amd64 /usr/lib/jvm/java-8-openjdk-amd64 
 
-
-# Apps that require Java
-###
-RUN apt-get -q update && apt-get -q install --no-install-recommends -y \
-ant \
-maven
 
 # Fixing the Apache commons / Maven dependency problem under Ubuntu:
 # See http://wiki.apache.org/commons/VfsProblems



hbase git commit: HBASE-19188 Build fails on branch-1 using maven-3.5.2

2017-11-29 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/branch-1.2 690367fc4 -> 45e856bc8


HBASE-19188 Build fails on branch-1 using maven-3.5.2

Changing tomcat:jasper-runtime scope to compile in
hbase-server, hbase-rest and hbase-thrift

Signed-off-by: Michael Stack 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/45e856bc
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/45e856bc
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/45e856bc

Branch: refs/heads/branch-1.2
Commit: 45e856bc878ae4491440250d56ab23659311a7d7
Parents: 690367f
Author: Peter Somogyi 
Authored: Sun Nov 26 12:35:57 2017 +0100
Committer: Michael Stack 
Committed: Wed Nov 29 12:29:44 2017 -0800

--
 hbase-rest/pom.xml   | 1 +
 hbase-server/pom.xml | 1 +
 hbase-thrift/pom.xml | 1 +
 3 files changed, 3 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/45e856bc/hbase-rest/pom.xml
--
diff --git a/hbase-rest/pom.xml b/hbase-rest/pom.xml
index e85407c..806f805 100644
--- a/hbase-rest/pom.xml
+++ b/hbase-rest/pom.xml
@@ -243,6 +243,7 @@
 
   tomcat
   jasper-runtime
+  compile
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/45e856bc/hbase-server/pom.xml
--
diff --git a/hbase-server/pom.xml b/hbase-server/pom.xml
index 81e529f..d3abf21 100644
--- a/hbase-server/pom.xml
+++ b/hbase-server/pom.xml
@@ -536,6 +536,7 @@
 
   tomcat
   jasper-runtime
+  compile
 
 
   org.jamon

http://git-wip-us.apache.org/repos/asf/hbase/blob/45e856bc/hbase-thrift/pom.xml
--
diff --git a/hbase-thrift/pom.xml b/hbase-thrift/pom.xml
index 2987fdd..957d644 100644
--- a/hbase-thrift/pom.xml
+++ b/hbase-thrift/pom.xml
@@ -329,6 +329,7 @@
 
   tomcat
   jasper-runtime
+  compile
 
   
 



hbase git commit: HBASE-19188 Build fails on branch-1 using maven-3.5.2

2017-11-29 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/branch-1.3 604266f7b -> 9519efa43


HBASE-19188 Build fails on branch-1 using maven-3.5.2

Changing tomcat:jasper-runtime scope to compile in
hbase-server, hbase-rest and hbase-thrift

Signed-off-by: Michael Stack 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/9519efa4
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/9519efa4
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/9519efa4

Branch: refs/heads/branch-1.3
Commit: 9519efa435243197e1bd1b69e9dbb57edd9663a5
Parents: 604266f
Author: Peter Somogyi 
Authored: Sun Nov 26 12:35:57 2017 +0100
Committer: Michael Stack 
Committed: Wed Nov 29 12:29:21 2017 -0800

--
 hbase-rest/pom.xml   | 1 +
 hbase-server/pom.xml | 1 +
 hbase-thrift/pom.xml | 1 +
 3 files changed, 3 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/9519efa4/hbase-rest/pom.xml
--
diff --git a/hbase-rest/pom.xml b/hbase-rest/pom.xml
index a405a27..be6bc62 100644
--- a/hbase-rest/pom.xml
+++ b/hbase-rest/pom.xml
@@ -243,6 +243,7 @@
 
   tomcat
   jasper-runtime
+  compile
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/9519efa4/hbase-server/pom.xml
--
diff --git a/hbase-server/pom.xml b/hbase-server/pom.xml
index 46d47ec..dddbb42 100644
--- a/hbase-server/pom.xml
+++ b/hbase-server/pom.xml
@@ -536,6 +536,7 @@
 
   tomcat
   jasper-runtime
+  compile
 
 
   org.jamon

http://git-wip-us.apache.org/repos/asf/hbase/blob/9519efa4/hbase-thrift/pom.xml
--
diff --git a/hbase-thrift/pom.xml b/hbase-thrift/pom.xml
index d330779..c6c232e 100644
--- a/hbase-thrift/pom.xml
+++ b/hbase-thrift/pom.xml
@@ -329,6 +329,7 @@
 
   tomcat
   jasper-runtime
+  compile
 
   
 



hbase git commit: HBASE-19188 Build fails on branch-1 using maven-3.5.2

2017-11-29 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/branch-1.4 5f0219b86 -> 148080e52


HBASE-19188 Build fails on branch-1 using maven-3.5.2

Changing tomcat:jasper-runtime scope to compile in
hbase-server, hbase-rest and hbase-thrift

Signed-off-by: Michael Stack 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/148080e5
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/148080e5
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/148080e5

Branch: refs/heads/branch-1.4
Commit: 148080e52ba0fb7e390f3ad2e83049afc0610d02
Parents: 5f0219b
Author: Peter Somogyi 
Authored: Sun Nov 26 12:35:57 2017 +0100
Committer: Michael Stack 
Committed: Wed Nov 29 12:28:45 2017 -0800

--
 hbase-rest/pom.xml   | 1 +
 hbase-server/pom.xml | 1 +
 hbase-thrift/pom.xml | 1 +
 3 files changed, 3 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/148080e5/hbase-rest/pom.xml
--
diff --git a/hbase-rest/pom.xml b/hbase-rest/pom.xml
index 435bd02..564a260 100644
--- a/hbase-rest/pom.xml
+++ b/hbase-rest/pom.xml
@@ -243,6 +243,7 @@
 
   tomcat
   jasper-runtime
+  compile
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/148080e5/hbase-server/pom.xml
--
diff --git a/hbase-server/pom.xml b/hbase-server/pom.xml
index 23acf39..3998d54 100644
--- a/hbase-server/pom.xml
+++ b/hbase-server/pom.xml
@@ -544,6 +544,7 @@
 
   tomcat
   jasper-runtime
+  compile
 
 
   org.jamon

http://git-wip-us.apache.org/repos/asf/hbase/blob/148080e5/hbase-thrift/pom.xml
--
diff --git a/hbase-thrift/pom.xml b/hbase-thrift/pom.xml
index 1a49c8f..3b8a3da 100644
--- a/hbase-thrift/pom.xml
+++ b/hbase-thrift/pom.xml
@@ -329,6 +329,7 @@
 
   tomcat
   jasper-runtime
+  compile
 
   
 



hbase git commit: HBASE-19188 Build fails on branch-1 using maven-3.5.2

2017-11-29 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/branch-1 4fe4d755c -> dce6285c2


HBASE-19188 Build fails on branch-1 using maven-3.5.2

Changing tomcat:jasper-runtime scope to compile in
hbase-server, hbase-rest and hbase-thrift

Signed-off-by: Michael Stack 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/dce6285c
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/dce6285c
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/dce6285c

Branch: refs/heads/branch-1
Commit: dce6285c2178bf81302df1cec28cf71810747ff4
Parents: 4fe4d75
Author: Peter Somogyi 
Authored: Sun Nov 26 12:35:57 2017 +0100
Committer: Michael Stack 
Committed: Wed Nov 29 12:28:09 2017 -0800

--
 hbase-rest/pom.xml   | 1 +
 hbase-server/pom.xml | 1 +
 hbase-thrift/pom.xml | 1 +
 3 files changed, 3 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/dce6285c/hbase-rest/pom.xml
--
diff --git a/hbase-rest/pom.xml b/hbase-rest/pom.xml
index f227d46..688f38a 100644
--- a/hbase-rest/pom.xml
+++ b/hbase-rest/pom.xml
@@ -243,6 +243,7 @@
 
   tomcat
   jasper-runtime
+  compile
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/dce6285c/hbase-server/pom.xml
--
diff --git a/hbase-server/pom.xml b/hbase-server/pom.xml
index e1b08d6..16b2f9e 100644
--- a/hbase-server/pom.xml
+++ b/hbase-server/pom.xml
@@ -544,6 +544,7 @@
 
   tomcat
   jasper-runtime
+  compile
 
 
   org.jamon

http://git-wip-us.apache.org/repos/asf/hbase/blob/dce6285c/hbase-thrift/pom.xml
--
diff --git a/hbase-thrift/pom.xml b/hbase-thrift/pom.xml
index 4ad9ccd..c46b939 100644
--- a/hbase-thrift/pom.xml
+++ b/hbase-thrift/pom.xml
@@ -329,6 +329,7 @@
 
   tomcat
   jasper-runtime
+  compile
 
   
 



hbase git commit: HBASE-19359 Revisit the default config of hbase client retries number

2017-11-29 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/branch-2 22b90c4a6 -> 9e7ab9ffe


HBASE-19359 Revisit the default config of hbase client retries number


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/9e7ab9ff
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/9e7ab9ff
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/9e7ab9ff

Branch: refs/heads/branch-2
Commit: 9e7ab9ffe2fc8dbbdc547fec314728f83dc7ed2f
Parents: 22b90c4
Author: Guanghao Zhang 
Authored: Tue Nov 28 21:08:19 2017 +0800
Committer: Michael Stack 
Committed: Wed Nov 29 10:33:20 2017 -0800

--
 .../src/main/java/org/apache/hadoop/hbase/client/Admin.java   | 2 +-
 .../java/org/apache/hadoop/hbase/client/ConnectionUtils.java  | 3 ++-
 .../src/main/java/org/apache/hadoop/hbase/HConstants.java | 7 ++-
 hbase-common/src/main/resources/hbase-default.xml | 2 +-
 .../src/main/java/org/apache/hadoop/hbase/master/HMaster.java | 2 +-
 .../org/apache/hadoop/hbase/regionserver/HRegionServer.java   | 2 +-
 .../hbase/regionserver/handler/RegionReplicaFlushHandler.java | 3 ++-
 .../regionserver/RegionReplicaReplicationEndpoint.java| 5 +++--
 .../regionserver/TestRegionReplicaReplicationEndpoint.java| 2 +-
 9 files changed, 18 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/9e7ab9ff/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
index d9f8e899..0567e8e 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
@@ -2628,4 +2628,4 @@ public interface Admin extends Abortable, Closeable {
* @return List of servers that are not cleared
*/
   List clearDeadServers(final List servers) throws 
IOException;
-}
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hbase/blob/9e7ab9ff/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
index 345fac6..e27bf71 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
@@ -116,7 +116,8 @@ public final class ConnectionUtils {
   HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
 // Go big. Multiply by 10. If we can't get to meta after this many retries
 // then something seriously wrong.
-int serversideMultiplier = 
c.getInt("hbase.client.serverside.retries.multiplier", 10);
+int serversideMultiplier = 
c.getInt(HConstants.HBASE_CLIENT_SERVERSIDE_RETRIES_MULTIPLIER,
+  HConstants.DEFAULT_HBASE_CLIENT_SERVERSIDE_RETRIES_MULTIPLIER);
 int retries = hcRetries * serversideMultiplier;
 c.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, retries);
 log.info(sn + " server-side Connection retries=" + retries);

http://git-wip-us.apache.org/repos/asf/hbase/blob/9e7ab9ff/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
--
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java 
b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
index 589fae3..594a895 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
@@ -790,7 +790,12 @@ public final class HConstants {
   /**
* Default value of {@link #HBASE_CLIENT_RETRIES_NUMBER}.
*/
-  public static final int DEFAULT_HBASE_CLIENT_RETRIES_NUMBER = 35;
+  public static final int DEFAULT_HBASE_CLIENT_RETRIES_NUMBER = 10;
+
+  public static final String HBASE_CLIENT_SERVERSIDE_RETRIES_MULTIPLIER =
+  "hbase.client.serverside.retries.multiplier";
+
+  public static final int DEFAULT_HBASE_CLIENT_SERVERSIDE_RETRIES_MULTIPLIER = 
3;
 
   /**
* Parameter name to set the default scanner caching for all clients.

http://git-wip-us.apache.org/repos/asf/hbase/blob/9e7ab9ff/hbase-common/src/main/resources/hbase-default.xml
--
diff --git a/hbase-common/src/main/resources/hbase-default.xml 
b/hbase-common/src/main/resources/hbase-default.xml
index 400699f..c9fe83b 100644
--- 

hbase git commit: HBASE-19359 Revisit the default config of hbase client retries number

2017-11-29 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/master f81ac42aa -> abb535eef


HBASE-19359 Revisit the default config of hbase client retries number


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/abb535ee
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/abb535ee
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/abb535ee

Branch: refs/heads/master
Commit: abb535eef661dc9cf0f72f18b96151ae7d5d0179
Parents: f81ac42
Author: Guanghao Zhang 
Authored: Tue Nov 28 21:08:19 2017 +0800
Committer: Michael Stack 
Committed: Wed Nov 29 10:32:42 2017 -0800

--
 .../src/main/java/org/apache/hadoop/hbase/client/Admin.java   | 2 +-
 .../java/org/apache/hadoop/hbase/client/ConnectionUtils.java  | 3 ++-
 .../src/main/java/org/apache/hadoop/hbase/HConstants.java | 7 ++-
 hbase-common/src/main/resources/hbase-default.xml | 2 +-
 .../src/main/java/org/apache/hadoop/hbase/master/HMaster.java | 2 +-
 .../org/apache/hadoop/hbase/regionserver/HRegionServer.java   | 2 +-
 .../hbase/regionserver/handler/RegionReplicaFlushHandler.java | 3 ++-
 .../regionserver/RegionReplicaReplicationEndpoint.java| 5 +++--
 .../regionserver/TestRegionReplicaReplicationEndpoint.java| 2 +-
 9 files changed, 18 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/abb535ee/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
index d9f8e899..0567e8e 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
@@ -2628,4 +2628,4 @@ public interface Admin extends Abortable, Closeable {
* @return List of servers that are not cleared
*/
   List clearDeadServers(final List servers) throws 
IOException;
-}
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hbase/blob/abb535ee/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
index 345fac6..e27bf71 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
@@ -116,7 +116,8 @@ public final class ConnectionUtils {
   HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
 // Go big. Multiply by 10. If we can't get to meta after this many retries
 // then something seriously wrong.
-int serversideMultiplier = 
c.getInt("hbase.client.serverside.retries.multiplier", 10);
+int serversideMultiplier = 
c.getInt(HConstants.HBASE_CLIENT_SERVERSIDE_RETRIES_MULTIPLIER,
+  HConstants.DEFAULT_HBASE_CLIENT_SERVERSIDE_RETRIES_MULTIPLIER);
 int retries = hcRetries * serversideMultiplier;
 c.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, retries);
 log.info(sn + " server-side Connection retries=" + retries);

http://git-wip-us.apache.org/repos/asf/hbase/blob/abb535ee/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
--
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java 
b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
index 834e5bb..14ce089 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
@@ -774,7 +774,12 @@ public final class HConstants {
   /**
* Default value of {@link #HBASE_CLIENT_RETRIES_NUMBER}.
*/
-  public static final int DEFAULT_HBASE_CLIENT_RETRIES_NUMBER = 35;
+  public static final int DEFAULT_HBASE_CLIENT_RETRIES_NUMBER = 10;
+
+  public static final String HBASE_CLIENT_SERVERSIDE_RETRIES_MULTIPLIER =
+  "hbase.client.serverside.retries.multiplier";
+
+  public static final int DEFAULT_HBASE_CLIENT_SERVERSIDE_RETRIES_MULTIPLIER = 
3;
 
   /**
* Parameter name to set the default scanner caching for all clients.

http://git-wip-us.apache.org/repos/asf/hbase/blob/abb535ee/hbase-common/src/main/resources/hbase-default.xml
--
diff --git a/hbase-common/src/main/resources/hbase-default.xml 
b/hbase-common/src/main/resources/hbase-default.xml
index be23dc8..d230e57 100644
--- 

hbase git commit: HBASE-19372 Remove the Span object in SyncFuture as it is useless now

2017-11-29 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/branch-2 6f7d2afcd -> 22b90c4a6


HBASE-19372 Remove the Span object in SyncFuture as it is useless now

Signed-off-by: Michael Stack 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/22b90c4a
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/22b90c4a
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/22b90c4a

Branch: refs/heads/branch-2
Commit: 22b90c4a647d0ffeec7778042eedd0a49a664ed0
Parents: 6f7d2af
Author: zhangduo 
Authored: Wed Nov 29 21:07:02 2017 +0800
Committer: Michael Stack 
Committed: Wed Nov 29 09:13:02 2017 -0800

--
 .../hbase/regionserver/wal/AbstractFSWAL.java   | 34 
 .../hbase/regionserver/wal/AsyncFSWAL.java  | 32 +--
 .../hadoop/hbase/regionserver/wal/FSHLog.java   | 42 
 .../hbase/regionserver/wal/SyncFuture.java  | 28 +
 .../hbase/regionserver/wal/TestSyncFuture.java  |  4 +-
 5 files changed, 47 insertions(+), 93 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/22b90c4a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
index f7fbd86..64f44cd 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
@@ -21,6 +21,8 @@ import static 
org.apache.hadoop.hbase.shaded.com.google.common.base.Precondition
 import static 
org.apache.hadoop.hbase.shaded.com.google.common.base.Preconditions.checkNotNull;
 import static 
org.apache.hadoop.hbase.wal.AbstractFSWALProvider.WAL_FILE_NAME_DELIMITER;
 
+import com.lmax.disruptor.RingBuffer;
+
 import java.io.IOException;
 import java.io.InterruptedIOException;
 import java.lang.management.MemoryType;
@@ -59,7 +61,6 @@ import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.exceptions.TimeoutIOException;
 import org.apache.hadoop.hbase.io.util.MemorySizeUtil;
 import org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl;
-import 
org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.hbase.trace.TraceUtil;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.CollectionUtils;
@@ -74,11 +75,10 @@ import org.apache.hadoop.hbase.wal.WALKey;
 import org.apache.hadoop.hbase.wal.WALProvider.WriterBase;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.util.StringUtils;
-import org.apache.htrace.core.Span;
 import org.apache.htrace.core.TraceScope;
 import org.apache.yetus.audience.InterfaceAudience;
 
-import com.lmax.disruptor.RingBuffer;
+import 
org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
 
 /**
  * Implementation of {@link WAL} to go against {@link FileSystem}; i.e. keep 
WALs in HDFS. Only one
@@ -696,13 +696,12 @@ public abstract class AbstractFSWAL 
implements WAL {
 }
   }
 
-  protected Span blockOnSync(final SyncFuture syncFuture) throws IOException {
+  protected final void blockOnSync(SyncFuture syncFuture) throws IOException {
 // Now we have published the ringbuffer, halt the current thread until we 
get an answer back.
 try {
   if (syncFuture != null) {
 syncFuture.get(walSyncTimeoutNs);
   }
-  return (syncFuture == null) ? null : syncFuture.getSpan();
 } catch (TimeoutIOException tioe) {
   // SyncFuture reuse by thread, if TimeoutIOException happens, ringbuffer
   // still refer to it, so if this thread use it next time may get a wrong
@@ -792,7 +791,8 @@ public abstract class AbstractFSWAL 
implements WAL {
* Get the backing files associated with this WAL.
* @return may be null if there are no files.
*/
-  protected FileStatus[] getFiles() throws IOException {
+  @VisibleForTesting
+  FileStatus[] getFiles() throws IOException {
 return CommonFSUtils.listStatus(fs, walDir, ourFiles);
   }
 
@@ -862,13 +862,13 @@ public abstract class AbstractFSWAL 
implements WAL {
 sequenceIdAccounting.updateStore(encodedRegionName, familyName, 
sequenceid, onlyIfGreater);
   }
 
-  protected SyncFuture getSyncFuture(long sequence, Span span) {
+  protected final SyncFuture getSyncFuture(long sequence) {
 return CollectionUtils
 .computeIfAbsent(syncFuturesByHandler, Thread.currentThread(), 
SyncFuture::new)
-.reset(sequence, span);
+.reset(sequence);

hbase git commit: HBASE-19372 Remove the Span object in SyncFuture as it is useless now

2017-11-29 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/master 8b32d3792 -> f81ac42aa


HBASE-19372 Remove the Span object in SyncFuture as it is useless now

Signed-off-by: Michael Stack 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/f81ac42a
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/f81ac42a
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/f81ac42a

Branch: refs/heads/master
Commit: f81ac42aa316ccde9a6b7d5765cf9c3060543c04
Parents: 8b32d37
Author: zhangduo 
Authored: Wed Nov 29 21:07:02 2017 +0800
Committer: Michael Stack 
Committed: Wed Nov 29 09:11:46 2017 -0800

--
 .../hbase/regionserver/wal/AbstractFSWAL.java   | 34 
 .../hbase/regionserver/wal/AsyncFSWAL.java  | 32 +--
 .../hadoop/hbase/regionserver/wal/FSHLog.java   | 42 
 .../hbase/regionserver/wal/SyncFuture.java  | 28 +
 .../hbase/regionserver/wal/TestSyncFuture.java  |  4 +-
 5 files changed, 47 insertions(+), 93 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/f81ac42a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
index f7fbd86..64f44cd 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
@@ -21,6 +21,8 @@ import static 
org.apache.hadoop.hbase.shaded.com.google.common.base.Precondition
 import static 
org.apache.hadoop.hbase.shaded.com.google.common.base.Preconditions.checkNotNull;
 import static 
org.apache.hadoop.hbase.wal.AbstractFSWALProvider.WAL_FILE_NAME_DELIMITER;
 
+import com.lmax.disruptor.RingBuffer;
+
 import java.io.IOException;
 import java.io.InterruptedIOException;
 import java.lang.management.MemoryType;
@@ -59,7 +61,6 @@ import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.exceptions.TimeoutIOException;
 import org.apache.hadoop.hbase.io.util.MemorySizeUtil;
 import org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl;
-import 
org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.hbase.trace.TraceUtil;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.CollectionUtils;
@@ -74,11 +75,10 @@ import org.apache.hadoop.hbase.wal.WALKey;
 import org.apache.hadoop.hbase.wal.WALProvider.WriterBase;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.util.StringUtils;
-import org.apache.htrace.core.Span;
 import org.apache.htrace.core.TraceScope;
 import org.apache.yetus.audience.InterfaceAudience;
 
-import com.lmax.disruptor.RingBuffer;
+import 
org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
 
 /**
  * Implementation of {@link WAL} to go against {@link FileSystem}; i.e. keep 
WALs in HDFS. Only one
@@ -696,13 +696,12 @@ public abstract class AbstractFSWAL 
implements WAL {
 }
   }
 
-  protected Span blockOnSync(final SyncFuture syncFuture) throws IOException {
+  protected final void blockOnSync(SyncFuture syncFuture) throws IOException {
 // Now we have published the ringbuffer, halt the current thread until we 
get an answer back.
 try {
   if (syncFuture != null) {
 syncFuture.get(walSyncTimeoutNs);
   }
-  return (syncFuture == null) ? null : syncFuture.getSpan();
 } catch (TimeoutIOException tioe) {
   // SyncFuture reuse by thread, if TimeoutIOException happens, ringbuffer
   // still refer to it, so if this thread use it next time may get a wrong
@@ -792,7 +791,8 @@ public abstract class AbstractFSWAL 
implements WAL {
* Get the backing files associated with this WAL.
* @return may be null if there are no files.
*/
-  protected FileStatus[] getFiles() throws IOException {
+  @VisibleForTesting
+  FileStatus[] getFiles() throws IOException {
 return CommonFSUtils.listStatus(fs, walDir, ourFiles);
   }
 
@@ -862,13 +862,13 @@ public abstract class AbstractFSWAL 
implements WAL {
 sequenceIdAccounting.updateStore(encodedRegionName, familyName, 
sequenceid, onlyIfGreater);
   }
 
-  protected SyncFuture getSyncFuture(long sequence, Span span) {
+  protected final SyncFuture getSyncFuture(long sequence) {
 return CollectionUtils
 .computeIfAbsent(syncFuturesByHandler, Thread.currentThread(), 
SyncFuture::new)
-.reset(sequence, span);
+.reset(sequence);
  

hbase git commit: HBASE-19362 Remove unused imports from hbase-thrift module

2017-11-29 Thread janh
Repository: hbase
Updated Branches:
  refs/heads/branch-2 64ddce303 -> 6f7d2afcd


HBASE-19362 Remove unused imports from hbase-thrift module

Signed-off-by: Jan Hentschel 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/6f7d2afc
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/6f7d2afc
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/6f7d2afc

Branch: refs/heads/branch-2
Commit: 6f7d2afcdd50a1b9f63cc7f03d1deb756307e521
Parents: 64ddce3
Author: Guangxu Cheng 
Authored: Wed Nov 29 14:43:17 2017 +0800
Committer: Jan Hentschel 
Committed: Wed Nov 29 16:32:16 2017 +0100

--
 .../org/apache/hadoop/hbase/thrift/TBoundedThreadPoolServer.java| 1 -
 .../src/main/java/org/apache/hadoop/hbase/thrift2/ThriftServer.java | 1 -
 .../src/test/java/org/apache/hadoop/hbase/thrift/TestCallQueue.java | 1 -
 3 files changed, 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/6f7d2afc/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/TBoundedThreadPoolServer.java
--
diff --git 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/TBoundedThreadPoolServer.java
 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/TBoundedThreadPoolServer.java
index 6bcd181..59825b1 100644
--- 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/TBoundedThreadPoolServer.java
+++ 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/TBoundedThreadPoolServer.java
@@ -29,7 +29,6 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.thrift.CallQueue.Call;
 import org.apache.hadoop.hbase.util.Threads;
 import org.apache.thrift.TException;
 import org.apache.thrift.TProcessor;

http://git-wip-us.apache.org/repos/asf/hbase/blob/6f7d2afc/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftServer.java
--
diff --git 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftServer.java 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftServer.java
index c590370..000c115 100644
--- 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftServer.java
+++ 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftServer.java
@@ -57,7 +57,6 @@ import org.apache.hadoop.hbase.security.SaslUtil;
 import org.apache.hadoop.hbase.security.SecurityUtil;
 import org.apache.hadoop.hbase.security.UserProvider;
 import org.apache.hadoop.hbase.thrift.CallQueue;
-import org.apache.hadoop.hbase.thrift.CallQueue.Call;
 import org.apache.hadoop.hbase.thrift.THBaseThreadPoolExecutor;
 import org.apache.hadoop.hbase.thrift.ThriftMetrics;
 import org.apache.hadoop.hbase.thrift2.generated.THBaseService;

http://git-wip-us.apache.org/repos/asf/hbase/blob/6f7d2afc/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift/TestCallQueue.java
--
diff --git 
a/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift/TestCallQueue.java 
b/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift/TestCallQueue.java
index e595847..ed9ca6b 100644
--- 
a/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift/TestCallQueue.java
+++ 
b/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift/TestCallQueue.java
@@ -30,7 +30,6 @@ import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.test.MetricsAssertHelper;
 import org.apache.hadoop.hbase.testclassification.ClientTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
-import org.apache.hadoop.hbase.thrift.CallQueue.Call;
 import org.junit.experimental.categories.Category;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;



hbase-site git commit: INFRA-10751 Empty commit

2017-11-29 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site fd365a2bc -> 51b7ea776


INFRA-10751 Empty commit


Project: http://git-wip-us.apache.org/repos/asf/hbase-site/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase-site/commit/51b7ea77
Tree: http://git-wip-us.apache.org/repos/asf/hbase-site/tree/51b7ea77
Diff: http://git-wip-us.apache.org/repos/asf/hbase-site/diff/51b7ea77

Branch: refs/heads/asf-site
Commit: 51b7ea776f30bf4f41f7b73668bd5604c1541193
Parents: fd365a2
Author: jenkins 
Authored: Wed Nov 29 15:17:59 2017 +
Committer: jenkins 
Committed: Wed Nov 29 15:17:59 2017 +

--

--




[04/51] [partial] hbase-site git commit: Published site at .

2017-11-29 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/fd365a2b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ProcedureFuture.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ProcedureFuture.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ProcedureFuture.html
index d438f22..7c59e27 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ProcedureFuture.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ProcedureFuture.html
@@ -1290,8 +1290,8 @@
 1282   CompactType 
compactType) throws IOException {
 1283switch (compactType) {
 1284  case MOB:
-1285
compact(this.connection.getAdminForMaster(), getMobRegionInfo(tableName), 
major,
-1286  columnFamily);
+1285
compact(this.connection.getAdminForMaster(), 
RegionInfo.createMobRegionInfo(tableName),
+1286major, columnFamily);
 1287break;
 1288  case NORMAL:
 1289checkTableExists(tableName);
@@ -3248,7 +3248,7 @@
 3240  new 
CallableAdminProtos.GetRegionInfoResponse.CompactionState() {
 3241@Override
 3242public 
AdminProtos.GetRegionInfoResponse.CompactionState call() throws Exception {
-3243  RegionInfo info = 
getMobRegionInfo(tableName);
+3243  RegionInfo info = 
RegionInfo.createMobRegionInfo(tableName);
 3244  GetRegionInfoRequest 
request =
 3245
RequestConverter.buildGetRegionInfoRequest(info.getRegionName(), true);
 3246  GetRegionInfoResponse 
response = masterAdmin.getRegionInfo(rpcController, request);
@@ -3312,7 +3312,7 @@
 3304}
 3305break;
 3306  default:
-3307throw new 
IllegalArgumentException("Unknowne compactType: " + compactType);
+3307throw new 
IllegalArgumentException("Unknown compactType: " + compactType);
 3308}
 3309if (state != null) {
 3310  return 
ProtobufUtil.createCompactionState(state);
@@ -3847,325 +3847,320 @@
 3839});
 3840  }
 3841
-3842  private RegionInfo 
getMobRegionInfo(TableName tableName) {
-3843return 
RegionInfoBuilder.newBuilder(tableName).setStartKey(Bytes.toBytes(".mob")).setRegionId(0)
-3844.build();
-3845  }
-3846
-3847  private RpcControllerFactory 
getRpcControllerFactory() {
-3848return this.rpcControllerFactory;
-3849  }
-3850
-3851  @Override
-3852  public void addReplicationPeer(String 
peerId, ReplicationPeerConfig peerConfig, boolean enabled)
-3853  throws IOException {
-3854executeCallable(new 
MasterCallableVoid(getConnection(), getRpcControllerFactory()) {
-3855  @Override
-3856  protected Void rpcCall() throws 
Exception {
-3857
master.addReplicationPeer(getRpcController(),
-3858  
RequestConverter.buildAddReplicationPeerRequest(peerId, peerConfig, 
enabled));
-3859return null;
-3860  }
-3861});
-3862  }
-3863
-3864  @Override
-3865  public void 
removeReplicationPeer(String peerId) throws IOException {
-3866executeCallable(new 
MasterCallableVoid(getConnection(), getRpcControllerFactory()) {
-3867  @Override
-3868  protected Void rpcCall() throws 
Exception {
-3869
master.removeReplicationPeer(getRpcController(),
-3870  
RequestConverter.buildRemoveReplicationPeerRequest(peerId));
-3871return null;
-3872  }
-3873});
-3874  }
-3875
-3876  @Override
-3877  public void 
enableReplicationPeer(final String peerId) throws IOException {
-3878executeCallable(new 
MasterCallableVoid(getConnection(), getRpcControllerFactory()) {
-3879  @Override
-3880  protected Void rpcCall() throws 
Exception {
-3881
master.enableReplicationPeer(getRpcController(),
-3882  
RequestConverter.buildEnableReplicationPeerRequest(peerId));
-3883return null;
-3884  }
-3885});
-3886  }
-3887
-3888  @Override
-3889  public void 
disableReplicationPeer(final String peerId) throws IOException {
-3890executeCallable(new 
MasterCallableVoid(getConnection(), getRpcControllerFactory()) {
-3891  @Override
-3892  protected Void rpcCall() throws 
Exception {
-3893
master.disableReplicationPeer(getRpcController(),
-3894  
RequestConverter.buildDisableReplicationPeerRequest(peerId));
-3895return null;
-3896  }
-3897});
-3898  }
-3899
-3900  @Override
-3901  public ReplicationPeerConfig 
getReplicationPeerConfig(final String peerId) throws IOException {
-3902return executeCallable(new 
MasterCallableReplicationPeerConfig(getConnection(),
-3903getRpcControllerFactory()) {
-3904  @Override
-3905  protected ReplicationPeerConfig 
rpcCall() throws Exception {
-3906GetReplicationPeerConfigResponse 
response = master.getReplicationPeerConfig(
-3907  getRpcController(), 

[49/51] [partial] hbase-site git commit: Published site at .

2017-11-29 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/fd365a2b/apidocs/index-all.html
--
diff --git a/apidocs/index-all.html b/apidocs/index-all.html
index 51c2573..8ba05c3 100644
--- a/apidocs/index-all.html
+++ b/apidocs/index-all.html
@@ -732,7 +732,7 @@
 
 batch(List?
 extends Row) - Method in interface 
org.apache.hadoop.hbase.client.AsyncTable
 
-Method that does a batch call on Deletes, Gets, Puts, 
Increments and Appends.
+Method that does a batch call on Deletes, Gets, Puts, 
Increments, Appends and RowMutations.
 
 batch(List?
 extends Row, Object[]) - Method in interface 
org.apache.hadoop.hbase.client.Table
 
@@ -1251,7 +1251,9 @@
 checkAndDelete(byte[],
 byte[], byte[], byte[], Delete) - Method in class 
org.apache.hadoop.hbase.rest.client.RemoteHTable
 
 checkAndDelete(byte[],
 byte[], byte[], CompareFilter.CompareOp, byte[], Delete) - Method 
in class org.apache.hadoop.hbase.rest.client.RemoteHTable
-
+
+Deprecated.
+
 checkAndDelete(byte[],
 byte[], byte[], CompareOperator, byte[], Delete) - Method in class 
org.apache.hadoop.hbase.rest.client.RemoteHTable
 
 checkAndMutate(byte[],
 byte[]) - Method in interface org.apache.hadoop.hbase.client.AsyncTable
@@ -1270,7 +1272,9 @@
 Atomically checks if a row/family/qualifier value matches 
the expected value.
 
 checkAndMutate(byte[],
 byte[], byte[], CompareFilter.CompareOp, byte[], RowMutations) - 
Method in class org.apache.hadoop.hbase.rest.client.RemoteHTable
-
+
+Deprecated.
+
 checkAndMutate(byte[],
 byte[], byte[], CompareOperator, byte[], RowMutations) - Method in 
class org.apache.hadoop.hbase.rest.client.RemoteHTable
 
 checkAndPut(byte[],
 byte[], byte[], byte[], Put) - Method in interface 
org.apache.hadoop.hbase.client.Table
@@ -1293,7 +1297,9 @@
 checkAndPut(byte[],
 byte[], byte[], byte[], Put) - Method in class 
org.apache.hadoop.hbase.rest.client.RemoteHTable
 
 checkAndPut(byte[],
 byte[], byte[], CompareFilter.CompareOp, byte[], Put) - Method in 
class org.apache.hadoop.hbase.rest.client.RemoteHTable
-
+
+Deprecated.
+
 checkAndPut(byte[],
 byte[], byte[], CompareOperator, byte[], Put) - Method in class 
org.apache.hadoop.hbase.rest.client.RemoteHTable
 
 checkForAnd(byte[],
 int) - Static method in class org.apache.hadoop.hbase.filter.ParseFilter
@@ -1717,6 +1723,14 @@
 
 Compact a column family within a table.
 
+compact(TableName,
 CompactType) - Method in interface 
org.apache.hadoop.hbase.client.AsyncAdmin
+
+Compact a table.
+
+compact(TableName,
 byte[], CompactType) - Method in interface 
org.apache.hadoop.hbase.client.AsyncAdmin
+
+Compact a column family within a table.
+
 COMPACTION_ENABLED
 - Static variable in class org.apache.hadoop.hbase.client.TableDescriptorBuilder
 
 Used by HBase Shell interface to access this metadata
@@ -2772,6 +2786,10 @@
 
 Create a max byte array with the specified max byte 
count
 
+createMobRegionInfo(TableName)
 - Static method in interface org.apache.hadoop.hbase.client.RegionInfo
+
+Creates a RegionInfo object for MOB data.
+
 createNamespace(NamespaceDescriptor)
 - Method in interface org.apache.hadoop.hbase.client.Admin
 
 Create a new namespace.
@@ -6080,6 +6098,10 @@
 
 Get the current compaction state of a table.
 
+getCompactionState(TableName,
 CompactType) - Method in interface 
org.apache.hadoop.hbase.client.AsyncAdmin
+
+Get the current compaction state of a table.
+
 getCompactionStateForRegion(byte[])
 - Method in interface org.apache.hadoop.hbase.client.Admin
 
 Get the current compaction state of region.
@@ -8182,7 +8204,9 @@
 
 
 getTableDescriptor()
 - Method in class org.apache.hadoop.hbase.rest.client.RemoteHTable
-
+
+Deprecated.
+
 getTableDescriptors(ListString)
 - Method in interface org.apache.hadoop.hbase.client.Admin
 
 Deprecated.
@@ -11181,6 +11205,14 @@
 
 Major compact a column family within a table.
 
+majorCompact(TableName,
 CompactType) - Method in interface 
org.apache.hadoop.hbase.client.AsyncAdmin
+
+Major compact a table.
+
+majorCompact(TableName,
 byte[], CompactType) - Method in interface 
org.apache.hadoop.hbase.client.AsyncAdmin
+
+Major compact a column family within a table.
+
 majorCompactRegion(byte[])
 - Method in interface org.apache.hadoop.hbase.client.Admin
 
 Major compact a table or an individual region.
@@ -15730,7 +15762,9 @@ Input/OutputFormats, a table indexing MapReduce job, 
and utility methods.
 Set timeout for a whole operation such as get, put or 
delete.
 
 setOperationTimeout(int)
 - Method in class org.apache.hadoop.hbase.rest.client.RemoteHTable
-
+
+Deprecated.
+
 setOwner(User)
 - Method in class org.apache.hadoop.hbase.client.TableDescriptorBuilder
 
 Deprecated.
@@ -15820,7 +15854,9 @@ Input/OutputFormats, a table indexing MapReduce job, 
and utility methods.
 Set timeout for each read(get, scan) rpc request.
 
 setReadRpcTimeout(int)
 - Method in class org.apache.hadoop.hbase.rest.client.RemoteHTable
-
+
+Deprecated.
+
 

[35/51] [partial] hbase-site git commit: Published site at .

2017-11-29 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/fd365a2b/devapidocs/org/apache/hadoop/hbase/client/AsyncAdmin.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/AsyncAdmin.html 
b/devapidocs/org/apache/hadoop/hbase/client/AsyncAdmin.html
index 8cbd00e..8eee002 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/AsyncAdmin.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/AsyncAdmin.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":6,"i1":6,"i2":18,"i3":6,"i4":6,"i5":6,"i6":18,"i7":6,"i8":6,"i9":6,"i10":6,"i11":6,"i12":6,"i13":6,"i14":6,"i15":6,"i16":6,"i17":6,"i18":6,"i19":6,"i20":6,"i21":6,"i22":6,"i23":6,"i24":6,"i25":6,"i26":6,"i27":6,"i28":6,"i29":6,"i30":6,"i31":6,"i32":6,"i33":6,"i34":6,"i35":6,"i36":6,"i37":6,"i38":6,"i39":6,"i40":6,"i41":6,"i42":6,"i43":6,"i44":18,"i45":6,"i46":6,"i47":6,"i48":6,"i49":6,"i50":6,"i51":6,"i52":6,"i53":18,"i54":18,"i55":18,"i56":6,"i57":6,"i58":6,"i59":6,"i60":6,"i61":6,"i62":6,"i63":18,"i64":6,"i65":6,"i66":6,"i67":6,"i68":6,"i69":6,"i70":6,"i71":6,"i72":6,"i73":6,"i74":6,"i75":6,"i76":6,"i77":6,"i78":6,"i79":18,"i80":6,"i81":6,"i82":6,"i83":6,"i84":6,"i85":6,"i86":6,"i87":18,"i88":6,"i89":6,"i90":6,"i91":18,"i92":6,"i93":6,"i94":6,"i95":6,"i96":6,"i97":6,"i98":6,"i99":6,"i100":6,"i101":6,"i102":6,"i103":6,"i104":6,"i105":6,"i106":6,"i107":6,"i108":6,"i109":6,"i110":6,"i111":6,"i112":6,"i113":6,"i114":6,"i115":6,"i116":6,"i117":6,"i118":6,"i119":6,"i
 
120":6,"i121":6,"i122":6,"i123":18,"i124":18,"i125":6,"i126":6,"i127":6,"i128":6,"i129":6,"i130":6,"i131":6,"i132":6,"i133":6,"i134":6,"i135":6,"i136":6,"i137":6};
+var methods = 
{"i0":6,"i1":6,"i2":18,"i3":6,"i4":6,"i5":6,"i6":18,"i7":6,"i8":6,"i9":6,"i10":6,"i11":6,"i12":6,"i13":6,"i14":18,"i15":18,"i16":6,"i17":6,"i18":6,"i19":6,"i20":6,"i21":6,"i22":6,"i23":6,"i24":6,"i25":6,"i26":6,"i27":6,"i28":6,"i29":6,"i30":6,"i31":6,"i32":6,"i33":6,"i34":6,"i35":6,"i36":6,"i37":6,"i38":6,"i39":6,"i40":6,"i41":6,"i42":6,"i43":6,"i44":6,"i45":6,"i46":18,"i47":6,"i48":6,"i49":18,"i50":6,"i51":6,"i52":6,"i53":6,"i54":6,"i55":6,"i56":18,"i57":18,"i58":18,"i59":6,"i60":6,"i61":6,"i62":6,"i63":6,"i64":6,"i65":6,"i66":18,"i67":6,"i68":6,"i69":6,"i70":6,"i71":6,"i72":6,"i73":6,"i74":6,"i75":6,"i76":6,"i77":6,"i78":6,"i79":6,"i80":6,"i81":6,"i82":18,"i83":6,"i84":6,"i85":6,"i86":6,"i87":6,"i88":6,"i89":6,"i90":18,"i91":6,"i92":6,"i93":6,"i94":18,"i95":6,"i96":6,"i97":6,"i98":6,"i99":6,"i100":18,"i101":18,"i102":6,"i103":6,"i104":6,"i105":6,"i106":6,"i107":6,"i108":6,"i109":6,"i110":6,"i111":6,"i112":6,"i113":6,"i114":6,"i115":6,"i116":6,"i117":6,"i118":6,"i119"
 
:6,"i120":6,"i121":6,"i122":6,"i123":6,"i124":6,"i125":6,"i126":6,"i127":6,"i128":18,"i129":18,"i130":6,"i131":6,"i132":6,"i133":6,"i134":6,"i135":6,"i136":6,"i137":6,"i138":6,"i139":6,"i140":6,"i141":6,"i142":6};
 var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],4:["t3","Abstract Methods"],16:["t5","Default Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -221,13 +221,13 @@ public interface 
-http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true;
 title="class or interface in java.lang">Void
+default http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true;
 title="class or interface in java.lang">Void
 compact(TableNametableName)
 Compact a table.
 
 
 
-http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true;
 title="class or interface in java.lang">Void
+default http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true;
 title="class or interface in java.lang">Void
 compact(TableNametableName,
byte[]columnFamily)
 Compact a column family within a table.
@@ -235,31 +235,46 @@ public interface 
 http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true;
 title="class or interface in java.lang">Void
+compact(TableNametableName,
+   byte[]columnFamily,
+   CompactTypecompactType)
+Compact a column family within a table.
+
+
+

[51/51] [partial] hbase-site git commit: Published site at .

2017-11-29 Thread git-site-role
Published site at .


Project: http://git-wip-us.apache.org/repos/asf/hbase-site/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase-site/commit/fd365a2b
Tree: http://git-wip-us.apache.org/repos/asf/hbase-site/tree/fd365a2b
Diff: http://git-wip-us.apache.org/repos/asf/hbase-site/diff/fd365a2b

Branch: refs/heads/asf-site
Commit: fd365a2bc9e84209b92dafbedad1a982841843fb
Parents: 83b248d
Author: jenkins 
Authored: Wed Nov 29 15:17:22 2017 +
Committer: jenkins 
Committed: Wed Nov 29 15:17:22 2017 +

--
 acid-semantics.html | 4 +-
 apache_hbase_reference_guide.pdf| 6 +-
 apidocs/deprecated-list.html|   347 +-
 apidocs/index-all.html  |54 +-
 .../org/apache/hadoop/hbase/HRegionInfo.html| 2 +-
 .../hbase/class-use/HTableDescriptor.html   | 4 +-
 .../hadoop/hbase/class-use/TableName.html   |   115 +-
 .../org/apache/hadoop/hbase/client/Admin.html   |   484 +-
 .../apache/hadoop/hbase/client/AsyncAdmin.html  |   597 +-
 .../apache/hadoop/hbase/client/AsyncTable.html  |14 +-
 .../apache/hadoop/hbase/client/RegionInfo.html  |90 +-
 .../hbase/client/class-use/CompactType.html |37 +
 .../hbase/client/class-use/CompactionState.html | 9 +-
 .../hadoop/hbase/client/class-use/Delete.html   | 4 +-
 .../hadoop/hbase/client/class-use/Put.html  | 4 +-
 .../hbase/client/class-use/RegionInfo.html  |14 +-
 .../hadoop/hbase/client/class-use/Row.html  | 2 +-
 .../hbase/client/class-use/RowMutations.html| 4 +-
 .../class-use/CompareFilter.CompareOp.html  |12 +-
 .../hadoop/hbase/rest/client/RemoteHTable.html  |   169 +-
 .../org/apache/hadoop/hbase/client/Admin.html   |  2898 +-
 .../apache/hadoop/hbase/client/AsyncAdmin.html  |  1723 +-
 .../client/AsyncTable.CoprocessorCallback.html  |12 +-
 .../apache/hadoop/hbase/client/AsyncTable.html  |12 +-
 .../apache/hadoop/hbase/client/RegionInfo.html  |   333 +-
 .../hadoop/hbase/rest/client/RemoteHTable.html  |  1347 +-
 book.html   | 2 +-
 bulk-loads.html | 4 +-
 checkstyle-aggregate.html   | 30256 -
 checkstyle.rss  |22 +-
 coc.html| 4 +-
 cygwin.html | 4 +-
 dependencies.html   | 4 +-
 dependency-convergence.html | 4 +-
 dependency-info.html| 4 +-
 dependency-management.html  | 4 +-
 devapidocs/constant-values.html | 6 +-
 devapidocs/deprecated-list.html |91 +-
 devapidocs/index-all.html   |   100 +-
 .../org/apache/hadoop/hbase/HRegionInfo.html| 2 +-
 .../hadoop/hbase/backup/package-tree.html   | 4 +-
 .../hadoop/hbase/class-use/CellScannable.html   | 5 +-
 .../hbase/class-use/HTableDescriptor.html   | 6 +-
 .../hadoop/hbase/class-use/TableName.html   |   601 +-
 .../org/apache/hadoop/hbase/client/Admin.html   |   484 +-
 .../apache/hadoop/hbase/client/AsyncAdmin.html  |   597 +-
 .../client/AsyncBatchRpcRetryingCaller.html |24 +-
 .../hadoop/hbase/client/AsyncHBaseAdmin.html|   266 +-
 .../apache/hadoop/hbase/client/AsyncTable.html  |14 +-
 .../hadoop/hbase/client/AsyncTableImpl.html |12 +-
 .../apache/hadoop/hbase/client/HBaseAdmin.html  |   325 +-
 .../org/apache/hadoop/hbase/client/HTable.html  |   182 +-
 .../hbase/client/ImmutableHRegionInfo.html  | 2 +-
 .../hbase/client/MultiServerCallable.html   |24 +-
 ...dmin.AddColumnFamilyProcedureBiConsumer.html | 6 +-
 ...dmin.CreateNamespaceProcedureBiConsumer.html | 6 +-
 ...aseAdmin.CreateTableProcedureBiConsumer.html | 6 +-
 ...n.DeleteColumnFamilyProcedureBiConsumer.html | 6 +-
 ...dmin.DeleteNamespaceProcedureBiConsumer.html | 6 +-
 ...aseAdmin.DeleteTableProcedureBiConsumer.html | 8 +-
 ...seAdmin.DisableTableProcedureBiConsumer.html | 6 +-
 ...aseAdmin.EnableTableProcedureBiConsumer.html | 6 +-
 ...min.MergeTableRegionProcedureBiConsumer.html | 6 +-
 ...n.ModifyColumnFamilyProcedureBiConsumer.html | 6 +-
 ...dmin.ModifyNamespaceProcedureBiConsumer.html | 6 +-
 ...aseAdmin.ModifyTableProcedureBiConsumer.html | 6 +-
 ...HBaseAdmin.NamespaceProcedureBiConsumer.html |14 +-
 .../RawAsyncHBaseAdmin.ProcedureBiConsumer.html |10 +-
 ...min.SplitTableRegionProcedureBiConsumer.html | 6 +-
 ...syncHBaseAdmin.TableProcedureBiConsumer.html |14 +-
 ...eAdmin.TruncateTableProcedureBiConsumer.html | 6 +-
 .../hadoop/hbase/client/RawAsyncHBaseAdmin.html |   342 +-
 

[48/51] [partial] hbase-site git commit: Published site at .

2017-11-29 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/fd365a2b/apidocs/org/apache/hadoop/hbase/client/Admin.html
--
diff --git a/apidocs/org/apache/hadoop/hbase/client/Admin.html 
b/apidocs/org/apache/hadoop/hbase/client/Admin.html
index a0091b9..ede7015 100644
--- a/apidocs/org/apache/hadoop/hbase/client/Admin.html
+++ b/apidocs/org/apache/hadoop/hbase/client/Admin.html
@@ -3193,13 +3193,61 @@ void
+
+
+
+
+compact
+voidcompact(TableNametableName,
+ CompactTypecompactType)
+  throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException,
+ http://docs.oracle.com/javase/8/docs/api/java/lang/InterruptedException.html?is-external=true;
 title="class or interface in java.lang">InterruptedException
+Compact a table.  Asynchronous operation in that this 
method requests that a
+ Compaction run and then it returns. It does not wait on the completion of 
Compaction
+ (it can take a while).
+
+Parameters:
+tableName - table to compact
+compactType - CompactType
+Throws:
+http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException - if a remote or 
network exception occurs
+http://docs.oracle.com/javase/8/docs/api/java/lang/InterruptedException.html?is-external=true;
 title="class or interface in java.lang">InterruptedException
+
+
+
+
+
+
+
+
+compact
+voidcompact(TableNametableName,
+ byte[]columnFamily,
+ CompactTypecompactType)
+  throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException,
+ http://docs.oracle.com/javase/8/docs/api/java/lang/InterruptedException.html?is-external=true;
 title="class or interface in java.lang">InterruptedException
+Compact a column family within a table.  Asynchronous 
operation in that this method
+ requests that a Compaction run and then it returns. It does not wait on the
+ completion of Compaction (it can take a while).
+
+Parameters:
+tableName - table to compact
+columnFamily - column family within a table
+compactType - CompactType
+Throws:
+http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException - if not a mob 
column family or if a remote or network exception occurs
+http://docs.oracle.com/javase/8/docs/api/java/lang/InterruptedException.html?is-external=true;
 title="class or interface in java.lang">InterruptedException
+
+
+
 
 
 
 
 
 majorCompact
-voidmajorCompact(TableNametableName)
+voidmajorCompact(TableNametableName)
throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 Major compact a table. Asynchronous operation in that this 
method requests
  that a Compaction run and then it returns. It does not wait on the completion 
of Compaction
@@ -3218,7 +3266,7 @@ void
 
 majorCompactRegion
-voidmajorCompactRegion(byte[]regionName)
+voidmajorCompactRegion(byte[]regionName)
  throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 Major compact a table or an individual region. Asynchronous 
operation in that this method requests
  that a Compaction run and then it returns. It does not wait on the completion 
of Compaction
@@ -3237,7 +3285,7 @@ void
 
 majorCompact
-voidmajorCompact(TableNametableName,
+voidmajorCompact(TableNametableName,
   byte[]columnFamily)
throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 Major compact a column family within a table. Asynchronous 
operation in that this method requests
@@ -3258,7 +3306,7 @@ void
 
 majorCompactRegion
-voidmajorCompactRegion(byte[]regionName,
+voidmajorCompactRegion(byte[]regionName,
 byte[]columnFamily)
  throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 Major compact a column family within region. Asynchronous 
operation in that this method requests
@@ -3273,6 +3321,54 @@ void
+
+
+
+
+majorCompact
+voidmajorCompact(TableNametableName,
+  CompactTypecompactType)
+   throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException,
+  http://docs.oracle.com/javase/8/docs/api/java/lang/InterruptedException.html?is-external=true;
 title="class or interface in java.lang">InterruptedException
+Major compact a table.  Asynchronous operation in that this 
method requests that a
+ Compaction run and then it 

[38/51] [partial] hbase-site git commit: Published site at .

2017-11-29 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/fd365a2b/checkstyle.rss
--
diff --git a/checkstyle.rss b/checkstyle.rss
index 63c077c..3283c25 100644
--- a/checkstyle.rss
+++ b/checkstyle.rss
@@ -26,7 +26,7 @@ under the License.
 2007 - 2017 The Apache Software Foundation
 
   File: 3437,
- Errors: 20812,
+ Errors: 20803,
  Warnings: 0,
  Infos: 0
   
@@ -3765,7 +3765,7 @@ under the License.
   0
 
 
-  4
+  3
 
   
   
@@ -9477,7 +9477,7 @@ under the License.
   0
 
 
-  15
+  22
 
   
   
@@ -13957,7 +13957,7 @@ under the License.
   0
 
 
-  112
+  118
 
   
   
@@ -22063,7 +22063,7 @@ under the License.
   0
 
 
-  102
+  99
 
   
   
@@ -29987,7 +29987,7 @@ under the License.
   0
 
 
-  115
+  102
 
   
   
@@ -33291,7 +33291,7 @@ under the License.
   0
 
 
-  13
+  12
 
   
   
@@ -39787,7 +39787,7 @@ under the License.
   0
 
 
-  3
+  2
 
   
   
@@ -41341,7 +41341,7 @@ under the License.
   0
 
 
-  6
+  3
 
   
   
@@ -46731,7 +46731,7 @@ under the License.
   0
 
 
-  63
+  67
 
   
   
@@ -47739,7 +47739,7 @@ under the License.
   0
 
 
-  118
+  114
 
   
   

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/fd365a2b/coc.html
--
diff --git a/coc.html b/coc.html
index 10ae93f..e6a6b80 100644
--- a/coc.html
+++ b/coc.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  
   Code of Conduct Policy
@@ -380,7 +380,7 @@ email to mailto:priv...@hbase.apache.org;>the priv
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2017-11-28
+  Last Published: 
2017-11-29
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/fd365a2b/cygwin.html
--
diff --git a/cygwin.html b/cygwin.html
index 9c1b8f7..ec8fe38 100644
--- a/cygwin.html
+++ b/cygwin.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  Installing Apache HBase (TM) on Windows using 
Cygwin
 
@@ -679,7 +679,7 @@ Now your HBase server is running, start 
coding and build that next
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2017-11-28
+  Last Published: 
2017-11-29
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/fd365a2b/dependencies.html
--
diff --git a/dependencies.html b/dependencies.html
index 0af8122..a2a1ae9 100644
--- a/dependencies.html
+++ b/dependencies.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  Project Dependencies
 
@@ -445,7 +445,7 @@
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2017-11-28
+  Last Published: 
2017-11-29
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/fd365a2b/dependency-convergence.html
--
diff --git a/dependency-convergence.html b/dependency-convergence.html
index 0186244..6ba4c49 100644
--- a/dependency-convergence.html
+++ b/dependency-convergence.html
@@ -7,7 +7,7 @@
   

[12/51] [partial] hbase-site git commit: Published site at .

2017-11-29 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/fd365a2b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.DeleteTableFuture.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.DeleteTableFuture.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.DeleteTableFuture.html
index d438f22..7c59e27 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.DeleteTableFuture.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.DeleteTableFuture.html
@@ -1290,8 +1290,8 @@
 1282   CompactType 
compactType) throws IOException {
 1283switch (compactType) {
 1284  case MOB:
-1285
compact(this.connection.getAdminForMaster(), getMobRegionInfo(tableName), 
major,
-1286  columnFamily);
+1285
compact(this.connection.getAdminForMaster(), 
RegionInfo.createMobRegionInfo(tableName),
+1286major, columnFamily);
 1287break;
 1288  case NORMAL:
 1289checkTableExists(tableName);
@@ -3248,7 +3248,7 @@
 3240  new 
CallableAdminProtos.GetRegionInfoResponse.CompactionState() {
 3241@Override
 3242public 
AdminProtos.GetRegionInfoResponse.CompactionState call() throws Exception {
-3243  RegionInfo info = 
getMobRegionInfo(tableName);
+3243  RegionInfo info = 
RegionInfo.createMobRegionInfo(tableName);
 3244  GetRegionInfoRequest 
request =
 3245
RequestConverter.buildGetRegionInfoRequest(info.getRegionName(), true);
 3246  GetRegionInfoResponse 
response = masterAdmin.getRegionInfo(rpcController, request);
@@ -3312,7 +3312,7 @@
 3304}
 3305break;
 3306  default:
-3307throw new 
IllegalArgumentException("Unknowne compactType: " + compactType);
+3307throw new 
IllegalArgumentException("Unknown compactType: " + compactType);
 3308}
 3309if (state != null) {
 3310  return 
ProtobufUtil.createCompactionState(state);
@@ -3847,325 +3847,320 @@
 3839});
 3840  }
 3841
-3842  private RegionInfo 
getMobRegionInfo(TableName tableName) {
-3843return 
RegionInfoBuilder.newBuilder(tableName).setStartKey(Bytes.toBytes(".mob")).setRegionId(0)
-3844.build();
-3845  }
-3846
-3847  private RpcControllerFactory 
getRpcControllerFactory() {
-3848return this.rpcControllerFactory;
-3849  }
-3850
-3851  @Override
-3852  public void addReplicationPeer(String 
peerId, ReplicationPeerConfig peerConfig, boolean enabled)
-3853  throws IOException {
-3854executeCallable(new 
MasterCallableVoid(getConnection(), getRpcControllerFactory()) {
-3855  @Override
-3856  protected Void rpcCall() throws 
Exception {
-3857
master.addReplicationPeer(getRpcController(),
-3858  
RequestConverter.buildAddReplicationPeerRequest(peerId, peerConfig, 
enabled));
-3859return null;
-3860  }
-3861});
-3862  }
-3863
-3864  @Override
-3865  public void 
removeReplicationPeer(String peerId) throws IOException {
-3866executeCallable(new 
MasterCallableVoid(getConnection(), getRpcControllerFactory()) {
-3867  @Override
-3868  protected Void rpcCall() throws 
Exception {
-3869
master.removeReplicationPeer(getRpcController(),
-3870  
RequestConverter.buildRemoveReplicationPeerRequest(peerId));
-3871return null;
-3872  }
-3873});
-3874  }
-3875
-3876  @Override
-3877  public void 
enableReplicationPeer(final String peerId) throws IOException {
-3878executeCallable(new 
MasterCallableVoid(getConnection(), getRpcControllerFactory()) {
-3879  @Override
-3880  protected Void rpcCall() throws 
Exception {
-3881
master.enableReplicationPeer(getRpcController(),
-3882  
RequestConverter.buildEnableReplicationPeerRequest(peerId));
-3883return null;
-3884  }
-3885});
-3886  }
-3887
-3888  @Override
-3889  public void 
disableReplicationPeer(final String peerId) throws IOException {
-3890executeCallable(new 
MasterCallableVoid(getConnection(), getRpcControllerFactory()) {
-3891  @Override
-3892  protected Void rpcCall() throws 
Exception {
-3893
master.disableReplicationPeer(getRpcController(),
-3894  
RequestConverter.buildDisableReplicationPeerRequest(peerId));
-3895return null;
-3896  }
-3897});
-3898  }
-3899
-3900  @Override
-3901  public ReplicationPeerConfig 
getReplicationPeerConfig(final String peerId) throws IOException {
-3902return executeCallable(new 
MasterCallableReplicationPeerConfig(getConnection(),
-3903getRpcControllerFactory()) {
-3904  @Override
-3905  protected ReplicationPeerConfig 
rpcCall() throws Exception {
-3906GetReplicationPeerConfigResponse 
response = master.getReplicationPeerConfig(
-3907  getRpcController(), 

[25/51] [partial] hbase-site git commit: Published site at .

2017-11-29 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/fd365a2b/devapidocs/org/apache/hadoop/hbase/master/procedure/package-tree.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/procedure/package-tree.html 
b/devapidocs/org/apache/hadoop/hbase/master/procedure/package-tree.html
index fe914c5..7f8dbfb 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/procedure/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/procedure/package-tree.html
@@ -209,8 +209,8 @@
 
 java.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true;
 title="class or interface in java.lang">EnumE (implements java.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true;
 title="class or interface in java.lang">ComparableT, java.io.http://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true;
 title="class or interface in java.io">Serializable)
 
-org.apache.hadoop.hbase.master.procedure.TableProcedureInterface.TableOperationType
 org.apache.hadoop.hbase.master.procedure.ServerProcedureInterface.ServerOperationType
+org.apache.hadoop.hbase.master.procedure.TableProcedureInterface.TableOperationType
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/fd365a2b/devapidocs/org/apache/hadoop/hbase/monitoring/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/monitoring/package-tree.html 
b/devapidocs/org/apache/hadoop/hbase/monitoring/package-tree.html
index d264c19..c43c370 100644
--- a/devapidocs/org/apache/hadoop/hbase/monitoring/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/monitoring/package-tree.html
@@ -125,8 +125,8 @@
 
 java.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true;
 title="class or interface in java.lang">EnumE (implements java.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true;
 title="class or interface in java.lang">ComparableT, java.io.http://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true;
 title="class or interface in java.io">Serializable)
 
-org.apache.hadoop.hbase.monitoring.MonitoredTask.State
 org.apache.hadoop.hbase.monitoring.TaskMonitor.TaskFilter.TaskType
+org.apache.hadoop.hbase.monitoring.MonitoredTask.State
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/fd365a2b/devapidocs/org/apache/hadoop/hbase/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/package-tree.html 
b/devapidocs/org/apache/hadoop/hbase/package-tree.html
index a5af6b5..08a4db2 100644
--- a/devapidocs/org/apache/hadoop/hbase/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/package-tree.html
@@ -439,18 +439,18 @@
 java.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true;
 title="class or interface in java.lang">EnumE (implements java.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true;
 title="class or interface in java.lang">ComparableT, java.io.http://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true;
 title="class or interface in java.io">Serializable)
 
 org.apache.hadoop.hbase.CellBuilder.DataType
+org.apache.hadoop.hbase.CompatibilitySingletonFactory.SingletonStorage
+org.apache.hadoop.hbase.ClusterStatus.Option
 org.apache.hadoop.hbase.HConstants.OperationStatusCode
+org.apache.hadoop.hbase.CellBuilderType
+org.apache.hadoop.hbase.HealthChecker.HealthCheckerExitStatus
 org.apache.hadoop.hbase.Coprocessor.State
-org.apache.hadoop.hbase.CompareOperator
-org.apache.hadoop.hbase.MemoryCompactionPolicy
 org.apache.hadoop.hbase.MetaTableAccessor.QueryType
-org.apache.hadoop.hbase.CompatibilitySingletonFactory.SingletonStorage
-org.apache.hadoop.hbase.HealthChecker.HealthCheckerExitStatus
+org.apache.hadoop.hbase.KeyValue.Type
+org.apache.hadoop.hbase.MemoryCompactionPolicy
 org.apache.hadoop.hbase.KeepDeletedCells
 org.apache.hadoop.hbase.ProcedureState
-org.apache.hadoop.hbase.KeyValue.Type
-org.apache.hadoop.hbase.CellBuilderType
-org.apache.hadoop.hbase.ClusterStatus.Option
+org.apache.hadoop.hbase.CompareOperator
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/fd365a2b/devapidocs/org/apache/hadoop/hbase/procedure2/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/procedure2/package-tree.html 
b/devapidocs/org/apache/hadoop/hbase/procedure2/package-tree.html
index 423856b..46d0516 100644
--- a/devapidocs/org/apache/hadoop/hbase/procedure2/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/procedure2/package-tree.html
@@ -204,10 +204,10 @@
 java.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true;
 title="class or 

[45/51] [partial] hbase-site git commit: Published site at .

2017-11-29 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/fd365a2b/apidocs/org/apache/hadoop/hbase/rest/client/RemoteHTable.html
--
diff --git a/apidocs/org/apache/hadoop/hbase/rest/client/RemoteHTable.html 
b/apidocs/org/apache/hadoop/hbase/rest/client/RemoteHTable.html
index 0f17e40..e19d46c 100644
--- a/apidocs/org/apache/hadoop/hbase/rest/client/RemoteHTable.html
+++ b/apidocs/org/apache/hadoop/hbase/rest/client/RemoteHTable.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":42,"i32":10,"i33":42,"i34":10,"i35":42,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":42,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":42,"i54":10};
+var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":42,"i11":10,"i12":42,"i13":10,"i14":10,"i15":42,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":42,"i32":10,"i33":42,"i34":10,"i35":42,"i36":10,"i37":10,"i38":10,"i39":10,"i40":42,"i41":10,"i42":42,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":42,"i52":42,"i53":42,"i54":42};
 var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],8:["t4","Concrete Methods"],32:["t6","Deprecated Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -256,8 +256,7 @@ implements CompareFilter.CompareOpcompareOp,
   byte[]value,
   Deletedelete)
-Atomically checks if a row/family/qualifier value matches 
the expected
- value.
+Deprecated.
 
 
 
@@ -280,7 +279,7 @@ implements CompareFilter.CompareOpcompareOp,
   byte[]value,
   RowMutationsrm)
-Atomically checks if a row/family/qualifier value matches 
the expected value.
+Deprecated.
 
 
 
@@ -313,8 +312,7 @@ implements CompareFilter.CompareOpcompareOp,
byte[]value,
Putput)
-Atomically checks if a row/family/qualifier value matches 
the expected
- value.
+Deprecated.
 
 
 
@@ -484,7 +482,7 @@ implements 
 HTableDescriptor
 getTableDescriptor()
-Gets the table descriptor for 
this table.
+Deprecated.
 
 
 
@@ -553,15 +551,13 @@ implements 
 void
 setOperationTimeout(intoperationTimeout)
-Set timeout (millisecond) of each operation in this Table 
instance, will override the value
- of hbase.client.operation.timeout in configuration.
+Deprecated.
 
 
 
 void
 setReadRpcTimeout(intreadRpcTimeout)
-Set timeout (millisecond) of each rpc read request in 
operations of this Table instance, will
- override the value of hbase.rpc.read.timeout in configuration.
+Deprecated.
 
 
 
@@ -573,8 +569,7 @@ implements 
 void
 setWriteRpcTimeout(intwriteRpcTimeout)
-Set timeout (millisecond) of each rpc write request in 
operations of this Table instance, will
- override the value of hbase.rpc.write.timeout in configuration.
+Deprecated.
 
 
 
@@ -738,8 +733,10 @@ implements 
 
 getTableDescriptor
-publicHTableDescriptorgetTableDescriptor()
-throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
+http://docs.oracle.com/javase/8/docs/api/java/lang/Deprecated.html?is-external=true;
 title="class or interface in java.lang">@Deprecated
+publicHTableDescriptorgetTableDescriptor()
+throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
+Deprecated.
 Description copied from 
interface:Table
 Gets the table descriptor for 
this table.
 
@@ -756,7 +753,7 @@ implements 
 
 close
-publicvoidclose()
+publicvoidclose()
throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 Description copied from 
interface:Table
 Releases any resources held or pending changes in internal 
buffers.
@@ -778,7 +775,7 @@ implements 
 
 get
-publicResultget(Getget)
+publicResultget(Getget)
throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 Description copied from 
interface:Table
 Extracts certain cells from a given row.
@@ -802,7 +799,7 @@ implements 
 
 get
-publicResult[]get(http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListGetgets)
+publicResult[]get(http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 

[39/51] [partial] hbase-site git commit: Published site at .

2017-11-29 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/fd365a2b/checkstyle-aggregate.html
--
diff --git a/checkstyle-aggregate.html b/checkstyle-aggregate.html
index 8343584..ae49b24 100644
--- a/checkstyle-aggregate.html
+++ b/checkstyle-aggregate.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  Checkstyle Results
 
@@ -289,7 +289,7 @@
 3437
 0
 0
-20812
+20803
 
 Files
 
@@ -1522,7 +1522,7 @@
 org/apache/hadoop/hbase/client/Admin.java
 0
 0
-102
+99
 
 org/apache/hadoop/hbase/client/Append.java
 0
@@ -1797,7 +1797,7 @@
 org/apache/hadoop/hbase/client/HTable.java
 0
 0
-63
+67
 
 org/apache/hadoop/hbase/client/HTableMultiplexer.java
 0
@@ -1912,7 +1912,7 @@
 org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java
 0
 0
-115
+102
 
 org/apache/hadoop/hbase/client/RegionAdminServiceCallable.java
 0
@@ -2232,7 +2232,7 @@
 org/apache/hadoop/hbase/client/TestCheckAndMutate.java
 0
 0
-6
+3
 
 org/apache/hadoop/hbase/client/TestClientClusterStatus.java
 0
@@ -8212,7 +8212,7 @@
 org/apache/hadoop/hbase/regionserver/RegionAsTable.java
 0
 0
-15
+22
 
 org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java
 0
@@ -10197,7 +10197,7 @@
 org/apache/hadoop/hbase/rest/client/RemoteHTable.java
 0
 0
-112
+118
 
 org/apache/hadoop/hbase/rest/client/Response.java
 0
@@ -10957,7 +10957,7 @@
 org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
 0
 0
-118
+114
 
 org/apache/hadoop/hbase/shaded/protobuf/ResponseConverter.java
 0
@@ -11242,12 +11242,12 @@
 org/apache/hadoop/hbase/thrift/TBoundedThreadPoolServer.java
 0
 0
-4
+3
 
 org/apache/hadoop/hbase/thrift/TestCallQueue.java
 0
 0
-3
+2
 
 org/apache/hadoop/hbase/thrift/TestMetricsThriftServerSourceFactory.java
 0
@@ -11327,7 +11327,7 @@
 org/apache/hadoop/hbase/thrift2/ThriftServer.java
 0
 0
-13
+12
 
 org/apache/hadoop/hbase/thrift2/ThriftUtilities.java
 0
@@ -12899,7 +12899,7 @@
 
 annotation
 http://checkstyle.sourceforge.net/config_annotation.html#MissingDeprecated;>MissingDeprecated
-119
+137
 Error
 
 blocks
@@ -12989,7 +12989,7 @@
 http://checkstyle.sourceforge.net/config_imports.html#UnusedImports;>UnusedImports
 
 processJavadoc: true
-225
+221
 Error
 
 indentation
@@ -13000,7 +13000,7 @@
 caseIndent: 2
 basicOffset: 2
 lineWrappingIndentation: 2
-6280
+6265
 Error
 
 javadoc
@@ -13012,7 +13012,7 @@
 
 
 http://checkstyle.sourceforge.net/config_javadoc.html#NonEmptyAtclauseDescription;>NonEmptyAtclauseDescription
-4341
+4333
 Error
 
 misc
@@ -28671,449 +28671,431 @@
 810
 
 Error
+javadoc
+NonEmptyAtclauseDescription
+At-clause should have a non-empty description.
+882
+
+Error
+javadoc
+NonEmptyAtclauseDescription
+At-clause should have a non-empty description.
+896
+
+Error
 sizes
 LineLength
 Line is longer than 100 characters (found 103).
-885
+912
 
 Error
 sizes
 LineLength
 Line is longer than 100 characters (found 102).
-895
+922
 
 Error
 sizes
 LineLength
 Line is longer than 100 characters (found 101).
-907
+934
 
 Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-925
+953
 
 Error
+sizes
+LineLength
+Line is longer than 100 characters (found 110).
+959
+
+Error
 javadoc
-JavadocTagContinuationIndentation
-Line continuation have incorrect indentation level, expected level should 
be 2.
-962
+NonEmptyAtclauseDescription
+At-clause should have a non-empty description.
+967
+
+Error
+javadoc
+NonEmptyAtclauseDescription
+At-clause should have a non-empty description.
+979
 
 Error
 javadoc
 JavadocTagContinuationIndentation
 Line continuation have incorrect indentation level, expected level should 
be 2.
-964
+1016
 
 Error
 javadoc
 JavadocTagContinuationIndentation
 Line continuation have incorrect indentation level, expected level should 
be 2.
-966
+1018
 
 Error
 javadoc
 JavadocTagContinuationIndentation
 Line continuation have incorrect indentation level, expected level should 
be 2.
-967
+1020
 
 Error
+javadoc
+JavadocTagContinuationIndentation
+Line continuation have incorrect indentation level, expected level should 
be 2.
+1021
+
+Error
 sizes
 LineLength
 Line is longer than 100 characters (found 108).
-985
-
+1039
+
 Error
 javadoc
 JavadocTagContinuationIndentation
 Line continuation have incorrect indentation level, expected level should 
be 2.
-986
-
+1040
+
 Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-999
-
-Error
-javadoc
-JavadocTagContinuationIndentation
-Line continuation have incorrect indentation level, expected level should 
be 2.
-1007
+1053
 
 Error
 javadoc
 JavadocTagContinuationIndentation
 Line continuation have incorrect indentation level, expected level should 
be 2.
-1010
+1061
 
 Error
 javadoc
 JavadocTagContinuationIndentation
 Line continuation have incorrect indentation level, expected level should 
be 2.
-1021
+1064
 
 Error
 javadoc
 JavadocTagContinuationIndentation
 Line continuation have incorrect 

[33/51] [partial] hbase-site git commit: Published site at .

2017-11-29 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/fd365a2b/devapidocs/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.html 
b/devapidocs/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.html
index 0ceca07..a661ed3 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.html
@@ -267,15 +267,17 @@ implements 
 http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true;
 title="class or interface in java.lang">Void
-compact(TableNametableName)
-Compact a table.
+compact(TableNametableName,
+   byte[]columnFamily,
+   CompactTypecompactType)
+Compact a column family within a table.
 
 
 
 http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true;
 title="class or interface in java.lang">Void
-compact(TableNametableName,
-   byte[]columnFamily)
-Compact a column family within a table.
+compact(TableNametableName,
+   CompactTypecompactType)
+Compact a table.
 
 
 
@@ -473,7 +475,8 @@ implements 
 http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFutureCompactionState
-getCompactionState(TableNametableName)
+getCompactionState(TableNametableName,
+  CompactTypecompactType)
 Get the current compaction state of a table.
 
 
@@ -738,15 +741,17 @@ implements 
 http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true;
 title="class or interface in java.lang">Void
-majorCompact(TableNametableName)
-Major compact a table.
+majorCompact(TableNametableName,
+byte[]columnFamily,
+CompactTypecompactType)
+Major compact a column family within a table.
 
 
 
 http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true;
 title="class or interface in java.lang">Void
-majorCompact(TableNametableName,
-byte[]columnFamily)
-Major compact a column family within a table.
+majorCompact(TableNametableName,
+CompactTypecompactType)
+Major compact a table.
 
 
 
@@ -1000,7 +1005,7 @@ implements AsyncAdmin
-addReplicationPeer,
 balance,
 getBackupMasters,
 getMaster,
 getMasterCoprocessors,
 getMasterInfoPort,
 getRegionServers,
 listTableDescriptors,
 listTableNames,
 snapshot,
 snapshot
+addReplicationPeer,
 balance,
 compact,
 compact,
 getBackupMasters,
 getCompactionState,
 getMaster,
 getMasterCoprocessors, getMasterInfoPort,
 getRegionServers,
 listTableDescriptors,
 listTableNames,
 majorCompact,
 majorCompact,
 snapshot,
 snapshot
 
 
 
@@ -1658,43 +1663,46 @@ implements 
+
 
 
 
 
 compact
-publichttp://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true;
 title="class or interface in java.lang">Voidcompact(TableNametableName)
-Description copied from 
interface:AsyncAdmin
+publichttp://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true;
 title="class or interface in java.lang">Voidcompact(TableNametableName,
+   CompactTypecompactType)
+Description copied from 
interface:AsyncAdmin
 Compact a table. When the returned CompletableFuture is 
done, it only means the compact request
  was sent to HBase and may need some time to finish the compact 
operation.
 
 Specified by:
-compactin
 interfaceAsyncAdmin
+compactin
 interfaceAsyncAdmin
 Parameters:
 tableName - table to compact
+compactType - CompactType
 
 
 
-
+
 
 
 
 
 compact
-publichttp://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true;
 title="class or interface in 

[07/51] [partial] hbase-site git commit: Published site at .

2017-11-29 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/fd365a2b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ModifyTableFuture.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ModifyTableFuture.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ModifyTableFuture.html
index d438f22..7c59e27 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ModifyTableFuture.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ModifyTableFuture.html
@@ -1290,8 +1290,8 @@
 1282   CompactType 
compactType) throws IOException {
 1283switch (compactType) {
 1284  case MOB:
-1285
compact(this.connection.getAdminForMaster(), getMobRegionInfo(tableName), 
major,
-1286  columnFamily);
+1285
compact(this.connection.getAdminForMaster(), 
RegionInfo.createMobRegionInfo(tableName),
+1286major, columnFamily);
 1287break;
 1288  case NORMAL:
 1289checkTableExists(tableName);
@@ -3248,7 +3248,7 @@
 3240  new 
CallableAdminProtos.GetRegionInfoResponse.CompactionState() {
 3241@Override
 3242public 
AdminProtos.GetRegionInfoResponse.CompactionState call() throws Exception {
-3243  RegionInfo info = 
getMobRegionInfo(tableName);
+3243  RegionInfo info = 
RegionInfo.createMobRegionInfo(tableName);
 3244  GetRegionInfoRequest 
request =
 3245
RequestConverter.buildGetRegionInfoRequest(info.getRegionName(), true);
 3246  GetRegionInfoResponse 
response = masterAdmin.getRegionInfo(rpcController, request);
@@ -3312,7 +3312,7 @@
 3304}
 3305break;
 3306  default:
-3307throw new 
IllegalArgumentException("Unknowne compactType: " + compactType);
+3307throw new 
IllegalArgumentException("Unknown compactType: " + compactType);
 3308}
 3309if (state != null) {
 3310  return 
ProtobufUtil.createCompactionState(state);
@@ -3847,325 +3847,320 @@
 3839});
 3840  }
 3841
-3842  private RegionInfo 
getMobRegionInfo(TableName tableName) {
-3843return 
RegionInfoBuilder.newBuilder(tableName).setStartKey(Bytes.toBytes(".mob")).setRegionId(0)
-3844.build();
-3845  }
-3846
-3847  private RpcControllerFactory 
getRpcControllerFactory() {
-3848return this.rpcControllerFactory;
-3849  }
-3850
-3851  @Override
-3852  public void addReplicationPeer(String 
peerId, ReplicationPeerConfig peerConfig, boolean enabled)
-3853  throws IOException {
-3854executeCallable(new 
MasterCallableVoid(getConnection(), getRpcControllerFactory()) {
-3855  @Override
-3856  protected Void rpcCall() throws 
Exception {
-3857
master.addReplicationPeer(getRpcController(),
-3858  
RequestConverter.buildAddReplicationPeerRequest(peerId, peerConfig, 
enabled));
-3859return null;
-3860  }
-3861});
-3862  }
-3863
-3864  @Override
-3865  public void 
removeReplicationPeer(String peerId) throws IOException {
-3866executeCallable(new 
MasterCallableVoid(getConnection(), getRpcControllerFactory()) {
-3867  @Override
-3868  protected Void rpcCall() throws 
Exception {
-3869
master.removeReplicationPeer(getRpcController(),
-3870  
RequestConverter.buildRemoveReplicationPeerRequest(peerId));
-3871return null;
-3872  }
-3873});
-3874  }
-3875
-3876  @Override
-3877  public void 
enableReplicationPeer(final String peerId) throws IOException {
-3878executeCallable(new 
MasterCallableVoid(getConnection(), getRpcControllerFactory()) {
-3879  @Override
-3880  protected Void rpcCall() throws 
Exception {
-3881
master.enableReplicationPeer(getRpcController(),
-3882  
RequestConverter.buildEnableReplicationPeerRequest(peerId));
-3883return null;
-3884  }
-3885});
-3886  }
-3887
-3888  @Override
-3889  public void 
disableReplicationPeer(final String peerId) throws IOException {
-3890executeCallable(new 
MasterCallableVoid(getConnection(), getRpcControllerFactory()) {
-3891  @Override
-3892  protected Void rpcCall() throws 
Exception {
-3893
master.disableReplicationPeer(getRpcController(),
-3894  
RequestConverter.buildDisableReplicationPeerRequest(peerId));
-3895return null;
-3896  }
-3897});
-3898  }
-3899
-3900  @Override
-3901  public ReplicationPeerConfig 
getReplicationPeerConfig(final String peerId) throws IOException {
-3902return executeCallable(new 
MasterCallableReplicationPeerConfig(getConnection(),
-3903getRpcControllerFactory()) {
-3904  @Override
-3905  protected ReplicationPeerConfig 
rpcCall() throws Exception {
-3906GetReplicationPeerConfigResponse 
response = master.getReplicationPeerConfig(
-3907  getRpcController(), 

[36/51] [partial] hbase-site git commit: Published site at .

2017-11-29 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/fd365a2b/devapidocs/org/apache/hadoop/hbase/client/Admin.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/Admin.html 
b/devapidocs/org/apache/hadoop/hbase/client/Admin.html
index 9e4fb11..8f2d209 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/Admin.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/Admin.html
@@ -3197,13 +3197,61 @@ void
+
+
+
+
+compact
+voidcompact(TableNametableName,
+ CompactTypecompactType)
+  throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException,
+ http://docs.oracle.com/javase/8/docs/api/java/lang/InterruptedException.html?is-external=true;
 title="class or interface in java.lang">InterruptedException
+Compact a table.  Asynchronous operation in that this 
method requests that a
+ Compaction run and then it returns. It does not wait on the completion of 
Compaction
+ (it can take a while).
+
+Parameters:
+tableName - table to compact
+compactType - CompactType
+Throws:
+http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException - if a remote or 
network exception occurs
+http://docs.oracle.com/javase/8/docs/api/java/lang/InterruptedException.html?is-external=true;
 title="class or interface in java.lang">InterruptedException
+
+
+
+
+
+
+
+
+compact
+voidcompact(TableNametableName,
+ byte[]columnFamily,
+ CompactTypecompactType)
+  throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException,
+ http://docs.oracle.com/javase/8/docs/api/java/lang/InterruptedException.html?is-external=true;
 title="class or interface in java.lang">InterruptedException
+Compact a column family within a table.  Asynchronous 
operation in that this method
+ requests that a Compaction run and then it returns. It does not wait on the
+ completion of Compaction (it can take a while).
+
+Parameters:
+tableName - table to compact
+columnFamily - column family within a table
+compactType - CompactType
+Throws:
+http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException - if not a mob 
column family or if a remote or network exception occurs
+http://docs.oracle.com/javase/8/docs/api/java/lang/InterruptedException.html?is-external=true;
 title="class or interface in java.lang">InterruptedException
+
+
+
 
 
 
 
 
 majorCompact
-voidmajorCompact(TableNametableName)
+voidmajorCompact(TableNametableName)
throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 Major compact a table. Asynchronous operation in that this 
method requests
  that a Compaction run and then it returns. It does not wait on the completion 
of Compaction
@@ -3222,7 +3270,7 @@ void
 
 majorCompactRegion
-voidmajorCompactRegion(byte[]regionName)
+voidmajorCompactRegion(byte[]regionName)
  throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 Major compact a table or an individual region. Asynchronous 
operation in that this method requests
  that a Compaction run and then it returns. It does not wait on the completion 
of Compaction
@@ -3241,7 +3289,7 @@ void
 
 majorCompact
-voidmajorCompact(TableNametableName,
+voidmajorCompact(TableNametableName,
   byte[]columnFamily)
throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 Major compact a column family within a table. Asynchronous 
operation in that this method requests
@@ -3262,7 +3310,7 @@ void
 
 majorCompactRegion
-voidmajorCompactRegion(byte[]regionName,
+voidmajorCompactRegion(byte[]regionName,
 byte[]columnFamily)
  throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 Major compact a column family within region. Asynchronous 
operation in that this method requests
@@ -3277,6 +3325,54 @@ void
+
+
+
+
+majorCompact
+voidmajorCompact(TableNametableName,
+  CompactTypecompactType)
+   throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException,
+  http://docs.oracle.com/javase/8/docs/api/java/lang/InterruptedException.html?is-external=true;
 title="class or interface in java.lang">InterruptedException
+Major compact a table.  Asynchronous operation in that this 
method requests that a
+ Compaction run 

[14/51] [partial] hbase-site git commit: Published site at .

2017-11-29 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/fd365a2b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.CreateTableFuture.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.CreateTableFuture.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.CreateTableFuture.html
index d438f22..7c59e27 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.CreateTableFuture.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.CreateTableFuture.html
@@ -1290,8 +1290,8 @@
 1282   CompactType 
compactType) throws IOException {
 1283switch (compactType) {
 1284  case MOB:
-1285
compact(this.connection.getAdminForMaster(), getMobRegionInfo(tableName), 
major,
-1286  columnFamily);
+1285
compact(this.connection.getAdminForMaster(), 
RegionInfo.createMobRegionInfo(tableName),
+1286major, columnFamily);
 1287break;
 1288  case NORMAL:
 1289checkTableExists(tableName);
@@ -3248,7 +3248,7 @@
 3240  new 
CallableAdminProtos.GetRegionInfoResponse.CompactionState() {
 3241@Override
 3242public 
AdminProtos.GetRegionInfoResponse.CompactionState call() throws Exception {
-3243  RegionInfo info = 
getMobRegionInfo(tableName);
+3243  RegionInfo info = 
RegionInfo.createMobRegionInfo(tableName);
 3244  GetRegionInfoRequest 
request =
 3245
RequestConverter.buildGetRegionInfoRequest(info.getRegionName(), true);
 3246  GetRegionInfoResponse 
response = masterAdmin.getRegionInfo(rpcController, request);
@@ -3312,7 +3312,7 @@
 3304}
 3305break;
 3306  default:
-3307throw new 
IllegalArgumentException("Unknowne compactType: " + compactType);
+3307throw new 
IllegalArgumentException("Unknown compactType: " + compactType);
 3308}
 3309if (state != null) {
 3310  return 
ProtobufUtil.createCompactionState(state);
@@ -3847,325 +3847,320 @@
 3839});
 3840  }
 3841
-3842  private RegionInfo 
getMobRegionInfo(TableName tableName) {
-3843return 
RegionInfoBuilder.newBuilder(tableName).setStartKey(Bytes.toBytes(".mob")).setRegionId(0)
-3844.build();
-3845  }
-3846
-3847  private RpcControllerFactory 
getRpcControllerFactory() {
-3848return this.rpcControllerFactory;
-3849  }
-3850
-3851  @Override
-3852  public void addReplicationPeer(String 
peerId, ReplicationPeerConfig peerConfig, boolean enabled)
-3853  throws IOException {
-3854executeCallable(new 
MasterCallableVoid(getConnection(), getRpcControllerFactory()) {
-3855  @Override
-3856  protected Void rpcCall() throws 
Exception {
-3857
master.addReplicationPeer(getRpcController(),
-3858  
RequestConverter.buildAddReplicationPeerRequest(peerId, peerConfig, 
enabled));
-3859return null;
-3860  }
-3861});
-3862  }
-3863
-3864  @Override
-3865  public void 
removeReplicationPeer(String peerId) throws IOException {
-3866executeCallable(new 
MasterCallableVoid(getConnection(), getRpcControllerFactory()) {
-3867  @Override
-3868  protected Void rpcCall() throws 
Exception {
-3869
master.removeReplicationPeer(getRpcController(),
-3870  
RequestConverter.buildRemoveReplicationPeerRequest(peerId));
-3871return null;
-3872  }
-3873});
-3874  }
-3875
-3876  @Override
-3877  public void 
enableReplicationPeer(final String peerId) throws IOException {
-3878executeCallable(new 
MasterCallableVoid(getConnection(), getRpcControllerFactory()) {
-3879  @Override
-3880  protected Void rpcCall() throws 
Exception {
-3881
master.enableReplicationPeer(getRpcController(),
-3882  
RequestConverter.buildEnableReplicationPeerRequest(peerId));
-3883return null;
-3884  }
-3885});
-3886  }
-3887
-3888  @Override
-3889  public void 
disableReplicationPeer(final String peerId) throws IOException {
-3890executeCallable(new 
MasterCallableVoid(getConnection(), getRpcControllerFactory()) {
-3891  @Override
-3892  protected Void rpcCall() throws 
Exception {
-3893
master.disableReplicationPeer(getRpcController(),
-3894  
RequestConverter.buildDisableReplicationPeerRequest(peerId));
-3895return null;
-3896  }
-3897});
-3898  }
-3899
-3900  @Override
-3901  public ReplicationPeerConfig 
getReplicationPeerConfig(final String peerId) throws IOException {
-3902return executeCallable(new 
MasterCallableReplicationPeerConfig(getConnection(),
-3903getRpcControllerFactory()) {
-3904  @Override
-3905  protected ReplicationPeerConfig 
rpcCall() throws Exception {
-3906GetReplicationPeerConfigResponse 
response = master.getReplicationPeerConfig(
-3907  getRpcController(), 

[22/51] [partial] hbase-site git commit: Published site at .

2017-11-29 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/fd365a2b/devapidocs/src-html/org/apache/hadoop/hbase/client/Admin.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/Admin.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/Admin.html
index cb363f9..d7f2c1a 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/Admin.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/Admin.html
@@ -880,1508 +880,1508 @@
 872throws IOException;
 873
 874  /**
-875   * Major compact a table. Asynchronous 
operation in that this method requests
-876   * that a Compaction run and then it 
returns. It does not wait on the completion of Compaction
+875   * Compact a table.  Asynchronous 
operation in that this method requests that a
+876   * Compaction run and then it returns. 
It does not wait on the completion of Compaction
 877   * (it can take a while).
 878   *
-879   * @param tableName table to major 
compact
-880   * @throws IOException if a remote or 
network exception occurs
-881   */
-882  void majorCompact(TableName tableName) 
throws IOException;
-883
-884  /**
-885   * Major compact a table or an 
individual region. Asynchronous operation in that this method requests
-886   * that a Compaction run and then it 
returns. It does not wait on the completion of Compaction
-887   * (it can take a while).
-888   *
-889   * @param regionName region to major 
compact
-890   * @throws IOException if a remote or 
network exception occurs
-891   */
-892  void majorCompactRegion(byte[] 
regionName) throws IOException;
-893
-894  /**
-895   * Major compact a column family within 
a table. Asynchronous operation in that this method requests
-896   * that a Compaction run and then it 
returns. It does not wait on the completion of Compaction
-897   * (it can take a while).
-898   *
-899   * @param tableName table to major 
compact
-900   * @param columnFamily column family 
within a table
-901   * @throws IOException if a remote or 
network exception occurs
-902   */
-903  void majorCompact(TableName tableName, 
byte[] columnFamily)
-904throws IOException;
-905
-906  /**
-907   * Major compact a column family within 
region. Asynchronous operation in that this method requests
-908   * that a Compaction run and then it 
returns. It does not wait on the completion of Compaction
-909   * (it can take a while).
-910   *
-911   * @param regionName egion to major 
compact
-912   * @param columnFamily column family 
within a region
-913   * @throws IOException if a remote or 
network exception occurs
-914   */
-915  void majorCompactRegion(byte[] 
regionName, byte[] columnFamily)
-916throws IOException;
-917
-918  /**
-919   * Compact all regions on the region 
server. Asynchronous operation in that this method requests
-920   * that a Compaction run and then it 
returns. It does not wait on the completion of Compaction (it
-921   * can take a while).
-922   * @param sn the region server name
-923   * @param major if it's major 
compaction
-924   * @throws IOException if a remote or 
network exception occurs
-925   * @throws InterruptedException
-926   * @deprecated As of release 2.0.0, 
this will be removed in HBase 3.0.0. Use
-927   * {@link 
#compactRegionServer(ServerName)} or
-928   * {@link 
#majorCompactRegionServer(ServerName)}.
+879   * @param tableName table to compact
+880   * @param compactType {@link 
org.apache.hadoop.hbase.client.CompactType}
+881   * @throws IOException if a remote or 
network exception occurs
+882   * @throws InterruptedException
+883   */
+884  void compact(TableName tableName, 
CompactType compactType)
+885throws IOException, 
InterruptedException;
+886
+887  /**
+888   * Compact a column family within a 
table.  Asynchronous operation in that this method
+889   * requests that a Compaction run and 
then it returns. It does not wait on the
+890   * completion of Compaction (it can 
take a while).
+891   *
+892   * @param tableName table to compact
+893   * @param columnFamily column family 
within a table
+894   * @param compactType {@link 
org.apache.hadoop.hbase.client.CompactType}
+895   * @throws IOException if not a mob 
column family or if a remote or network exception occurs
+896   * @throws InterruptedException
+897   */
+898  void compact(TableName tableName, 
byte[] columnFamily, CompactType compactType)
+899throws IOException, 
InterruptedException;
+900
+901  /**
+902   * Major compact a table. Asynchronous 
operation in that this method requests
+903   * that a Compaction run and then it 
returns. It does not wait on the completion of Compaction
+904   * (it can take a while).
+905   *
+906   * @param tableName table to major 
compact
+907   * @throws IOException if a remote or 
network exception occurs
+908   */
+909  void majorCompact(TableName tableName) 
throws IOException;
+910
+911  /**
+912   * Major compact a table or 

[27/51] [partial] hbase-site git commit: Published site at .

2017-11-29 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/fd365a2b/devapidocs/org/apache/hadoop/hbase/client/RawAsyncTableImpl.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/RawAsyncTableImpl.html 
b/devapidocs/org/apache/hadoop/hbase/client/RawAsyncTableImpl.html
index 91bfe40..3ddf027 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/RawAsyncTableImpl.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/RawAsyncTableImpl.html
@@ -279,7 +279,7 @@ implements 
 Thttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">Listhttp://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in 
java.util.concurrent">CompletableFutureT
 batch(http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">List? extends Rowactions)
-Method that does a batch call on Deletes, Gets, Puts, 
Increments and Appends.
+Method that does a batch call on Deletes, Gets, Puts, 
Increments, Appends and RowMutations.
 
 
 
@@ -1165,15 +1165,15 @@ implements http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">Listhttp://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in 
java.util.concurrent">CompletableFutureTbatch(http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">List? extends Rowactions)
 Description copied from 
interface:AsyncTable
-Method that does a batch call on Deletes, Gets, Puts, 
Increments and Appends. The ordering of
- execution of the actions is not defined. Meaning if you do a Put and a Get in 
the same
- AsyncTable.batch(java.util.List?
 extends org.apache.hadoop.hbase.client.Row) call, you will not 
necessarily be guaranteed that the Get returns what the Put
- had put.
+Method that does a batch call on Deletes, Gets, Puts, 
Increments, Appends and RowMutations. The
+ ordering of execution of the actions is not defined. Meaning if you do a Put 
and a Get in the
+ same AsyncTable.batch(java.util.List?
 extends org.apache.hadoop.hbase.client.Row) call, you will not 
necessarily be guaranteed that the Get returns what the
+ Put had put.
 
 Specified by:
 batchin
 interfaceAsyncTableAdvancedScanResultConsumer
 Parameters:
-actions - list of Get, Put, Delete, Increment, Append 
objects
+actions - list of Get, Put, Delete, Increment, Append, and 
RowMutations objects
 Returns:
 A list of http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in 
java.util.concurrent">CompletableFutures that represent the 
result for each action.
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/fd365a2b/devapidocs/org/apache/hadoop/hbase/client/RegionInfo.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/RegionInfo.html 
b/devapidocs/org/apache/hadoop/hbase/client/RegionInfo.html
index 495b3bf..66c3237 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/RegionInfo.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/RegionInfo.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":17,"i1":6,"i2":6,"i3":17,"i4":17,"i5":17,"i6":17,"i7":17,"i8":17,"i9":6,"i10":6,"i11":6,"i12":6,"i13":6,"i14":6,"i15":6,"i16":6,"i17":17,"i18":17,"i19":6,"i20":17,"i21":6,"i22":17,"i23":17,"i24":17,"i25":6,"i26":6,"i27":6,"i28":6,"i29":17,"i30":17,"i31":17,"i32":17,"i33":17,"i34":17,"i35":17,"i36":17,"i37":17,"i38":17,"i39":17};
+var methods = 
{"i0":17,"i1":6,"i2":6,"i3":17,"i4":17,"i5":17,"i6":17,"i7":17,"i8":17,"i9":17,"i10":6,"i11":6,"i12":6,"i13":6,"i14":6,"i15":6,"i16":6,"i17":6,"i18":17,"i19":17,"i20":6,"i21":17,"i22":6,"i23":17,"i24":17,"i25":17,"i26":6,"i27":6,"i28":6,"i29":6,"i30":17,"i31":17,"i32":17,"i33":17,"i34":17,"i35":17,"i36":17,"i37":17,"i38":17,"i39":17,"i40":17};
 var tabs = {65535:["t0","All Methods"],1:["t1","Static 
Methods"],2:["t2","Instance Methods"],4:["t3","Abstract 
Methods"],16:["t5","Default Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -212,6 +212,12 @@ public interface containsRow(byte[]row)
 
 
+static RegionInfo
+createMobRegionInfo(TableNametableName)
+Creates a RegionInfo object for MOB data.
+
+
+
 static byte[]
 createRegionName(TableNametableName,
 byte[]startKey,
@@ -220,7 +226,7 @@ public interface Make a region name of passed parameters.
 
 
-
+
 static byte[]
 createRegionName(TableNametableName,
 byte[]startKey,
@@ -230,7 +236,7 @@ public interface Make a region name of passed parameters.
 
 
-
+
 static byte[]
 createRegionName(TableNametableName,
 byte[]startKey,
@@ -239,7 +245,7 

[05/51] [partial] hbase-site git commit: Published site at .

2017-11-29 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/fd365a2b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ProcedureFuture.WaitForStateCallable.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ProcedureFuture.WaitForStateCallable.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ProcedureFuture.WaitForStateCallable.html
index d438f22..7c59e27 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ProcedureFuture.WaitForStateCallable.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ProcedureFuture.WaitForStateCallable.html
@@ -1290,8 +1290,8 @@
 1282   CompactType 
compactType) throws IOException {
 1283switch (compactType) {
 1284  case MOB:
-1285
compact(this.connection.getAdminForMaster(), getMobRegionInfo(tableName), 
major,
-1286  columnFamily);
+1285
compact(this.connection.getAdminForMaster(), 
RegionInfo.createMobRegionInfo(tableName),
+1286major, columnFamily);
 1287break;
 1288  case NORMAL:
 1289checkTableExists(tableName);
@@ -3248,7 +3248,7 @@
 3240  new 
CallableAdminProtos.GetRegionInfoResponse.CompactionState() {
 3241@Override
 3242public 
AdminProtos.GetRegionInfoResponse.CompactionState call() throws Exception {
-3243  RegionInfo info = 
getMobRegionInfo(tableName);
+3243  RegionInfo info = 
RegionInfo.createMobRegionInfo(tableName);
 3244  GetRegionInfoRequest 
request =
 3245
RequestConverter.buildGetRegionInfoRequest(info.getRegionName(), true);
 3246  GetRegionInfoResponse 
response = masterAdmin.getRegionInfo(rpcController, request);
@@ -3312,7 +3312,7 @@
 3304}
 3305break;
 3306  default:
-3307throw new 
IllegalArgumentException("Unknowne compactType: " + compactType);
+3307throw new 
IllegalArgumentException("Unknown compactType: " + compactType);
 3308}
 3309if (state != null) {
 3310  return 
ProtobufUtil.createCompactionState(state);
@@ -3847,325 +3847,320 @@
 3839});
 3840  }
 3841
-3842  private RegionInfo 
getMobRegionInfo(TableName tableName) {
-3843return 
RegionInfoBuilder.newBuilder(tableName).setStartKey(Bytes.toBytes(".mob")).setRegionId(0)
-3844.build();
-3845  }
-3846
-3847  private RpcControllerFactory 
getRpcControllerFactory() {
-3848return this.rpcControllerFactory;
-3849  }
-3850
-3851  @Override
-3852  public void addReplicationPeer(String 
peerId, ReplicationPeerConfig peerConfig, boolean enabled)
-3853  throws IOException {
-3854executeCallable(new 
MasterCallableVoid(getConnection(), getRpcControllerFactory()) {
-3855  @Override
-3856  protected Void rpcCall() throws 
Exception {
-3857
master.addReplicationPeer(getRpcController(),
-3858  
RequestConverter.buildAddReplicationPeerRequest(peerId, peerConfig, 
enabled));
-3859return null;
-3860  }
-3861});
-3862  }
-3863
-3864  @Override
-3865  public void 
removeReplicationPeer(String peerId) throws IOException {
-3866executeCallable(new 
MasterCallableVoid(getConnection(), getRpcControllerFactory()) {
-3867  @Override
-3868  protected Void rpcCall() throws 
Exception {
-3869
master.removeReplicationPeer(getRpcController(),
-3870  
RequestConverter.buildRemoveReplicationPeerRequest(peerId));
-3871return null;
-3872  }
-3873});
-3874  }
-3875
-3876  @Override
-3877  public void 
enableReplicationPeer(final String peerId) throws IOException {
-3878executeCallable(new 
MasterCallableVoid(getConnection(), getRpcControllerFactory()) {
-3879  @Override
-3880  protected Void rpcCall() throws 
Exception {
-3881
master.enableReplicationPeer(getRpcController(),
-3882  
RequestConverter.buildEnableReplicationPeerRequest(peerId));
-3883return null;
-3884  }
-3885});
-3886  }
-3887
-3888  @Override
-3889  public void 
disableReplicationPeer(final String peerId) throws IOException {
-3890executeCallable(new 
MasterCallableVoid(getConnection(), getRpcControllerFactory()) {
-3891  @Override
-3892  protected Void rpcCall() throws 
Exception {
-3893
master.disableReplicationPeer(getRpcController(),
-3894  
RequestConverter.buildDisableReplicationPeerRequest(peerId));
-3895return null;
-3896  }
-3897});
-3898  }
-3899
-3900  @Override
-3901  public ReplicationPeerConfig 
getReplicationPeerConfig(final String peerId) throws IOException {
-3902return executeCallable(new 
MasterCallableReplicationPeerConfig(getConnection(),
-3903getRpcControllerFactory()) {
-3904  @Override
-3905  protected ReplicationPeerConfig 
rpcCall() throws Exception {
-3906GetReplicationPeerConfigResponse 
response = 

[46/51] [partial] hbase-site git commit: Published site at .

2017-11-29 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/fd365a2b/apidocs/org/apache/hadoop/hbase/client/AsyncTable.html
--
diff --git a/apidocs/org/apache/hadoop/hbase/client/AsyncTable.html 
b/apidocs/org/apache/hadoop/hbase/client/AsyncTable.html
index c0e71c2..a99f944 100644
--- a/apidocs/org/apache/hadoop/hbase/client/AsyncTable.html
+++ b/apidocs/org/apache/hadoop/hbase/client/AsyncTable.html
@@ -162,7 +162,7 @@ public interface 
 Thttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">Listhttp://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in 
java.util.concurrent">CompletableFutureT
 batch(http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">List? extends Rowactions)
-Method that does a batch call on Deletes, Gets, Puts, 
Increments and Appends.
+Method that does a batch call on Deletes, Gets, Puts, 
Increments, Appends and RowMutations.
 
 
 
@@ -970,13 +970,13 @@ public interface 
 batch
 Thttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">Listhttp://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in 
java.util.concurrent">CompletableFutureTbatch(http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">List? extends Rowactions)
-Method that does a batch call on Deletes, Gets, Puts, 
Increments and Appends. The ordering of
- execution of the actions is not defined. Meaning if you do a Put and a Get in 
the same
- batch(java.util.List?
 extends org.apache.hadoop.hbase.client.Row) call, you will not 
necessarily be guaranteed that the Get returns what the Put
- had put.
+Method that does a batch call on Deletes, Gets, Puts, 
Increments, Appends and RowMutations. The
+ ordering of execution of the actions is not defined. Meaning if you do a Put 
and a Get in the
+ same batch(java.util.List?
 extends org.apache.hadoop.hbase.client.Row) call, you will not 
necessarily be guaranteed that the Get returns what the
+ Put had put.
 
 Parameters:
-actions - list of Get, Put, Delete, Increment, Append 
objects
+actions - list of Get, Put, Delete, Increment, Append, and 
RowMutations objects
 Returns:
 A list of http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in 
java.util.concurrent">CompletableFutures that represent the 
result for each action.
 
@@ -993,7 +993,7 @@ public interface Parameters:
-actions - list of Get, Put, Delete, Increment, Append 
objects
+actions - list of Get, Put, Delete, Increment, Append and 
RowMutations objects
 Returns:
 A list of the result for the actions. Wrapped by a http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in 
java.util.concurrent">CompletableFuture.
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/fd365a2b/apidocs/org/apache/hadoop/hbase/client/RegionInfo.html
--
diff --git a/apidocs/org/apache/hadoop/hbase/client/RegionInfo.html 
b/apidocs/org/apache/hadoop/hbase/client/RegionInfo.html
index fabed42..06a1b25 100644
--- a/apidocs/org/apache/hadoop/hbase/client/RegionInfo.html
+++ b/apidocs/org/apache/hadoop/hbase/client/RegionInfo.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":17,"i1":6,"i2":6,"i3":17,"i4":17,"i5":17,"i6":17,"i7":17,"i8":6,"i9":6,"i10":6,"i11":6,"i12":6,"i13":6,"i14":6,"i15":6,"i16":17,"i17":17,"i18":6,"i19":17,"i20":6,"i21":6,"i22":6,"i23":6,"i24":6,"i25":17,"i26":17,"i27":17,"i28":17,"i29":17,"i30":17,"i31":17};
+var methods = 
{"i0":17,"i1":6,"i2":6,"i3":17,"i4":17,"i5":17,"i6":17,"i7":17,"i8":17,"i9":6,"i10":6,"i11":6,"i12":6,"i13":6,"i14":6,"i15":6,"i16":6,"i17":17,"i18":17,"i19":6,"i20":17,"i21":6,"i22":6,"i23":6,"i24":6,"i25":6,"i26":17,"i27":17,"i28":17,"i29":17,"i30":17,"i31":17,"i32":17};
 var tabs = {65535:["t0","All Methods"],1:["t1","Static 
Methods"],2:["t2","Instance Methods"],4:["t3","Abstract 
Methods"],16:["t5","Default Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -164,6 +164,12 @@ public interface containsRow(byte[]row)
 
 
+static RegionInfo
+createMobRegionInfo(TableNametableName)
+Creates a RegionInfo object for MOB data.
+
+
+
 static byte[]
 createRegionName(TableNametableName,
 byte[]startKey,
@@ -172,7 +178,7 @@ public interface Make a region name of passed parameters.
 
 
-
+
 static byte[]
 createRegionName(TableNametableName,
 byte[]startKey,
@@ -182,7 +188,7 @@ public interface Make a region name of passed parameters.
 
 
-
+

[44/51] [partial] hbase-site git commit: Published site at .

2017-11-29 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/fd365a2b/apidocs/src-html/org/apache/hadoop/hbase/client/Admin.html
--
diff --git a/apidocs/src-html/org/apache/hadoop/hbase/client/Admin.html 
b/apidocs/src-html/org/apache/hadoop/hbase/client/Admin.html
index cb363f9..d7f2c1a 100644
--- a/apidocs/src-html/org/apache/hadoop/hbase/client/Admin.html
+++ b/apidocs/src-html/org/apache/hadoop/hbase/client/Admin.html
@@ -880,1508 +880,1508 @@
 872throws IOException;
 873
 874  /**
-875   * Major compact a table. Asynchronous 
operation in that this method requests
-876   * that a Compaction run and then it 
returns. It does not wait on the completion of Compaction
+875   * Compact a table.  Asynchronous 
operation in that this method requests that a
+876   * Compaction run and then it returns. 
It does not wait on the completion of Compaction
 877   * (it can take a while).
 878   *
-879   * @param tableName table to major 
compact
-880   * @throws IOException if a remote or 
network exception occurs
-881   */
-882  void majorCompact(TableName tableName) 
throws IOException;
-883
-884  /**
-885   * Major compact a table or an 
individual region. Asynchronous operation in that this method requests
-886   * that a Compaction run and then it 
returns. It does not wait on the completion of Compaction
-887   * (it can take a while).
-888   *
-889   * @param regionName region to major 
compact
-890   * @throws IOException if a remote or 
network exception occurs
-891   */
-892  void majorCompactRegion(byte[] 
regionName) throws IOException;
-893
-894  /**
-895   * Major compact a column family within 
a table. Asynchronous operation in that this method requests
-896   * that a Compaction run and then it 
returns. It does not wait on the completion of Compaction
-897   * (it can take a while).
-898   *
-899   * @param tableName table to major 
compact
-900   * @param columnFamily column family 
within a table
-901   * @throws IOException if a remote or 
network exception occurs
-902   */
-903  void majorCompact(TableName tableName, 
byte[] columnFamily)
-904throws IOException;
-905
-906  /**
-907   * Major compact a column family within 
region. Asynchronous operation in that this method requests
-908   * that a Compaction run and then it 
returns. It does not wait on the completion of Compaction
-909   * (it can take a while).
-910   *
-911   * @param regionName egion to major 
compact
-912   * @param columnFamily column family 
within a region
-913   * @throws IOException if a remote or 
network exception occurs
-914   */
-915  void majorCompactRegion(byte[] 
regionName, byte[] columnFamily)
-916throws IOException;
-917
-918  /**
-919   * Compact all regions on the region 
server. Asynchronous operation in that this method requests
-920   * that a Compaction run and then it 
returns. It does not wait on the completion of Compaction (it
-921   * can take a while).
-922   * @param sn the region server name
-923   * @param major if it's major 
compaction
-924   * @throws IOException if a remote or 
network exception occurs
-925   * @throws InterruptedException
-926   * @deprecated As of release 2.0.0, 
this will be removed in HBase 3.0.0. Use
-927   * {@link 
#compactRegionServer(ServerName)} or
-928   * {@link 
#majorCompactRegionServer(ServerName)}.
+879   * @param tableName table to compact
+880   * @param compactType {@link 
org.apache.hadoop.hbase.client.CompactType}
+881   * @throws IOException if a remote or 
network exception occurs
+882   * @throws InterruptedException
+883   */
+884  void compact(TableName tableName, 
CompactType compactType)
+885throws IOException, 
InterruptedException;
+886
+887  /**
+888   * Compact a column family within a 
table.  Asynchronous operation in that this method
+889   * requests that a Compaction run and 
then it returns. It does not wait on the
+890   * completion of Compaction (it can 
take a while).
+891   *
+892   * @param tableName table to compact
+893   * @param columnFamily column family 
within a table
+894   * @param compactType {@link 
org.apache.hadoop.hbase.client.CompactType}
+895   * @throws IOException if not a mob 
column family or if a remote or network exception occurs
+896   * @throws InterruptedException
+897   */
+898  void compact(TableName tableName, 
byte[] columnFamily, CompactType compactType)
+899throws IOException, 
InterruptedException;
+900
+901  /**
+902   * Major compact a table. Asynchronous 
operation in that this method requests
+903   * that a Compaction run and then it 
returns. It does not wait on the completion of Compaction
+904   * (it can take a while).
+905   *
+906   * @param tableName table to major 
compact
+907   * @throws IOException if a remote or 
network exception occurs
+908   */
+909  void majorCompact(TableName tableName) 
throws IOException;
+910
+911  /**
+912   * Major compact a table or an 
individual 

[17/51] [partial] hbase-site git commit: Published site at .

2017-11-29 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/fd365a2b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.html
index e6498a8..6cb32c4 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.html
@@ -252,496 +252,500 @@
 244  }
 245
 246  @Override
-247  public CompletableFutureVoid 
compact(TableName tableName) {
-248return 
wrap(rawAdmin.compact(tableName));
-249  }
-250
-251  @Override
-252  public CompletableFutureVoid 
compact(TableName tableName, byte[] columnFamily) {
-253return 
wrap(rawAdmin.compact(tableName, columnFamily));
-254  }
-255
-256  @Override
-257  public CompletableFutureVoid 
compactRegion(byte[] regionName) {
-258return 
wrap(rawAdmin.compactRegion(regionName));
-259  }
-260
-261  @Override
-262  public CompletableFutureVoid 
compactRegion(byte[] regionName, byte[] columnFamily) {
-263return 
wrap(rawAdmin.compactRegion(regionName, columnFamily));
-264  }
-265
-266  @Override
-267  public CompletableFutureVoid 
majorCompact(TableName tableName) {
-268return 
wrap(rawAdmin.majorCompact(tableName));
-269  }
-270
-271  @Override
-272  public CompletableFutureVoid 
majorCompact(TableName tableName, byte[] columnFamily) {
-273return 
wrap(rawAdmin.majorCompact(tableName, columnFamily));
-274  }
-275
-276  @Override
-277  public CompletableFutureVoid 
majorCompactRegion(byte[] regionName) {
-278return 
wrap(rawAdmin.majorCompactRegion(regionName));
-279  }
-280
-281  @Override
-282  public CompletableFutureVoid 
majorCompactRegion(byte[] regionName, byte[] columnFamily) {
-283return 
wrap(rawAdmin.majorCompactRegion(regionName, columnFamily));
-284  }
-285
-286  @Override
-287  public CompletableFutureVoid 
compactRegionServer(ServerName serverName) {
-288return 
wrap(rawAdmin.compactRegionServer(serverName));
-289  }
-290
-291  @Override
-292  public CompletableFutureVoid 
majorCompactRegionServer(ServerName serverName) {
-293return 
wrap(rawAdmin.majorCompactRegionServer(serverName));
-294  }
-295
-296  @Override
-297  public CompletableFutureBoolean 
mergeSwitch(boolean on) {
-298return 
wrap(rawAdmin.mergeSwitch(on));
-299  }
-300
-301  @Override
-302  public CompletableFutureBoolean 
isMergeEnabled() {
-303return 
wrap(rawAdmin.isMergeEnabled());
-304  }
-305
-306  @Override
-307  public CompletableFutureBoolean 
splitSwitch(boolean on) {
-308return 
wrap(rawAdmin.splitSwitch(on));
-309  }
-310
-311  @Override
-312  public CompletableFutureBoolean 
isSplitEnabled() {
-313return 
wrap(rawAdmin.isSplitEnabled());
-314  }
-315
-316  @Override
-317  public CompletableFutureVoid 
mergeRegions(byte[] nameOfRegionA, byte[] nameOfRegionB,
-318  boolean forcible) {
-319return 
wrap(rawAdmin.mergeRegions(nameOfRegionA, nameOfRegionB, forcible));
-320  }
-321
-322  @Override
-323  public CompletableFutureVoid 
split(TableName tableName) {
-324return 
wrap(rawAdmin.split(tableName));
-325  }
-326
-327  @Override
-328  public CompletableFutureVoid 
split(TableName tableName, byte[] splitPoint) {
-329return wrap(rawAdmin.split(tableName, 
splitPoint));
-330  }
-331
-332  @Override
-333  public CompletableFutureVoid 
splitRegion(byte[] regionName) {
-334return 
wrap(rawAdmin.splitRegion(regionName));
-335  }
-336
-337  @Override
-338  public CompletableFutureVoid 
splitRegion(byte[] regionName, byte[] splitPoint) {
-339return 
wrap(rawAdmin.splitRegion(regionName, splitPoint));
-340  }
-341
-342  @Override
-343  public CompletableFutureVoid 
assign(byte[] regionName) {
-344return 
wrap(rawAdmin.assign(regionName));
-345  }
-346
-347  @Override
-348  public CompletableFutureVoid 
unassign(byte[] regionName, boolean forcible) {
-349return 
wrap(rawAdmin.unassign(regionName, forcible));
-350  }
-351
-352  @Override
-353  public CompletableFutureVoid 
offline(byte[] regionName) {
-354return 
wrap(rawAdmin.offline(regionName));
-355  }
-356
-357  @Override
-358  public CompletableFutureVoid 
move(byte[] regionName) {
-359return 
wrap(rawAdmin.move(regionName));
-360  }
-361
-362  @Override
-363  public CompletableFutureVoid 
move(byte[] regionName, ServerName destServerName) {
-364return wrap(rawAdmin.move(regionName, 
destServerName));
-365  }
-366
-367  @Override
-368  public CompletableFutureVoid 
setQuota(QuotaSettings quota) {
-369return 
wrap(rawAdmin.setQuota(quota));
-370  }
-371
-372  @Override
-373  public 
CompletableFutureListQuotaSettings getQuota(QuotaFilter filter) 
{
-374return 
wrap(rawAdmin.getQuota(filter));
-375  }
-376
-377  @Override
-378  public CompletableFutureVoid 
addReplicationPeer(String peerId,
-379  

[06/51] [partial] hbase-site git commit: Published site at .

2017-11-29 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/fd365a2b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.NamespaceFuture.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.NamespaceFuture.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.NamespaceFuture.html
index d438f22..7c59e27 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.NamespaceFuture.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.NamespaceFuture.html
@@ -1290,8 +1290,8 @@
 1282   CompactType 
compactType) throws IOException {
 1283switch (compactType) {
 1284  case MOB:
-1285
compact(this.connection.getAdminForMaster(), getMobRegionInfo(tableName), 
major,
-1286  columnFamily);
+1285
compact(this.connection.getAdminForMaster(), 
RegionInfo.createMobRegionInfo(tableName),
+1286major, columnFamily);
 1287break;
 1288  case NORMAL:
 1289checkTableExists(tableName);
@@ -3248,7 +3248,7 @@
 3240  new 
CallableAdminProtos.GetRegionInfoResponse.CompactionState() {
 3241@Override
 3242public 
AdminProtos.GetRegionInfoResponse.CompactionState call() throws Exception {
-3243  RegionInfo info = 
getMobRegionInfo(tableName);
+3243  RegionInfo info = 
RegionInfo.createMobRegionInfo(tableName);
 3244  GetRegionInfoRequest 
request =
 3245
RequestConverter.buildGetRegionInfoRequest(info.getRegionName(), true);
 3246  GetRegionInfoResponse 
response = masterAdmin.getRegionInfo(rpcController, request);
@@ -3312,7 +3312,7 @@
 3304}
 3305break;
 3306  default:
-3307throw new 
IllegalArgumentException("Unknowne compactType: " + compactType);
+3307throw new 
IllegalArgumentException("Unknown compactType: " + compactType);
 3308}
 3309if (state != null) {
 3310  return 
ProtobufUtil.createCompactionState(state);
@@ -3847,325 +3847,320 @@
 3839});
 3840  }
 3841
-3842  private RegionInfo 
getMobRegionInfo(TableName tableName) {
-3843return 
RegionInfoBuilder.newBuilder(tableName).setStartKey(Bytes.toBytes(".mob")).setRegionId(0)
-3844.build();
-3845  }
-3846
-3847  private RpcControllerFactory 
getRpcControllerFactory() {
-3848return this.rpcControllerFactory;
-3849  }
-3850
-3851  @Override
-3852  public void addReplicationPeer(String 
peerId, ReplicationPeerConfig peerConfig, boolean enabled)
-3853  throws IOException {
-3854executeCallable(new 
MasterCallableVoid(getConnection(), getRpcControllerFactory()) {
-3855  @Override
-3856  protected Void rpcCall() throws 
Exception {
-3857
master.addReplicationPeer(getRpcController(),
-3858  
RequestConverter.buildAddReplicationPeerRequest(peerId, peerConfig, 
enabled));
-3859return null;
-3860  }
-3861});
-3862  }
-3863
-3864  @Override
-3865  public void 
removeReplicationPeer(String peerId) throws IOException {
-3866executeCallable(new 
MasterCallableVoid(getConnection(), getRpcControllerFactory()) {
-3867  @Override
-3868  protected Void rpcCall() throws 
Exception {
-3869
master.removeReplicationPeer(getRpcController(),
-3870  
RequestConverter.buildRemoveReplicationPeerRequest(peerId));
-3871return null;
-3872  }
-3873});
-3874  }
-3875
-3876  @Override
-3877  public void 
enableReplicationPeer(final String peerId) throws IOException {
-3878executeCallable(new 
MasterCallableVoid(getConnection(), getRpcControllerFactory()) {
-3879  @Override
-3880  protected Void rpcCall() throws 
Exception {
-3881
master.enableReplicationPeer(getRpcController(),
-3882  
RequestConverter.buildEnableReplicationPeerRequest(peerId));
-3883return null;
-3884  }
-3885});
-3886  }
-3887
-3888  @Override
-3889  public void 
disableReplicationPeer(final String peerId) throws IOException {
-3890executeCallable(new 
MasterCallableVoid(getConnection(), getRpcControllerFactory()) {
-3891  @Override
-3892  protected Void rpcCall() throws 
Exception {
-3893
master.disableReplicationPeer(getRpcController(),
-3894  
RequestConverter.buildDisableReplicationPeerRequest(peerId));
-3895return null;
-3896  }
-3897});
-3898  }
-3899
-3900  @Override
-3901  public ReplicationPeerConfig 
getReplicationPeerConfig(final String peerId) throws IOException {
-3902return executeCallable(new 
MasterCallableReplicationPeerConfig(getConnection(),
-3903getRpcControllerFactory()) {
-3904  @Override
-3905  protected ReplicationPeerConfig 
rpcCall() throws Exception {
-3906GetReplicationPeerConfigResponse 
response = master.getReplicationPeerConfig(
-3907  getRpcController(), 

[50/51] [partial] hbase-site git commit: Published site at .

2017-11-29 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/fd365a2b/apidocs/deprecated-list.html
--
diff --git a/apidocs/deprecated-list.html b/apidocs/deprecated-list.html
index 7a68ded..363d530 100644
--- a/apidocs/deprecated-list.html
+++ b/apidocs/deprecated-list.html
@@ -315,11 +315,17 @@
 
 
 
+org.apache.hadoop.hbase.rest.client.RemoteHTable.checkAndDelete(byte[],
 byte[], byte[], CompareFilter.CompareOp, byte[], Delete)
+
+
 org.apache.hadoop.hbase.client.Table.checkAndMutate(byte[],
 byte[], byte[], CompareFilter.CompareOp, byte[], RowMutations)
 Since 2.0.0. Will be 
removed in 3.0.0. Use
  Table.checkAndMutate(byte[],
 byte[], byte[], CompareOperator, byte[], RowMutations)
 
 
+
+org.apache.hadoop.hbase.rest.client.RemoteHTable.checkAndMutate(byte[],
 byte[], byte[], CompareFilter.CompareOp, byte[], RowMutations)
+
 
 org.apache.hadoop.hbase.client.Table.checkAndPut(byte[],
 byte[], byte[], CompareFilter.CompareOp, byte[], Put)
 Since 2.0.0. Will be 
removed in 3.0.0. Use
@@ -327,221 +333,224 @@
 
 
 
+org.apache.hadoop.hbase.rest.client.RemoteHTable.checkAndPut(byte[],
 byte[], byte[], CompareFilter.CompareOp, byte[], Put)
+
+
 org.apache.hadoop.hbase.CellUtil.cloneTags(Cell)
 As of HBase-2.0. Will be 
removed in HBase-3.0.
  Use RawCell.cloneTags()
 
 
-
+
 org.apache.hadoop.hbase.client.Admin.closeRegion(byte[],
 String)
 As of release 2.0.0, this 
will be removed in HBase 3.0.0.
  Use Admin.unassign(byte[],
 boolean).
 
 
-
+
 org.apache.hadoop.hbase.client.Admin.closeRegion(ServerName,
 HRegionInfo)
 As of release 2.0.0, this 
will be removed in HBase 3.0.0
  (https://issues.apache.org/jira/browse/HBASE-18231;>HBASE-18231).
  Use Admin.unassign(byte[],
 boolean).
 
 
-
+
 org.apache.hadoop.hbase.client.Admin.closeRegion(String,
 String)
 As of release 2.0.0, this 
will be removed in HBase 3.0.0.
  Use Admin.unassign(byte[],
 boolean).
 
 
-
+
 org.apache.hadoop.hbase.client.Admin.closeRegionWithEncodedRegionName(String,
 String)
 As of release 2.0.0, this 
will be removed in HBase 3.0.0.
  Use Admin.unassign(byte[],
 boolean).
 
 
-
+
 org.apache.hadoop.hbase.client.Admin.compactRegionServer(ServerName,
 boolean)
 As of release 2.0.0, this 
will be removed in HBase 3.0.0. Use
  Admin.compactRegionServer(ServerName)
 or
  Admin.majorCompactRegionServer(ServerName).
 
 
-
+
 org.apache.hadoop.hbase.CellUtil.compare(CellComparator,
 Cell, byte[], int, int)
 As of HBase-2.0. Will be 
removed in HBase-3.0
 
 
-
+
 org.apache.hadoop.hbase.filter.CompareFilter.compareFamily(CompareFilter.CompareOp,
 ByteArrayComparable, Cell)
 Since 2.0.0. Will be 
removed in 3.0.0.
  Use CompareFilter.compareFamily(CompareOperator,
 ByteArrayComparable, Cell)
 
 
-
+
 org.apache.hadoop.hbase.filter.CompareFilter.compareQualifier(CompareFilter.CompareOp,
 ByteArrayComparable, Cell)
 Since 2.0.0. Will be 
removed in 3.0.0.
  Use CompareFilter.compareQualifier(CompareOperator,
 ByteArrayComparable, Cell)
 
 
-
+
 org.apache.hadoop.hbase.filter.CompareFilter.compareRow(CompareFilter.CompareOp,
 ByteArrayComparable, Cell)
 Since 2.0.0. Will be 
removed in 3.0.0.
  Use CompareFilter.compareRow(CompareOperator,
 ByteArrayComparable, Cell)
 
 
-
+
 org.apache.hadoop.hbase.filter.CompareFilter.compareValue(CompareFilter.CompareOp,
 ByteArrayComparable, Cell)
 Since 2.0.0. Will be 
removed in 3.0.0.
  Use CompareFilter.compareValue(CompareOperator,
 ByteArrayComparable, Cell)
 
 
-
+
 org.apache.hadoop.hbase.HRegionInfo.convert(HBaseProtos.RegionInfo)
 As of release 2.0.0, this 
will be removed in HBase 3.0.0
  Use toRegionInfo(HBaseProtos.RegionInfo)
  in 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil.
 
 
-
+
 org.apache.hadoop.hbase.HRegionInfo.convert(HRegionInfo)
 As of release 2.0.0, this 
will be removed in HBase 3.0.0
  Use toRegionInfo(org.apache.hadoop.hbase.client.RegionInfo)
  in 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil.
 
 
-
+
 org.apache.hadoop.hbase.CellUtil.copyTagTo(Cell,
 byte[], int)
 As of HBase-2.0. Will be 
removed in HBase-3.0.
 
 
-
+
 org.apache.hadoop.hbase.CellUtil.copyTagTo(Cell,
 ByteBuffer, int)
 As of HBase-2.0. Will be 
removed in 3.0.
 
 
-
+
 org.apache.hadoop.hbase.mapreduce.CellCreator.create(byte[],
 int, int, byte[], int, int, byte[], int, int, long, byte[], int, int, 
String)
 
-
+
 org.apache.hadoop.hbase.CellUtil.createCell(byte[])
 As of release 2.0.0, this 
will be removed in HBase 3.0.0. Use CellBuilder
  instead
 
 
-
+
 org.apache.hadoop.hbase.CellUtil.createCell(byte[],
 byte[])
 As of release 2.0.0, this 
will be removed in HBase 3.0.0. Use CellBuilder
  instead
 
 
-
+
 org.apache.hadoop.hbase.CellUtil.createCell(byte[],
 byte[], byte[])
 As of release 2.0.0, this 
will be removed in HBase 3.0.0.
  Use CellBuilder 

[47/51] [partial] hbase-site git commit: Published site at .

2017-11-29 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/fd365a2b/apidocs/org/apache/hadoop/hbase/client/AsyncAdmin.html
--
diff --git a/apidocs/org/apache/hadoop/hbase/client/AsyncAdmin.html 
b/apidocs/org/apache/hadoop/hbase/client/AsyncAdmin.html
index 3af5a4d..8f33987 100644
--- a/apidocs/org/apache/hadoop/hbase/client/AsyncAdmin.html
+++ b/apidocs/org/apache/hadoop/hbase/client/AsyncAdmin.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":6,"i1":6,"i2":18,"i3":6,"i4":6,"i5":6,"i6":18,"i7":6,"i8":6,"i9":6,"i10":6,"i11":6,"i12":6,"i13":6,"i14":6,"i15":6,"i16":6,"i17":6,"i18":6,"i19":6,"i20":6,"i21":6,"i22":6,"i23":6,"i24":6,"i25":6,"i26":6,"i27":6,"i28":6,"i29":6,"i30":6,"i31":6,"i32":6,"i33":6,"i34":6,"i35":6,"i36":6,"i37":6,"i38":6,"i39":6,"i40":6,"i41":6,"i42":6,"i43":6,"i44":18,"i45":6,"i46":6,"i47":6,"i48":6,"i49":6,"i50":6,"i51":6,"i52":6,"i53":18,"i54":18,"i55":18,"i56":6,"i57":6,"i58":6,"i59":6,"i60":6,"i61":6,"i62":6,"i63":18,"i64":6,"i65":6,"i66":6,"i67":6,"i68":6,"i69":6,"i70":6,"i71":6,"i72":6,"i73":6,"i74":6,"i75":6,"i76":6,"i77":6,"i78":6,"i79":18,"i80":6,"i81":6,"i82":6,"i83":6,"i84":6,"i85":6,"i86":6,"i87":18,"i88":6,"i89":6,"i90":6,"i91":18,"i92":6,"i93":6,"i94":6,"i95":6,"i96":6,"i97":6,"i98":6,"i99":6,"i100":6,"i101":6,"i102":6,"i103":6,"i104":6,"i105":6,"i106":6,"i107":6,"i108":6,"i109":6,"i110":6,"i111":6,"i112":6,"i113":6,"i114":6,"i115":6,"i116":6,"i117":6,"i118":6,"i119":6,"i
 
120":6,"i121":6,"i122":6,"i123":18,"i124":18,"i125":6,"i126":6,"i127":6,"i128":6,"i129":6,"i130":6,"i131":6,"i132":6,"i133":6,"i134":6,"i135":6,"i136":6,"i137":6};
+var methods = 
{"i0":6,"i1":6,"i2":18,"i3":6,"i4":6,"i5":6,"i6":18,"i7":6,"i8":6,"i9":6,"i10":6,"i11":6,"i12":6,"i13":6,"i14":18,"i15":18,"i16":6,"i17":6,"i18":6,"i19":6,"i20":6,"i21":6,"i22":6,"i23":6,"i24":6,"i25":6,"i26":6,"i27":6,"i28":6,"i29":6,"i30":6,"i31":6,"i32":6,"i33":6,"i34":6,"i35":6,"i36":6,"i37":6,"i38":6,"i39":6,"i40":6,"i41":6,"i42":6,"i43":6,"i44":6,"i45":6,"i46":18,"i47":6,"i48":6,"i49":18,"i50":6,"i51":6,"i52":6,"i53":6,"i54":6,"i55":6,"i56":18,"i57":18,"i58":18,"i59":6,"i60":6,"i61":6,"i62":6,"i63":6,"i64":6,"i65":6,"i66":18,"i67":6,"i68":6,"i69":6,"i70":6,"i71":6,"i72":6,"i73":6,"i74":6,"i75":6,"i76":6,"i77":6,"i78":6,"i79":6,"i80":6,"i81":6,"i82":18,"i83":6,"i84":6,"i85":6,"i86":6,"i87":6,"i88":6,"i89":6,"i90":18,"i91":6,"i92":6,"i93":6,"i94":18,"i95":6,"i96":6,"i97":6,"i98":6,"i99":6,"i100":18,"i101":18,"i102":6,"i103":6,"i104":6,"i105":6,"i106":6,"i107":6,"i108":6,"i109":6,"i110":6,"i111":6,"i112":6,"i113":6,"i114":6,"i115":6,"i116":6,"i117":6,"i118":6,"i119"
 
:6,"i120":6,"i121":6,"i122":6,"i123":6,"i124":6,"i125":6,"i126":6,"i127":6,"i128":18,"i129":18,"i130":6,"i131":6,"i132":6,"i133":6,"i134":6,"i135":6,"i136":6,"i137":6,"i138":6,"i139":6,"i140":6,"i141":6,"i142":6};
 var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],4:["t3","Abstract Methods"],16:["t5","Default Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -217,13 +217,13 @@ public interface 
-http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true;
 title="class or interface in java.lang">Void
+default http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true;
 title="class or interface in java.lang">Void
 compact(TableNametableName)
 Compact a table.
 
 
 
-http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true;
 title="class or interface in java.lang">Void
+default http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true;
 title="class or interface in java.lang">Void
 compact(TableNametableName,
byte[]columnFamily)
 Compact a column family within a table.
@@ -231,31 +231,46 @@ public interface 
 http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true;
 title="class or interface in java.lang">Void
+compact(TableNametableName,
+   byte[]columnFamily,
+   CompactTypecompactType)
+Compact a column family within a table.
+
+
+

[34/51] [partial] hbase-site git commit: Published site at .

2017-11-29 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/fd365a2b/devapidocs/org/apache/hadoop/hbase/client/AsyncBatchRpcRetryingCaller.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/AsyncBatchRpcRetryingCaller.html 
b/devapidocs/org/apache/hadoop/hbase/client/AsyncBatchRpcRetryingCaller.html
index be4e92d..fdade1a 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/AsyncBatchRpcRetryingCaller.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/AsyncBatchRpcRetryingCaller.html
@@ -273,8 +273,9 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 private 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MultiRequest
-buildReq(http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">Mapbyte[],AsyncBatchRpcRetryingCaller.RegionRequestactionsByRegion,
-http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListCellScannablecells)
+buildReq(http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">Mapbyte[],AsyncBatchRpcRetryingCaller.RegionRequestactionsByRegion,
+http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListCellScannablecells,
+http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">Maphttp://docs.oracle.com/javase/8/docs/api/java/lang/Integer.html?is-external=true;
 title="class or interface in java.lang">Integer,http://docs.oracle.com/javase/8/docs/api/java/lang/Integer.html?is-external=true;
 title="class or interface in 
java.lang">IntegerrowMutationsIndexMap)
 
 
 http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">Listhttp://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFutureT
@@ -634,14 +635,15 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
  inttries)
 
 
-
+
 
 
 
 
 buildReq
 
privateorg.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MultiRequestbuildReq(http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">Mapbyte[],AsyncBatchRpcRetryingCaller.RegionRequestactionsByRegion,
-   
  http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListCellScannablecells)
+   
  http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListCellScannablecells,
+   
  http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">Maphttp://docs.oracle.com/javase/8/docs/api/java/lang/Integer.html?is-external=true;
 title="class or interface in java.lang">Integer,http://docs.oracle.com/javase/8/docs/api/java/lang/Integer.html?is-external=true;
 title="class or interface in 
java.lang">IntegerrowMutationsIndexMap)

   throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 
 Throws:
@@ -655,7 +657,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 onComplete
-privatevoidonComplete(Actionaction,
+privatevoidonComplete(Actionaction,
 AsyncBatchRpcRetryingCaller.RegionRequestregionReq,
 inttries,
 ServerNameserverName,
@@ -669,7 +671,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 onComplete
-privatevoidonComplete(http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">Mapbyte[],AsyncBatchRpcRetryingCaller.RegionRequestactionsByRegion,
+privatevoidonComplete(http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">Mapbyte[],AsyncBatchRpcRetryingCaller.RegionRequestactionsByRegion,
 inttries,
 ServerNameserverName,
 MultiResponseresp)
@@ -681,7 +683,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 send

[28/51] [partial] hbase-site git commit: Published site at .

2017-11-29 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/fd365a2b/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.html 
b/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.html
index 675c3af..4f79024 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.html
@@ -453,25 +453,27 @@ implements 
-http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true;
 title="class or interface in java.lang">Void
-compact(TableNametableName)
-Compact a table.
+private http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true;
 title="class or interface in java.lang">Void
+compact(TableNametableName,
+   byte[]columnFamily,
+   booleanmajor,
+   CompactTypecompactType)
+Compact column family of a table, Asynchronous operation 
even if CompletableFuture.get()
 
 
 
 http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true;
 title="class or interface in java.lang">Void
-compact(TableNametableName,
-   byte[]columnFamily)
+compact(TableNametableName,
+   byte[]columnFamily,
+   CompactTypecompactType)
 Compact a column family within a table.
 
 
 
-private http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true;
 title="class or interface in java.lang">Void
-compact(TableNametableName,
-   byte[]columnFamily,
-   booleanmajor,
+http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true;
 title="class or interface in java.lang">Void
+compact(TableNametableName,
CompactTypecompactType)
-Compact column family of a table, Asynchronous operation 
even if CompletableFuture.get()
+Compact a table.
 
 
 
@@ -711,7 +713,8 @@ implements 
 http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFutureCompactionState
-getCompactionState(TableNametableName)
+getCompactionState(TableNametableName,
+  CompactTypecompactType)
 Get the current compaction state of a table.
 
 
@@ -1052,15 +1055,17 @@ implements 
 http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true;
 title="class or interface in java.lang">Void
-majorCompact(TableNametableName)
-Major compact a table.
+majorCompact(TableNametableName,
+byte[]columnFamily,
+CompactTypecompactType)
+Major compact a column family within a table.
 
 
 
 http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true;
 title="class or interface in java.lang">Void
-majorCompact(TableNametableName,
-byte[]columnFamily)
-Major compact a column family within a table.
+majorCompact(TableNametableName,
+CompactTypecompactType)
+Major compact a table.
 
 
 
@@ -1374,7 +1379,7 @@ implements AsyncAdmin
-addReplicationPeer,
 balance,
 getBackupMasters,
 getMaster,
 getMasterCoprocessors,
 getMasterInfoPort,
 getRegionServers,
 listDeadServers,
 listTableDescriptors,
 listTableNames,
 snapshot,
 snapshot
+addReplicationPeer,
 balance,
 compact,
 compact,
 getBackupMasters,
 getCompactionState,
 getMaster,
 getMasterCoprocessors, getMasterInfoPort,
 getRegionServers,
 listDeadServers,
 listTableDescriptors,
 listTableNames,
 majorCompact,
 majorCompact,
 snapshot, snapshot
 
 
 
@@ -2218,43 +2223,46 @@ implements 
+
 
 
 
 
 compact
-publichttp://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in 

[10/51] [partial] hbase-site git commit: Published site at .

2017-11-29 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/fd365a2b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.EnableTableFuture.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.EnableTableFuture.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.EnableTableFuture.html
index d438f22..7c59e27 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.EnableTableFuture.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.EnableTableFuture.html
@@ -1290,8 +1290,8 @@
 1282   CompactType 
compactType) throws IOException {
 1283switch (compactType) {
 1284  case MOB:
-1285
compact(this.connection.getAdminForMaster(), getMobRegionInfo(tableName), 
major,
-1286  columnFamily);
+1285
compact(this.connection.getAdminForMaster(), 
RegionInfo.createMobRegionInfo(tableName),
+1286major, columnFamily);
 1287break;
 1288  case NORMAL:
 1289checkTableExists(tableName);
@@ -3248,7 +3248,7 @@
 3240  new 
CallableAdminProtos.GetRegionInfoResponse.CompactionState() {
 3241@Override
 3242public 
AdminProtos.GetRegionInfoResponse.CompactionState call() throws Exception {
-3243  RegionInfo info = 
getMobRegionInfo(tableName);
+3243  RegionInfo info = 
RegionInfo.createMobRegionInfo(tableName);
 3244  GetRegionInfoRequest 
request =
 3245
RequestConverter.buildGetRegionInfoRequest(info.getRegionName(), true);
 3246  GetRegionInfoResponse 
response = masterAdmin.getRegionInfo(rpcController, request);
@@ -3312,7 +3312,7 @@
 3304}
 3305break;
 3306  default:
-3307throw new 
IllegalArgumentException("Unknowne compactType: " + compactType);
+3307throw new 
IllegalArgumentException("Unknown compactType: " + compactType);
 3308}
 3309if (state != null) {
 3310  return 
ProtobufUtil.createCompactionState(state);
@@ -3847,325 +3847,320 @@
 3839});
 3840  }
 3841
-3842  private RegionInfo 
getMobRegionInfo(TableName tableName) {
-3843return 
RegionInfoBuilder.newBuilder(tableName).setStartKey(Bytes.toBytes(".mob")).setRegionId(0)
-3844.build();
-3845  }
-3846
-3847  private RpcControllerFactory 
getRpcControllerFactory() {
-3848return this.rpcControllerFactory;
-3849  }
-3850
-3851  @Override
-3852  public void addReplicationPeer(String 
peerId, ReplicationPeerConfig peerConfig, boolean enabled)
-3853  throws IOException {
-3854executeCallable(new 
MasterCallableVoid(getConnection(), getRpcControllerFactory()) {
-3855  @Override
-3856  protected Void rpcCall() throws 
Exception {
-3857
master.addReplicationPeer(getRpcController(),
-3858  
RequestConverter.buildAddReplicationPeerRequest(peerId, peerConfig, 
enabled));
-3859return null;
-3860  }
-3861});
-3862  }
-3863
-3864  @Override
-3865  public void 
removeReplicationPeer(String peerId) throws IOException {
-3866executeCallable(new 
MasterCallableVoid(getConnection(), getRpcControllerFactory()) {
-3867  @Override
-3868  protected Void rpcCall() throws 
Exception {
-3869
master.removeReplicationPeer(getRpcController(),
-3870  
RequestConverter.buildRemoveReplicationPeerRequest(peerId));
-3871return null;
-3872  }
-3873});
-3874  }
-3875
-3876  @Override
-3877  public void 
enableReplicationPeer(final String peerId) throws IOException {
-3878executeCallable(new 
MasterCallableVoid(getConnection(), getRpcControllerFactory()) {
-3879  @Override
-3880  protected Void rpcCall() throws 
Exception {
-3881
master.enableReplicationPeer(getRpcController(),
-3882  
RequestConverter.buildEnableReplicationPeerRequest(peerId));
-3883return null;
-3884  }
-3885});
-3886  }
-3887
-3888  @Override
-3889  public void 
disableReplicationPeer(final String peerId) throws IOException {
-3890executeCallable(new 
MasterCallableVoid(getConnection(), getRpcControllerFactory()) {
-3891  @Override
-3892  protected Void rpcCall() throws 
Exception {
-3893
master.disableReplicationPeer(getRpcController(),
-3894  
RequestConverter.buildDisableReplicationPeerRequest(peerId));
-3895return null;
-3896  }
-3897});
-3898  }
-3899
-3900  @Override
-3901  public ReplicationPeerConfig 
getReplicationPeerConfig(final String peerId) throws IOException {
-3902return executeCallable(new 
MasterCallableReplicationPeerConfig(getConnection(),
-3903getRpcControllerFactory()) {
-3904  @Override
-3905  protected ReplicationPeerConfig 
rpcCall() throws Exception {
-3906GetReplicationPeerConfigResponse 
response = master.getReplicationPeerConfig(
-3907  getRpcController(), 

[19/51] [partial] hbase-site git commit: Published site at .

2017-11-29 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/fd365a2b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncBatchRpcRetryingCaller.ServerRequest.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncBatchRpcRetryingCaller.ServerRequest.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncBatchRpcRetryingCaller.ServerRequest.html
index 8ba8dc9..f973938 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncBatchRpcRetryingCaller.ServerRequest.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncBatchRpcRetryingCaller.ServerRequest.html
@@ -37,36 +37,36 @@
 029import java.io.IOException;
 030import java.util.ArrayList;
 031import java.util.Collections;
-032import java.util.IdentityHashMap;
-033import java.util.List;
-034import java.util.Map;
-035import java.util.Optional;
-036import 
java.util.concurrent.CompletableFuture;
-037import 
java.util.concurrent.ConcurrentHashMap;
-038import 
java.util.concurrent.ConcurrentLinkedQueue;
-039import 
java.util.concurrent.ConcurrentMap;
-040import 
java.util.concurrent.ConcurrentSkipListMap;
-041import java.util.concurrent.TimeUnit;
-042import java.util.function.Supplier;
-043import java.util.stream.Collectors;
-044import java.util.stream.Stream;
-045
-046import org.apache.commons.logging.Log;
-047import 
org.apache.commons.logging.LogFactory;
-048import 
org.apache.hadoop.hbase.CellScannable;
-049import 
org.apache.hadoop.hbase.DoNotRetryIOException;
-050import 
org.apache.hadoop.hbase.HRegionLocation;
-051import 
org.apache.hadoop.hbase.ServerName;
-052import 
org.apache.hadoop.hbase.TableName;
-053import 
org.apache.yetus.audience.InterfaceAudience;
-054import 
org.apache.hadoop.hbase.client.MultiResponse.RegionResult;
-055import 
org.apache.hadoop.hbase.client.RetriesExhaustedException.ThrowableWithExtraContext;
-056import 
org.apache.hadoop.hbase.ipc.HBaseRpcController;
-057import 
org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
-058import 
org.apache.hadoop.hbase.shaded.protobuf.ResponseConverter;
-059import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
-060import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService;
-061import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
+032import java.util.HashMap;
+033import java.util.IdentityHashMap;
+034import java.util.List;
+035import java.util.Map;
+036import java.util.Optional;
+037import 
java.util.concurrent.CompletableFuture;
+038import 
java.util.concurrent.ConcurrentHashMap;
+039import 
java.util.concurrent.ConcurrentLinkedQueue;
+040import 
java.util.concurrent.ConcurrentMap;
+041import 
java.util.concurrent.ConcurrentSkipListMap;
+042import java.util.concurrent.TimeUnit;
+043import java.util.function.Supplier;
+044import java.util.stream.Collectors;
+045import java.util.stream.Stream;
+046
+047import org.apache.commons.logging.Log;
+048import 
org.apache.commons.logging.LogFactory;
+049import 
org.apache.hadoop.hbase.CellScannable;
+050import 
org.apache.hadoop.hbase.DoNotRetryIOException;
+051import 
org.apache.hadoop.hbase.HRegionLocation;
+052import 
org.apache.hadoop.hbase.ServerName;
+053import 
org.apache.hadoop.hbase.TableName;
+054import 
org.apache.yetus.audience.InterfaceAudience;
+055import 
org.apache.hadoop.hbase.client.MultiResponse.RegionResult;
+056import 
org.apache.hadoop.hbase.client.RetriesExhaustedException.ThrowableWithExtraContext;
+057import 
org.apache.hadoop.hbase.ipc.HBaseRpcController;
+058import 
org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
+059import 
org.apache.hadoop.hbase.shaded.protobuf.ResponseConverter;
+060import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
+061import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService;
 062import 
org.apache.hadoop.hbase.util.Bytes;
 063import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 064
@@ -240,212 +240,208 @@
 232  }
 233
 234  private ClientProtos.MultiRequest 
buildReq(Mapbyte[], RegionRequest actionsByRegion,
-235  ListCellScannable cells) 
throws IOException {
+235  ListCellScannable cells, 
MapInteger, Integer rowMutationsIndexMap) throws IOException {
 236ClientProtos.MultiRequest.Builder 
multiRequestBuilder = ClientProtos.MultiRequest.newBuilder();
 237ClientProtos.RegionAction.Builder 
regionActionBuilder = ClientProtos.RegionAction.newBuilder();
 238ClientProtos.Action.Builder 
actionBuilder = ClientProtos.Action.newBuilder();
 239ClientProtos.MutationProto.Builder 
mutationBuilder = ClientProtos.MutationProto.newBuilder();
 240for (Map.Entrybyte[], 
RegionRequest entry : actionsByRegion.entrySet()) {
-241  // TODO: remove the extra for loop 
as we will iterate it in mutationBuilder.
-242  if 
(!multiRequestBuilder.hasNonceGroup()) {
-243for (Action action : 

[31/51] [partial] hbase-site git commit: Published site at .

2017-11-29 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/fd365a2b/devapidocs/org/apache/hadoop/hbase/client/HBaseAdmin.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/HBaseAdmin.html 
b/devapidocs/org/apache/hadoop/hbase/client/HBaseAdmin.html
index 2f81057..d5bcd27 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/HBaseAdmin.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/HBaseAdmin.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":9,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":42,"i27":42,"i28":42,"i29":42,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":42,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":10,"i65":10,"i66":10,"i67":10,"i68":10,"i69":10,"i70":10,"i71":10,"i72":10,"i73":10,"i74":10,"i75":10,"i76":10,"i77":10,"i78":10,"i79":9,"i80":10,"i81":10,"i82":9,"i83":10,"i84":10,"i85":10,"i86":10,"i87":10,"i88":10,"i89":10,"i90":10,"i91":10,"i92":10,"i93":41,"i94":10,"i95":10,"i96":10,"i97":10,"i98":10,"i99":10,"i100":42,"i101":10,"i102":10,"i103":10,"i104":10,"i105":10,"i106":10,"i107":10,"i108":10,"i109
 
":10,"i110":10,"i111":10,"i112":10,"i113":10,"i114":10,"i115":9,"i116":10,"i117":10,"i118":10,"i119":42,"i120":10,"i121":10,"i122":10,"i123":10,"i124":10,"i125":10,"i126":10,"i127":10,"i128":10,"i129":10,"i130":10,"i131":10,"i132":10,"i133":10,"i134":10,"i135":10,"i136":10,"i137":10,"i138":10,"i139":10,"i140":10,"i141":10,"i142":10,"i143":10,"i144":10,"i145":10,"i146":10,"i147":10,"i148":10,"i149":10,"i150":10,"i151":10,"i152":10,"i153":10,"i154":10,"i155":10,"i156":10,"i157":10,"i158":10,"i159":10,"i160":10,"i161":10,"i162":10,"i163":10,"i164":10,"i165":10,"i166":10,"i167":10,"i168":10,"i169":10,"i170":10,"i171":42,"i172":10,"i173":10,"i174":10,"i175":10,"i176":10,"i177":10,"i178":10,"i179":10,"i180":10,"i181":10,"i182":10,"i183":10,"i184":10,"i185":10,"i186":10,"i187":10,"i188":10,"i189":10,"i190":10,"i191":10,"i192":10,"i193":10,"i194":10,"i195":10,"i196":10,"i197":42,"i198":10,"i199":10,"i200":10,"i201":10,"i202":10,"i203":10,"i204":10,"i205":10,"i206":10,"i207":10,"i208":10,"i2
 
09":10,"i210":10,"i211":10,"i212":10,"i213":10,"i214":10,"i215":10,"i216":10,"i217":10,"i218":10,"i219":10,"i220":10,"i221":10,"i222":10,"i223":10,"i224":10,"i225":10,"i226":10,"i227":10,"i228":10};
+var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":9,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":42,"i27":42,"i28":42,"i29":42,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":42,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":10,"i65":10,"i66":10,"i67":10,"i68":10,"i69":10,"i70":10,"i71":10,"i72":10,"i73":10,"i74":10,"i75":10,"i76":10,"i77":10,"i78":10,"i79":9,"i80":10,"i81":10,"i82":9,"i83":10,"i84":10,"i85":10,"i86":10,"i87":10,"i88":10,"i89":10,"i90":10,"i91":10,"i92":10,"i93":41,"i94":10,"i95":10,"i96":10,"i97":10,"i98":10,"i99":42,"i100":10,"i101":10,"i102":10,"i103":10,"i104":10,"i105":10,"i106":10,"i107":10,"i108":10,"i109
 
":10,"i110":10,"i111":10,"i112":10,"i113":10,"i114":9,"i115":10,"i116":10,"i117":10,"i118":42,"i119":10,"i120":10,"i121":10,"i122":10,"i123":10,"i124":10,"i125":10,"i126":10,"i127":10,"i128":10,"i129":10,"i130":10,"i131":10,"i132":10,"i133":10,"i134":10,"i135":10,"i136":10,"i137":10,"i138":10,"i139":10,"i140":10,"i141":10,"i142":10,"i143":10,"i144":10,"i145":10,"i146":10,"i147":10,"i148":10,"i149":10,"i150":10,"i151":10,"i152":10,"i153":10,"i154":10,"i155":10,"i156":10,"i157":10,"i158":10,"i159":10,"i160":10,"i161":10,"i162":10,"i163":10,"i164":10,"i165":10,"i166":10,"i167":10,"i168":10,"i169":10,"i170":42,"i171":10,"i172":10,"i173":10,"i174":10,"i175":10,"i176":10,"i177":10,"i178":10,"i179":10,"i180":10,"i181":10,"i182":10,"i183":10,"i184":10,"i185":10,"i186":10,"i187":10,"i188":10,"i189":10,"i190":10,"i191":10,"i192":10,"i193":10,"i194":10,"i195":10,"i196":42,"i197":10,"i198":10,"i199":10,"i200":10,"i201":10,"i202":10,"i203":10,"i204":10,"i205":10,"i206":10,"i207":10,"i208":10,"i2
 
09":10,"i210":10,"i211":10,"i212":10,"i213":10,"i214":10,"i215":10,"i216":10,"i217":10,"i218":10,"i219":10,"i220":10,"i221":10,"i222":10,"i223":10,"i224":10,"i225":10,"i226":10,"i227":10};
 var tabs = {65535:["t0","All Methods"],1:["t1","Static 

[02/51] [partial] hbase-site git commit: Published site at .

2017-11-29 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/fd365a2b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.SplitTableRegionFuture.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.SplitTableRegionFuture.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.SplitTableRegionFuture.html
index d438f22..7c59e27 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.SplitTableRegionFuture.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.SplitTableRegionFuture.html
@@ -1290,8 +1290,8 @@
 1282   CompactType 
compactType) throws IOException {
 1283switch (compactType) {
 1284  case MOB:
-1285
compact(this.connection.getAdminForMaster(), getMobRegionInfo(tableName), 
major,
-1286  columnFamily);
+1285
compact(this.connection.getAdminForMaster(), 
RegionInfo.createMobRegionInfo(tableName),
+1286major, columnFamily);
 1287break;
 1288  case NORMAL:
 1289checkTableExists(tableName);
@@ -3248,7 +3248,7 @@
 3240  new 
CallableAdminProtos.GetRegionInfoResponse.CompactionState() {
 3241@Override
 3242public 
AdminProtos.GetRegionInfoResponse.CompactionState call() throws Exception {
-3243  RegionInfo info = 
getMobRegionInfo(tableName);
+3243  RegionInfo info = 
RegionInfo.createMobRegionInfo(tableName);
 3244  GetRegionInfoRequest 
request =
 3245
RequestConverter.buildGetRegionInfoRequest(info.getRegionName(), true);
 3246  GetRegionInfoResponse 
response = masterAdmin.getRegionInfo(rpcController, request);
@@ -3312,7 +3312,7 @@
 3304}
 3305break;
 3306  default:
-3307throw new 
IllegalArgumentException("Unknowne compactType: " + compactType);
+3307throw new 
IllegalArgumentException("Unknown compactType: " + compactType);
 3308}
 3309if (state != null) {
 3310  return 
ProtobufUtil.createCompactionState(state);
@@ -3847,325 +3847,320 @@
 3839});
 3840  }
 3841
-3842  private RegionInfo 
getMobRegionInfo(TableName tableName) {
-3843return 
RegionInfoBuilder.newBuilder(tableName).setStartKey(Bytes.toBytes(".mob")).setRegionId(0)
-3844.build();
-3845  }
-3846
-3847  private RpcControllerFactory 
getRpcControllerFactory() {
-3848return this.rpcControllerFactory;
-3849  }
-3850
-3851  @Override
-3852  public void addReplicationPeer(String 
peerId, ReplicationPeerConfig peerConfig, boolean enabled)
-3853  throws IOException {
-3854executeCallable(new 
MasterCallableVoid(getConnection(), getRpcControllerFactory()) {
-3855  @Override
-3856  protected Void rpcCall() throws 
Exception {
-3857
master.addReplicationPeer(getRpcController(),
-3858  
RequestConverter.buildAddReplicationPeerRequest(peerId, peerConfig, 
enabled));
-3859return null;
-3860  }
-3861});
-3862  }
-3863
-3864  @Override
-3865  public void 
removeReplicationPeer(String peerId) throws IOException {
-3866executeCallable(new 
MasterCallableVoid(getConnection(), getRpcControllerFactory()) {
-3867  @Override
-3868  protected Void rpcCall() throws 
Exception {
-3869
master.removeReplicationPeer(getRpcController(),
-3870  
RequestConverter.buildRemoveReplicationPeerRequest(peerId));
-3871return null;
-3872  }
-3873});
-3874  }
-3875
-3876  @Override
-3877  public void 
enableReplicationPeer(final String peerId) throws IOException {
-3878executeCallable(new 
MasterCallableVoid(getConnection(), getRpcControllerFactory()) {
-3879  @Override
-3880  protected Void rpcCall() throws 
Exception {
-3881
master.enableReplicationPeer(getRpcController(),
-3882  
RequestConverter.buildEnableReplicationPeerRequest(peerId));
-3883return null;
-3884  }
-3885});
-3886  }
-3887
-3888  @Override
-3889  public void 
disableReplicationPeer(final String peerId) throws IOException {
-3890executeCallable(new 
MasterCallableVoid(getConnection(), getRpcControllerFactory()) {
-3891  @Override
-3892  protected Void rpcCall() throws 
Exception {
-3893
master.disableReplicationPeer(getRpcController(),
-3894  
RequestConverter.buildDisableReplicationPeerRequest(peerId));
-3895return null;
-3896  }
-3897});
-3898  }
-3899
-3900  @Override
-3901  public ReplicationPeerConfig 
getReplicationPeerConfig(final String peerId) throws IOException {
-3902return executeCallable(new 
MasterCallableReplicationPeerConfig(getConnection(),
-3903getRpcControllerFactory()) {
-3904  @Override
-3905  protected ReplicationPeerConfig 
rpcCall() throws Exception {
-3906GetReplicationPeerConfigResponse 
response = master.getReplicationPeerConfig(
-3907  getRpcController(), 

[37/51] [partial] hbase-site git commit: Published site at .

2017-11-29 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/fd365a2b/devapidocs/org/apache/hadoop/hbase/class-use/TableName.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/class-use/TableName.html 
b/devapidocs/org/apache/hadoop/hbase/class-use/TableName.html
index b34f1b9..6d59a2e 100644
--- a/devapidocs/org/apache/hadoop/hbase/class-use/TableName.html
+++ b/devapidocs/org/apache/hadoop/hbase/class-use/TableName.html
@@ -2710,62 +2710,44 @@ service.
   TableNametableName)
 
 
-http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true;
 title="class or interface in java.lang">Void
-AsyncHBaseAdmin.compact(TableNametableName)
-
-
 void
 Admin.compact(TableNametableName)
 Compact a table.
 
 
-
+
 void
 HBaseAdmin.compact(TableNametableName)
 Compact a table.
 
 
-
-http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true;
 title="class or interface in java.lang">Void
+
+default http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true;
 title="class or interface in java.lang">Void
 AsyncAdmin.compact(TableNametableName)
 Compact a table.
 
 
-
-http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true;
 title="class or interface in java.lang">Void
-RawAsyncHBaseAdmin.compact(TableNametableName)
-
 
-http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true;
 title="class or interface in java.lang">Void
-AsyncHBaseAdmin.compact(TableNametableName,
-   byte[]columnFamily)
-
-
 void
 Admin.compact(TableNametableName,
byte[]columnFamily)
 Compact a column family within a table.
 
 
-
+
 void
 HBaseAdmin.compact(TableNametableName,
byte[]columnFamily)
 Compact a column family within a table.
 
 
-
-http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true;
 title="class or interface in java.lang">Void
+
+default http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true;
 title="class or interface in java.lang">Void
 AsyncAdmin.compact(TableNametableName,
byte[]columnFamily)
 Compact a column family within a table.
 
 
-
-http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true;
 title="class or interface in java.lang">Void
-RawAsyncHBaseAdmin.compact(TableNametableName,
-   byte[]columnFamily)
-
 
 private void
 HBaseAdmin.compact(TableNametableName,
@@ -2785,6 +2767,12 @@ service.
 
 
 
+http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true;
 title="class or interface in java.lang">Void
+AsyncHBaseAdmin.compact(TableNametableName,
+   byte[]columnFamily,
+   CompactTypecompactType)
+
+
 void
 Admin.compact(TableNametableName,
byte[]columnFamily,
@@ -2792,7 +2780,7 @@ service.
 Compact a column family within a table.
 
 
-
+
 void
 HBaseAdmin.compact(TableNametableName,
byte[]columnFamily,
@@ -2800,6 +2788,25 @@ service.
 Compact a column family within a table.
 
 
+
+http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true;
 title="class or interface in java.lang">Void
+AsyncAdmin.compact(TableNametableName,
+   byte[]columnFamily,
+   CompactTypecompactType)
+Compact a 

[42/51] [partial] hbase-site git commit: Published site at .

2017-11-29 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/fd365a2b/apidocs/src-html/org/apache/hadoop/hbase/client/AsyncTable.CoprocessorCallback.html
--
diff --git 
a/apidocs/src-html/org/apache/hadoop/hbase/client/AsyncTable.CoprocessorCallback.html
 
b/apidocs/src-html/org/apache/hadoop/hbase/client/AsyncTable.CoprocessorCallback.html
index c2c122a..7cece5c 100644
--- 
a/apidocs/src-html/org/apache/hadoop/hbase/client/AsyncTable.CoprocessorCallback.html
+++ 
b/apidocs/src-html/org/apache/hadoop/hbase/client/AsyncTable.CoprocessorCallback.html
@@ -439,11 +439,11 @@
 431  }
 432
 433  /**
-434   * Method that does a batch call on 
Deletes, Gets, Puts, Increments and Appends. The ordering of
-435   * execution of the actions is not 
defined. Meaning if you do a Put and a Get in the same
-436   * {@link #batch} call, you will not 
necessarily be guaranteed that the Get returns what the Put
-437   * had put.
-438   * @param actions list of Get, Put, 
Delete, Increment, Append objects
+434   * Method that does a batch call on 
Deletes, Gets, Puts, Increments, Appends and RowMutations. The
+435   * ordering of execution of the actions 
is not defined. Meaning if you do a Put and a Get in the
+436   * same {@link #batch} call, you will 
not necessarily be guaranteed that the Get returns what the
+437   * Put had put.
+438   * @param actions list of Get, Put, 
Delete, Increment, Append, and RowMutations objects
 439   * @return A list of {@link 
CompletableFuture}s that represent the result for each action.
 440   */
 441  T 
ListCompletableFutureT batch(List? extends Row 
actions);
@@ -451,7 +451,7 @@
 443  /**
 444   * A simple version of batch. It will 
fail if there are any failures and you will get the whole
 445   * result list at once if the operation 
is succeeded.
-446   * @param actions list of Get, Put, 
Delete, Increment, Append objects
+446   * @param actions list of Get, Put, 
Delete, Increment, Append and RowMutations objects
 447   * @return A list of the result for the 
actions. Wrapped by a {@link CompletableFuture}.
 448   */
 449  default T 
CompletableFutureListT batchAll(List? extends Row 
actions) {

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/fd365a2b/apidocs/src-html/org/apache/hadoop/hbase/client/AsyncTable.html
--
diff --git a/apidocs/src-html/org/apache/hadoop/hbase/client/AsyncTable.html 
b/apidocs/src-html/org/apache/hadoop/hbase/client/AsyncTable.html
index c2c122a..7cece5c 100644
--- a/apidocs/src-html/org/apache/hadoop/hbase/client/AsyncTable.html
+++ b/apidocs/src-html/org/apache/hadoop/hbase/client/AsyncTable.html
@@ -439,11 +439,11 @@
 431  }
 432
 433  /**
-434   * Method that does a batch call on 
Deletes, Gets, Puts, Increments and Appends. The ordering of
-435   * execution of the actions is not 
defined. Meaning if you do a Put and a Get in the same
-436   * {@link #batch} call, you will not 
necessarily be guaranteed that the Get returns what the Put
-437   * had put.
-438   * @param actions list of Get, Put, 
Delete, Increment, Append objects
+434   * Method that does a batch call on 
Deletes, Gets, Puts, Increments, Appends and RowMutations. The
+435   * ordering of execution of the actions 
is not defined. Meaning if you do a Put and a Get in the
+436   * same {@link #batch} call, you will 
not necessarily be guaranteed that the Get returns what the
+437   * Put had put.
+438   * @param actions list of Get, Put, 
Delete, Increment, Append, and RowMutations objects
 439   * @return A list of {@link 
CompletableFuture}s that represent the result for each action.
 440   */
 441  T 
ListCompletableFutureT batch(List? extends Row 
actions);
@@ -451,7 +451,7 @@
 443  /**
 444   * A simple version of batch. It will 
fail if there are any failures and you will get the whole
 445   * result list at once if the operation 
is succeeded.
-446   * @param actions list of Get, Put, 
Delete, Increment, Append objects
+446   * @param actions list of Get, Put, 
Delete, Increment, Append and RowMutations objects
 447   * @return A list of the result for the 
actions. Wrapped by a {@link CompletableFuture}.
 448   */
 449  default T 
CompletableFutureListT batchAll(List? extends Row 
actions) {

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/fd365a2b/apidocs/src-html/org/apache/hadoop/hbase/client/RegionInfo.html
--
diff --git a/apidocs/src-html/org/apache/hadoop/hbase/client/RegionInfo.html 
b/apidocs/src-html/org/apache/hadoop/hbase/client/RegionInfo.html
index 757cb15..f5f7342 100644
--- a/apidocs/src-html/org/apache/hadoop/hbase/client/RegionInfo.html
+++ b/apidocs/src-html/org/apache/hadoop/hbase/client/RegionInfo.html
@@ -581,173 +581,184 @@
 573  }
 574
 575  /**
-576   * Separate elements of a regionName.
-577   * @param regionName
-578   * @return Array of 

[11/51] [partial] hbase-site git commit: Published site at .

2017-11-29 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/fd365a2b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.DisableTableFuture.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.DisableTableFuture.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.DisableTableFuture.html
index d438f22..7c59e27 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.DisableTableFuture.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.DisableTableFuture.html
@@ -1290,8 +1290,8 @@
 1282   CompactType 
compactType) throws IOException {
 1283switch (compactType) {
 1284  case MOB:
-1285
compact(this.connection.getAdminForMaster(), getMobRegionInfo(tableName), 
major,
-1286  columnFamily);
+1285
compact(this.connection.getAdminForMaster(), 
RegionInfo.createMobRegionInfo(tableName),
+1286major, columnFamily);
 1287break;
 1288  case NORMAL:
 1289checkTableExists(tableName);
@@ -3248,7 +3248,7 @@
 3240  new 
CallableAdminProtos.GetRegionInfoResponse.CompactionState() {
 3241@Override
 3242public 
AdminProtos.GetRegionInfoResponse.CompactionState call() throws Exception {
-3243  RegionInfo info = 
getMobRegionInfo(tableName);
+3243  RegionInfo info = 
RegionInfo.createMobRegionInfo(tableName);
 3244  GetRegionInfoRequest 
request =
 3245
RequestConverter.buildGetRegionInfoRequest(info.getRegionName(), true);
 3246  GetRegionInfoResponse 
response = masterAdmin.getRegionInfo(rpcController, request);
@@ -3312,7 +3312,7 @@
 3304}
 3305break;
 3306  default:
-3307throw new 
IllegalArgumentException("Unknowne compactType: " + compactType);
+3307throw new 
IllegalArgumentException("Unknown compactType: " + compactType);
 3308}
 3309if (state != null) {
 3310  return 
ProtobufUtil.createCompactionState(state);
@@ -3847,325 +3847,320 @@
 3839});
 3840  }
 3841
-3842  private RegionInfo 
getMobRegionInfo(TableName tableName) {
-3843return 
RegionInfoBuilder.newBuilder(tableName).setStartKey(Bytes.toBytes(".mob")).setRegionId(0)
-3844.build();
-3845  }
-3846
-3847  private RpcControllerFactory 
getRpcControllerFactory() {
-3848return this.rpcControllerFactory;
-3849  }
-3850
-3851  @Override
-3852  public void addReplicationPeer(String 
peerId, ReplicationPeerConfig peerConfig, boolean enabled)
-3853  throws IOException {
-3854executeCallable(new 
MasterCallableVoid(getConnection(), getRpcControllerFactory()) {
-3855  @Override
-3856  protected Void rpcCall() throws 
Exception {
-3857
master.addReplicationPeer(getRpcController(),
-3858  
RequestConverter.buildAddReplicationPeerRequest(peerId, peerConfig, 
enabled));
-3859return null;
-3860  }
-3861});
-3862  }
-3863
-3864  @Override
-3865  public void 
removeReplicationPeer(String peerId) throws IOException {
-3866executeCallable(new 
MasterCallableVoid(getConnection(), getRpcControllerFactory()) {
-3867  @Override
-3868  protected Void rpcCall() throws 
Exception {
-3869
master.removeReplicationPeer(getRpcController(),
-3870  
RequestConverter.buildRemoveReplicationPeerRequest(peerId));
-3871return null;
-3872  }
-3873});
-3874  }
-3875
-3876  @Override
-3877  public void 
enableReplicationPeer(final String peerId) throws IOException {
-3878executeCallable(new 
MasterCallableVoid(getConnection(), getRpcControllerFactory()) {
-3879  @Override
-3880  protected Void rpcCall() throws 
Exception {
-3881
master.enableReplicationPeer(getRpcController(),
-3882  
RequestConverter.buildEnableReplicationPeerRequest(peerId));
-3883return null;
-3884  }
-3885});
-3886  }
-3887
-3888  @Override
-3889  public void 
disableReplicationPeer(final String peerId) throws IOException {
-3890executeCallable(new 
MasterCallableVoid(getConnection(), getRpcControllerFactory()) {
-3891  @Override
-3892  protected Void rpcCall() throws 
Exception {
-3893
master.disableReplicationPeer(getRpcController(),
-3894  
RequestConverter.buildDisableReplicationPeerRequest(peerId));
-3895return null;
-3896  }
-3897});
-3898  }
-3899
-3900  @Override
-3901  public ReplicationPeerConfig 
getReplicationPeerConfig(final String peerId) throws IOException {
-3902return executeCallable(new 
MasterCallableReplicationPeerConfig(getConnection(),
-3903getRpcControllerFactory()) {
-3904  @Override
-3905  protected ReplicationPeerConfig 
rpcCall() throws Exception {
-3906GetReplicationPeerConfigResponse 
response = master.getReplicationPeerConfig(
-3907  getRpcController(), 

[23/51] [partial] hbase-site git commit: Published site at .

2017-11-29 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/fd365a2b/devapidocs/org/apache/hadoop/hbase/thrift/TBoundedThreadPoolServer.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/thrift/TBoundedThreadPoolServer.html 
b/devapidocs/org/apache/hadoop/hbase/thrift/TBoundedThreadPoolServer.html
index 741fc75..a0a7ebb 100644
--- a/devapidocs/org/apache/hadoop/hbase/thrift/TBoundedThreadPoolServer.html
+++ b/devapidocs/org/apache/hadoop/hbase/thrift/TBoundedThreadPoolServer.html
@@ -115,7 +115,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Private
-public class TBoundedThreadPoolServer
+public class TBoundedThreadPoolServer
 extends org.apache.thrift.server.TServer
 A bounded thread pool server customized for HBase.
 
@@ -332,7 +332,7 @@ extends org.apache.thrift.server.TServer
 
 
 QUEUE_FULL_MSG
-private static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String QUEUE_FULL_MSG
+private static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String QUEUE_FULL_MSG
 
 See Also:
 Constant
 Field Values
@@ -345,7 +345,7 @@ extends org.apache.thrift.server.TServer
 
 
 MIN_WORKER_THREADS_CONF_KEY
-public static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String MIN_WORKER_THREADS_CONF_KEY
+public static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String MIN_WORKER_THREADS_CONF_KEY
 The "core size" of the thread pool. New threads are created 
on every
  connection until this many threads are created.
 
@@ -360,7 +360,7 @@ extends org.apache.thrift.server.TServer
 
 
 DEFAULT_MIN_WORKER_THREADS
-public static finalint DEFAULT_MIN_WORKER_THREADS
+public static finalint DEFAULT_MIN_WORKER_THREADS
 This default core pool size should be enough for many test 
scenarios. We
  want to override this with a much larger number (e.g. at least 200) for a
  large-scale production setup.
@@ -376,7 +376,7 @@ extends org.apache.thrift.server.TServer
 
 
 MAX_WORKER_THREADS_CONF_KEY
-public static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String MAX_WORKER_THREADS_CONF_KEY
+public static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String MAX_WORKER_THREADS_CONF_KEY
 The maximum size of the thread pool. When the pending 
request queue
  overflows, new threads are created until their number reaches this number.
  After that, the server starts dropping connections.
@@ -392,7 +392,7 @@ extends org.apache.thrift.server.TServer
 
 
 DEFAULT_MAX_WORKER_THREADS
-public static finalint DEFAULT_MAX_WORKER_THREADS
+public static finalint DEFAULT_MAX_WORKER_THREADS
 
 See Also:
 Constant
 Field Values
@@ -405,7 +405,7 @@ extends org.apache.thrift.server.TServer
 
 
 MAX_QUEUED_REQUESTS_CONF_KEY
-public static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String MAX_QUEUED_REQUESTS_CONF_KEY
+public static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String MAX_QUEUED_REQUESTS_CONF_KEY
 The maximum number of pending connections waiting in the 
queue. If there
  are no idle threads in the pool, the server queues requests. Only when
  the queue overflows, new threads are added, up to
@@ -422,7 +422,7 @@ extends org.apache.thrift.server.TServer
 
 
 DEFAULT_MAX_QUEUED_REQUESTS
-public static finalint DEFAULT_MAX_QUEUED_REQUESTS
+public static finalint DEFAULT_MAX_QUEUED_REQUESTS
 
 See Also:
 Constant
 Field Values
@@ -435,7 +435,7 @@ extends org.apache.thrift.server.TServer
 
 
 THREAD_KEEP_ALIVE_TIME_SEC_CONF_KEY
-public static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String THREAD_KEEP_ALIVE_TIME_SEC_CONF_KEY
+public static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String THREAD_KEEP_ALIVE_TIME_SEC_CONF_KEY
 Default amount of time in seconds to keep a thread alive. 
Worker threads
  are stopped after being idle for this long.
 
@@ -450,7 +450,7 @@ extends org.apache.thrift.server.TServer
 
 
 DEFAULT_THREAD_KEEP_ALIVE_TIME_SEC
-private static finalint DEFAULT_THREAD_KEEP_ALIVE_TIME_SEC
+private static finalint DEFAULT_THREAD_KEEP_ALIVE_TIME_SEC
 
 See Also:
 Constant
 Field Values
@@ -463,7 +463,7 @@ extends org.apache.thrift.server.TServer
 
 
 TIME_TO_WAIT_AFTER_SHUTDOWN_MS
-public static finalint TIME_TO_WAIT_AFTER_SHUTDOWN_MS
+public 

[13/51] [partial] hbase-site git commit: Published site at .

2017-11-29 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/fd365a2b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.DeleteColumnFamilyFuture.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.DeleteColumnFamilyFuture.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.DeleteColumnFamilyFuture.html
index d438f22..7c59e27 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.DeleteColumnFamilyFuture.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.DeleteColumnFamilyFuture.html
@@ -1290,8 +1290,8 @@
 1282   CompactType 
compactType) throws IOException {
 1283switch (compactType) {
 1284  case MOB:
-1285
compact(this.connection.getAdminForMaster(), getMobRegionInfo(tableName), 
major,
-1286  columnFamily);
+1285
compact(this.connection.getAdminForMaster(), 
RegionInfo.createMobRegionInfo(tableName),
+1286major, columnFamily);
 1287break;
 1288  case NORMAL:
 1289checkTableExists(tableName);
@@ -3248,7 +3248,7 @@
 3240  new 
CallableAdminProtos.GetRegionInfoResponse.CompactionState() {
 3241@Override
 3242public 
AdminProtos.GetRegionInfoResponse.CompactionState call() throws Exception {
-3243  RegionInfo info = 
getMobRegionInfo(tableName);
+3243  RegionInfo info = 
RegionInfo.createMobRegionInfo(tableName);
 3244  GetRegionInfoRequest 
request =
 3245
RequestConverter.buildGetRegionInfoRequest(info.getRegionName(), true);
 3246  GetRegionInfoResponse 
response = masterAdmin.getRegionInfo(rpcController, request);
@@ -3312,7 +3312,7 @@
 3304}
 3305break;
 3306  default:
-3307throw new 
IllegalArgumentException("Unknowne compactType: " + compactType);
+3307throw new 
IllegalArgumentException("Unknown compactType: " + compactType);
 3308}
 3309if (state != null) {
 3310  return 
ProtobufUtil.createCompactionState(state);
@@ -3847,325 +3847,320 @@
 3839});
 3840  }
 3841
-3842  private RegionInfo 
getMobRegionInfo(TableName tableName) {
-3843return 
RegionInfoBuilder.newBuilder(tableName).setStartKey(Bytes.toBytes(".mob")).setRegionId(0)
-3844.build();
-3845  }
-3846
-3847  private RpcControllerFactory 
getRpcControllerFactory() {
-3848return this.rpcControllerFactory;
-3849  }
-3850
-3851  @Override
-3852  public void addReplicationPeer(String 
peerId, ReplicationPeerConfig peerConfig, boolean enabled)
-3853  throws IOException {
-3854executeCallable(new 
MasterCallableVoid(getConnection(), getRpcControllerFactory()) {
-3855  @Override
-3856  protected Void rpcCall() throws 
Exception {
-3857
master.addReplicationPeer(getRpcController(),
-3858  
RequestConverter.buildAddReplicationPeerRequest(peerId, peerConfig, 
enabled));
-3859return null;
-3860  }
-3861});
-3862  }
-3863
-3864  @Override
-3865  public void 
removeReplicationPeer(String peerId) throws IOException {
-3866executeCallable(new 
MasterCallableVoid(getConnection(), getRpcControllerFactory()) {
-3867  @Override
-3868  protected Void rpcCall() throws 
Exception {
-3869
master.removeReplicationPeer(getRpcController(),
-3870  
RequestConverter.buildRemoveReplicationPeerRequest(peerId));
-3871return null;
-3872  }
-3873});
-3874  }
-3875
-3876  @Override
-3877  public void 
enableReplicationPeer(final String peerId) throws IOException {
-3878executeCallable(new 
MasterCallableVoid(getConnection(), getRpcControllerFactory()) {
-3879  @Override
-3880  protected Void rpcCall() throws 
Exception {
-3881
master.enableReplicationPeer(getRpcController(),
-3882  
RequestConverter.buildEnableReplicationPeerRequest(peerId));
-3883return null;
-3884  }
-3885});
-3886  }
-3887
-3888  @Override
-3889  public void 
disableReplicationPeer(final String peerId) throws IOException {
-3890executeCallable(new 
MasterCallableVoid(getConnection(), getRpcControllerFactory()) {
-3891  @Override
-3892  protected Void rpcCall() throws 
Exception {
-3893
master.disableReplicationPeer(getRpcController(),
-3894  
RequestConverter.buildDisableReplicationPeerRequest(peerId));
-3895return null;
-3896  }
-3897});
-3898  }
-3899
-3900  @Override
-3901  public ReplicationPeerConfig 
getReplicationPeerConfig(final String peerId) throws IOException {
-3902return executeCallable(new 
MasterCallableReplicationPeerConfig(getConnection(),
-3903getRpcControllerFactory()) {
-3904  @Override
-3905  protected ReplicationPeerConfig 
rpcCall() throws Exception {
-3906GetReplicationPeerConfigResponse 
response = master.getReplicationPeerConfig(
-3907  getRpcController(), 

[41/51] [partial] hbase-site git commit: Published site at .

2017-11-29 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/fd365a2b/apidocs/src-html/org/apache/hadoop/hbase/rest/client/RemoteHTable.html
--
diff --git 
a/apidocs/src-html/org/apache/hadoop/hbase/rest/client/RemoteHTable.html 
b/apidocs/src-html/org/apache/hadoop/hbase/rest/client/RemoteHTable.html
index 5e56349..92bdee3 100644
--- a/apidocs/src-html/org/apache/hadoop/hbase/rest/client/RemoteHTable.html
+++ b/apidocs/src-html/org/apache/hadoop/hbase/rest/client/RemoteHTable.html
@@ -259,680 +259,689 @@
 251  }
 252
 253  @Override
-254  public HTableDescriptor 
getTableDescriptor() throws IOException {
-255StringBuilder sb = new 
StringBuilder();
-256sb.append('/');
-257sb.append(Bytes.toString(name));
-258sb.append('/');
-259sb.append("schema");
-260for (int i = 0; i  maxRetries; 
i++) {
-261  Response response = 
client.get(sb.toString(), Constants.MIMETYPE_PROTOBUF);
-262  int code = response.getCode();
-263  switch (code) {
-264  case 200:
-265TableSchemaModel schema = new 
TableSchemaModel();
-266
schema.getObjectFromMessage(response.getBody());
-267return 
schema.getTableDescriptor();
-268  case 509:
-269try {
-270  Thread.sleep(sleepTime);
-271} catch (InterruptedException e) 
{
-272  throw 
(InterruptedIOException)new InterruptedIOException().initCause(e);
-273}
-274break;
-275  default:
-276throw new IOException("schema 
request returned " + code);
-277  }
-278}
-279throw new IOException("schema request 
timed out");
-280  }
-281
-282  @Override
-283  public void close() throws IOException 
{
-284client.shutdown();
-285  }
-286
-287  @Override
-288  public Result get(Get get) throws 
IOException {
-289TimeRange range = 
get.getTimeRange();
-290String spec = 
buildRowSpec(get.getRow(), get.getFamilyMap(),
-291  range.getMin(), range.getMax(), 
get.getMaxVersions());
-292if (get.getFilter() != null) {
-293  LOG.warn("filters not supported on 
gets");
-294}
-295Result[] results = 
getResults(spec);
-296if (results.length  0) {
-297  if (results.length  1) {
-298LOG.warn("too many results for 
get (" + results.length + ")");
-299  }
-300  return results[0];
-301} else {
-302  return new Result();
-303}
-304  }
-305
-306  @Override
-307  public Result[] get(ListGet 
gets) throws IOException {
-308byte[][] rows = new 
byte[gets.size()][];
-309int maxVersions = 1;
-310int count = 0;
-311
-312for(Get g:gets) {
-313
-314  if ( count == 0 ) {
-315maxVersions = 
g.getMaxVersions();
-316  } else if (g.getMaxVersions() != 
maxVersions) {
-317LOG.warn("MaxVersions on Gets do 
not match, using the first in the list ("+maxVersions+")");
-318  }
-319
-320  if (g.getFilter() != null) {
-321LOG.warn("filters not supported 
on gets");
-322  }
-323
-324  rows[count] = g.getRow();
-325  count ++;
-326}
-327
-328String spec = buildMultiRowSpec(rows, 
maxVersions);
-329
-330return getResults(spec);
-331  }
-332
-333  private Result[] getResults(String 
spec) throws IOException {
-334for (int i = 0; i  maxRetries; 
i++) {
-335  Response response = 
client.get(spec, Constants.MIMETYPE_PROTOBUF);
-336  int code = response.getCode();
-337  switch (code) {
-338case 200:
-339  CellSetModel model = new 
CellSetModel();
-340  
model.getObjectFromMessage(response.getBody());
-341  Result[] results = 
buildResultFromModel(model);
-342  if ( results.length  0) {
-343return results;
-344  }
-345  // fall through
-346case 404:
-347  return new Result[0];
-348
-349case 509:
-350  try {
-351Thread.sleep(sleepTime);
-352  } catch (InterruptedException 
e) {
-353throw 
(InterruptedIOException)new InterruptedIOException().initCause(e);
-354  }
-355  break;
-356default:
-357  throw new IOException("get 
request returned " + code);
-358  }
-359}
-360throw new IOException("get request 
timed out");
-361  }
-362
-363  @Override
-364  public boolean exists(Get get) throws 
IOException {
-365LOG.warn("exists() is really get(), 
just use get()");
-366Result result = get(get);
-367return (result != null  
!(result.isEmpty()));
-368  }
-369
-370  @Override
-371  public boolean[] exists(ListGet 
gets) throws IOException {
-372LOG.warn("exists(ListGet) is 
really list of get() calls, just use get()");
-373boolean[] results = new 
boolean[gets.size()];
-374for (int i = 0; i  
results.length; i++) {
-375  results[i] = exists(gets.get(i));
-376}
-377return results;
-378  }
-379
-380  @Override
-381  public void put(Put put) throws 
IOException {
-382CellSetModel model = 

[08/51] [partial] hbase-site git commit: Published site at .

2017-11-29 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/fd365a2b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ModifyColumnFamilyFuture.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ModifyColumnFamilyFuture.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ModifyColumnFamilyFuture.html
index d438f22..7c59e27 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ModifyColumnFamilyFuture.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ModifyColumnFamilyFuture.html
@@ -1290,8 +1290,8 @@
 1282   CompactType 
compactType) throws IOException {
 1283switch (compactType) {
 1284  case MOB:
-1285
compact(this.connection.getAdminForMaster(), getMobRegionInfo(tableName), 
major,
-1286  columnFamily);
+1285
compact(this.connection.getAdminForMaster(), 
RegionInfo.createMobRegionInfo(tableName),
+1286major, columnFamily);
 1287break;
 1288  case NORMAL:
 1289checkTableExists(tableName);
@@ -3248,7 +3248,7 @@
 3240  new 
CallableAdminProtos.GetRegionInfoResponse.CompactionState() {
 3241@Override
 3242public 
AdminProtos.GetRegionInfoResponse.CompactionState call() throws Exception {
-3243  RegionInfo info = 
getMobRegionInfo(tableName);
+3243  RegionInfo info = 
RegionInfo.createMobRegionInfo(tableName);
 3244  GetRegionInfoRequest 
request =
 3245
RequestConverter.buildGetRegionInfoRequest(info.getRegionName(), true);
 3246  GetRegionInfoResponse 
response = masterAdmin.getRegionInfo(rpcController, request);
@@ -3312,7 +3312,7 @@
 3304}
 3305break;
 3306  default:
-3307throw new 
IllegalArgumentException("Unknowne compactType: " + compactType);
+3307throw new 
IllegalArgumentException("Unknown compactType: " + compactType);
 3308}
 3309if (state != null) {
 3310  return 
ProtobufUtil.createCompactionState(state);
@@ -3847,325 +3847,320 @@
 3839});
 3840  }
 3841
-3842  private RegionInfo 
getMobRegionInfo(TableName tableName) {
-3843return 
RegionInfoBuilder.newBuilder(tableName).setStartKey(Bytes.toBytes(".mob")).setRegionId(0)
-3844.build();
-3845  }
-3846
-3847  private RpcControllerFactory 
getRpcControllerFactory() {
-3848return this.rpcControllerFactory;
-3849  }
-3850
-3851  @Override
-3852  public void addReplicationPeer(String 
peerId, ReplicationPeerConfig peerConfig, boolean enabled)
-3853  throws IOException {
-3854executeCallable(new 
MasterCallableVoid(getConnection(), getRpcControllerFactory()) {
-3855  @Override
-3856  protected Void rpcCall() throws 
Exception {
-3857
master.addReplicationPeer(getRpcController(),
-3858  
RequestConverter.buildAddReplicationPeerRequest(peerId, peerConfig, 
enabled));
-3859return null;
-3860  }
-3861});
-3862  }
-3863
-3864  @Override
-3865  public void 
removeReplicationPeer(String peerId) throws IOException {
-3866executeCallable(new 
MasterCallableVoid(getConnection(), getRpcControllerFactory()) {
-3867  @Override
-3868  protected Void rpcCall() throws 
Exception {
-3869
master.removeReplicationPeer(getRpcController(),
-3870  
RequestConverter.buildRemoveReplicationPeerRequest(peerId));
-3871return null;
-3872  }
-3873});
-3874  }
-3875
-3876  @Override
-3877  public void 
enableReplicationPeer(final String peerId) throws IOException {
-3878executeCallable(new 
MasterCallableVoid(getConnection(), getRpcControllerFactory()) {
-3879  @Override
-3880  protected Void rpcCall() throws 
Exception {
-3881
master.enableReplicationPeer(getRpcController(),
-3882  
RequestConverter.buildEnableReplicationPeerRequest(peerId));
-3883return null;
-3884  }
-3885});
-3886  }
-3887
-3888  @Override
-3889  public void 
disableReplicationPeer(final String peerId) throws IOException {
-3890executeCallable(new 
MasterCallableVoid(getConnection(), getRpcControllerFactory()) {
-3891  @Override
-3892  protected Void rpcCall() throws 
Exception {
-3893
master.disableReplicationPeer(getRpcController(),
-3894  
RequestConverter.buildDisableReplicationPeerRequest(peerId));
-3895return null;
-3896  }
-3897});
-3898  }
-3899
-3900  @Override
-3901  public ReplicationPeerConfig 
getReplicationPeerConfig(final String peerId) throws IOException {
-3902return executeCallable(new 
MasterCallableReplicationPeerConfig(getConnection(),
-3903getRpcControllerFactory()) {
-3904  @Override
-3905  protected ReplicationPeerConfig 
rpcCall() throws Exception {
-3906GetReplicationPeerConfigResponse 
response = master.getReplicationPeerConfig(
-3907  getRpcController(), 

[40/51] [partial] hbase-site git commit: Published site at .

2017-11-29 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/fd365a2b/book.html
--
diff --git a/book.html b/book.html
index 3fe4c25..fe11dbb 100644
--- a/book.html
+++ b/book.html
@@ -36739,7 +36739,7 @@ The server will return cellblocks compressed using this 
same compressor as long
 
 
 Version 3.0.0-SNAPSHOT
-Last updated 2017-11-28 14:29:39 UTC
+Last updated 2017-11-29 14:29:37 UTC
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/fd365a2b/bulk-loads.html
--
diff --git a/bulk-loads.html b/bulk-loads.html
index 8763e22..c64e34c 100644
--- a/bulk-loads.html
+++ b/bulk-loads.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase   
   Bulk Loads in Apache HBase (TM)
@@ -311,7 +311,7 @@ under the License. -->
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2017-11-28
+  Last Published: 
2017-11-29
 
 
 



[32/51] [partial] hbase-site git commit: Published site at .

2017-11-29 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/fd365a2b/devapidocs/org/apache/hadoop/hbase/client/AsyncTable.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/AsyncTable.html 
b/devapidocs/org/apache/hadoop/hbase/client/AsyncTable.html
index 4e26db8..8067f23 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/AsyncTable.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/AsyncTable.html
@@ -179,7 +179,7 @@ public interface 
 Thttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">Listhttp://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in 
java.util.concurrent">CompletableFutureT
 batch(http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">List? extends Rowactions)
-Method that does a batch call on Deletes, Gets, Puts, 
Increments and Appends.
+Method that does a batch call on Deletes, Gets, Puts, 
Increments, Appends and RowMutations.
 
 
 
@@ -987,13 +987,13 @@ public interface 
 batch
 Thttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">Listhttp://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in 
java.util.concurrent">CompletableFutureTbatch(http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">List? extends Rowactions)
-Method that does a batch call on Deletes, Gets, Puts, 
Increments and Appends. The ordering of
- execution of the actions is not defined. Meaning if you do a Put and a Get in 
the same
- batch(java.util.List?
 extends org.apache.hadoop.hbase.client.Row) call, you will not 
necessarily be guaranteed that the Get returns what the Put
- had put.
+Method that does a batch call on Deletes, Gets, Puts, 
Increments, Appends and RowMutations. The
+ ordering of execution of the actions is not defined. Meaning if you do a Put 
and a Get in the
+ same batch(java.util.List?
 extends org.apache.hadoop.hbase.client.Row) call, you will not 
necessarily be guaranteed that the Get returns what the
+ Put had put.
 
 Parameters:
-actions - list of Get, Put, Delete, Increment, Append 
objects
+actions - list of Get, Put, Delete, Increment, Append, and 
RowMutations objects
 Returns:
 A list of http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in 
java.util.concurrent">CompletableFutures that represent the 
result for each action.
 
@@ -1010,7 +1010,7 @@ public interface Parameters:
-actions - list of Get, Put, Delete, Increment, Append 
objects
+actions - list of Get, Put, Delete, Increment, Append and 
RowMutations objects
 Returns:
 A list of the result for the actions. Wrapped by a http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in 
java.util.concurrent">CompletableFuture.
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/fd365a2b/devapidocs/org/apache/hadoop/hbase/client/AsyncTableImpl.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/AsyncTableImpl.html 
b/devapidocs/org/apache/hadoop/hbase/client/AsyncTableImpl.html
index 0500d46..d472bcd 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/AsyncTableImpl.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/AsyncTableImpl.html
@@ -205,7 +205,7 @@ implements 
 Thttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">Listhttp://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in 
java.util.concurrent">CompletableFutureT
 batch(http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">List? extends Rowactions)
-Method that does a batch call on Deletes, Gets, Puts, 
Increments and Appends.
+Method that does a batch call on Deletes, Gets, Puts, 
Increments, Appends and RowMutations.
 
 
 
@@ -896,15 +896,15 @@ implements http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">Listhttp://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in 
java.util.concurrent">CompletableFutureTbatch(http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">List? extends Rowactions)
 Description copied from 
interface:AsyncTable
-Method that does a batch call on Deletes, Gets, Puts, 
Increments and Appends. The ordering of
- 

[15/51] [partial] hbase-site git commit: Published site at .

2017-11-29 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/fd365a2b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.AddColumnFamilyFuture.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.AddColumnFamilyFuture.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.AddColumnFamilyFuture.html
index d438f22..7c59e27 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.AddColumnFamilyFuture.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.AddColumnFamilyFuture.html
@@ -1290,8 +1290,8 @@
 1282   CompactType 
compactType) throws IOException {
 1283switch (compactType) {
 1284  case MOB:
-1285
compact(this.connection.getAdminForMaster(), getMobRegionInfo(tableName), 
major,
-1286  columnFamily);
+1285
compact(this.connection.getAdminForMaster(), 
RegionInfo.createMobRegionInfo(tableName),
+1286major, columnFamily);
 1287break;
 1288  case NORMAL:
 1289checkTableExists(tableName);
@@ -3248,7 +3248,7 @@
 3240  new 
CallableAdminProtos.GetRegionInfoResponse.CompactionState() {
 3241@Override
 3242public 
AdminProtos.GetRegionInfoResponse.CompactionState call() throws Exception {
-3243  RegionInfo info = 
getMobRegionInfo(tableName);
+3243  RegionInfo info = 
RegionInfo.createMobRegionInfo(tableName);
 3244  GetRegionInfoRequest 
request =
 3245
RequestConverter.buildGetRegionInfoRequest(info.getRegionName(), true);
 3246  GetRegionInfoResponse 
response = masterAdmin.getRegionInfo(rpcController, request);
@@ -3312,7 +3312,7 @@
 3304}
 3305break;
 3306  default:
-3307throw new 
IllegalArgumentException("Unknowne compactType: " + compactType);
+3307throw new 
IllegalArgumentException("Unknown compactType: " + compactType);
 3308}
 3309if (state != null) {
 3310  return 
ProtobufUtil.createCompactionState(state);
@@ -3847,325 +3847,320 @@
 3839});
 3840  }
 3841
-3842  private RegionInfo 
getMobRegionInfo(TableName tableName) {
-3843return 
RegionInfoBuilder.newBuilder(tableName).setStartKey(Bytes.toBytes(".mob")).setRegionId(0)
-3844.build();
-3845  }
-3846
-3847  private RpcControllerFactory 
getRpcControllerFactory() {
-3848return this.rpcControllerFactory;
-3849  }
-3850
-3851  @Override
-3852  public void addReplicationPeer(String 
peerId, ReplicationPeerConfig peerConfig, boolean enabled)
-3853  throws IOException {
-3854executeCallable(new 
MasterCallableVoid(getConnection(), getRpcControllerFactory()) {
-3855  @Override
-3856  protected Void rpcCall() throws 
Exception {
-3857
master.addReplicationPeer(getRpcController(),
-3858  
RequestConverter.buildAddReplicationPeerRequest(peerId, peerConfig, 
enabled));
-3859return null;
-3860  }
-3861});
-3862  }
-3863
-3864  @Override
-3865  public void 
removeReplicationPeer(String peerId) throws IOException {
-3866executeCallable(new 
MasterCallableVoid(getConnection(), getRpcControllerFactory()) {
-3867  @Override
-3868  protected Void rpcCall() throws 
Exception {
-3869
master.removeReplicationPeer(getRpcController(),
-3870  
RequestConverter.buildRemoveReplicationPeerRequest(peerId));
-3871return null;
-3872  }
-3873});
-3874  }
-3875
-3876  @Override
-3877  public void 
enableReplicationPeer(final String peerId) throws IOException {
-3878executeCallable(new 
MasterCallableVoid(getConnection(), getRpcControllerFactory()) {
-3879  @Override
-3880  protected Void rpcCall() throws 
Exception {
-3881
master.enableReplicationPeer(getRpcController(),
-3882  
RequestConverter.buildEnableReplicationPeerRequest(peerId));
-3883return null;
-3884  }
-3885});
-3886  }
-3887
-3888  @Override
-3889  public void 
disableReplicationPeer(final String peerId) throws IOException {
-3890executeCallable(new 
MasterCallableVoid(getConnection(), getRpcControllerFactory()) {
-3891  @Override
-3892  protected Void rpcCall() throws 
Exception {
-3893
master.disableReplicationPeer(getRpcController(),
-3894  
RequestConverter.buildDisableReplicationPeerRequest(peerId));
-3895return null;
-3896  }
-3897});
-3898  }
-3899
-3900  @Override
-3901  public ReplicationPeerConfig 
getReplicationPeerConfig(final String peerId) throws IOException {
-3902return executeCallable(new 
MasterCallableReplicationPeerConfig(getConnection(),
-3903getRpcControllerFactory()) {
-3904  @Override
-3905  protected ReplicationPeerConfig 
rpcCall() throws Exception {
-3906GetReplicationPeerConfigResponse 
response = master.getReplicationPeerConfig(
-3907  getRpcController(), 

[21/51] [partial] hbase-site git commit: Published site at .

2017-11-29 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/fd365a2b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncAdmin.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncAdmin.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncAdmin.html
index 92c1c97..68f8a94 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncAdmin.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncAdmin.html
@@ -307,860 +307,919 @@
 299   * was sent to HBase and may need some 
time to finish the compact operation.
 300   * @param tableName table to compact
 301   */
-302  CompletableFutureVoid 
compact(TableName tableName);
-303
-304  /**
-305   * Compact a column family within a 
table. When the returned CompletableFuture is done, it only
-306   * means the compact request was sent 
to HBase and may need some time to finish the compact
-307   * operation.
-308   * @param tableName table to compact
-309   * @param columnFamily column family 
within a table. If not present, compact the table's all
-310   *  column families.
-311   */
-312  CompletableFutureVoid 
compact(TableName tableName, byte[] columnFamily);
-313
-314  /**
-315   * Compact an individual region. When 
the returned CompletableFuture is done, it only means the
-316   * compact request was sent to HBase 
and may need some time to finish the compact operation.
-317   * @param regionName region to 
compact
-318   */
-319  CompletableFutureVoid 
compactRegion(byte[] regionName);
-320
-321  /**
-322   * Compact a column family within a 
region. When the returned CompletableFuture is done, it only
-323   * means the compact request was sent 
to HBase and may need some time to finish the compact
-324   * operation.
-325   * @param regionName region to 
compact
-326   * @param columnFamily column family 
within a region. If not present, compact the region's all
-327   *  column families.
-328   */
-329  CompletableFutureVoid 
compactRegion(byte[] regionName, byte[] columnFamily);
-330
-331  /**
-332   * Major compact a table. When the 
returned CompletableFuture is done, it only means the compact
-333   * request was sent to HBase and may 
need some time to finish the compact operation.
-334   * @param tableName table to major 
compact
-335   */
-336  CompletableFutureVoid 
majorCompact(TableName tableName);
-337
-338  /**
-339   * Major compact a column family within 
a table. When the returned CompletableFuture is done, it
-340   * only means the compact request was 
sent to HBase and may need some time to finish the compact
-341   * operation.
-342   * @param tableName table to major 
compact
-343   * @param columnFamily column family 
within a table. If not present, major compact the table's all
-344   *  column families.
-345   */
-346  CompletableFutureVoid 
majorCompact(TableName tableName, byte[] columnFamily);
-347
-348  /**
-349   * Major compact a region. When the 
returned CompletableFuture is done, it only means the compact
-350   * request was sent to HBase and may 
need some time to finish the compact operation.
-351   * @param regionName region to major 
compact
-352   */
-353  CompletableFutureVoid 
majorCompactRegion(byte[] regionName);
-354
-355  /**
-356   * Major compact a column family within 
region. When the returned CompletableFuture is done, it
-357   * only means the compact request was 
sent to HBase and may need some time to finish the compact
-358   * operation.
-359   * @param regionName region to major 
compact
-360   * @param columnFamily column family 
within a region. If not present, major compact the region's
-361   *  all column families.
-362   */
-363  CompletableFutureVoid 
majorCompactRegion(byte[] regionName, byte[] columnFamily);
-364
-365  /**
-366   * Compact all regions on the region 
server.
-367   * @param serverName the region server 
name
-368   */
-369  CompletableFutureVoid 
compactRegionServer(ServerName serverName);
-370
-371  /**
-372   * Compact all regions on the region 
server.
-373   * @param serverName the region server 
name
-374   */
-375  CompletableFutureVoid 
majorCompactRegionServer(ServerName serverName);
-376
-377  /**
-378   * Turn the Merge switch on or off.
-379   * @param on
-380   * @return Previous switch value 
wrapped by a {@link CompletableFuture}
-381   */
-382  CompletableFutureBoolean 
mergeSwitch(boolean on);
-383
-384  /**
-385   * Query the current state of the Merge 
switch.
-386   * @return true if the switch is on, 
false otherwise. The return value will be wrapped by a
-387   * {@link CompletableFuture}
-388   */
-389  CompletableFutureBoolean 
isMergeEnabled();
-390
-391  /**
-392   * Turn the Split switch on or off.
-393   * @param on
-394   * @return Previous switch value 
wrapped by a {@link CompletableFuture}
-395   */
-396  CompletableFutureBoolean 
splitSwitch(boolean on);
-397
-398  /**
-399   * 

[24/51] [partial] hbase-site git commit: Published site at .

2017-11-29 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/fd365a2b/devapidocs/org/apache/hadoop/hbase/rest/client/RemoteHTable.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/rest/client/RemoteHTable.html 
b/devapidocs/org/apache/hadoop/hbase/rest/client/RemoteHTable.html
index b963bf7..c00ba22 100644
--- a/devapidocs/org/apache/hadoop/hbase/rest/client/RemoteHTable.html
+++ b/devapidocs/org/apache/hadoop/hbase/rest/client/RemoteHTable.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":42,"i32":10,"i33":42,"i34":10,"i35":10,"i36":42,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":42,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":42,"i55":10,"i56":9};
+var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":42,"i11":10,"i12":42,"i13":10,"i14":10,"i15":42,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":42,"i32":10,"i33":42,"i34":10,"i35":10,"i36":42,"i37":10,"i38":10,"i39":10,"i40":10,"i41":42,"i42":10,"i43":42,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":42,"i53":42,"i54":42,"i55":42,"i56":9};
 var tabs = {65535:["t0","All Methods"],1:["t1","Static 
Methods"],2:["t2","Instance Methods"],8:["t4","Concrete 
Methods"],32:["t6","Deprecated Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -314,8 +314,7 @@ implements CompareFilter.CompareOpcompareOp,
   byte[]value,
   Deletedelete)
-Atomically checks if a row/family/qualifier value matches 
the expected
- value.
+Deprecated.
 
 
 
@@ -338,7 +337,7 @@ implements CompareFilter.CompareOpcompareOp,
   byte[]value,
   RowMutationsrm)
-Atomically checks if a row/family/qualifier value matches 
the expected value.
+Deprecated.
 
 
 
@@ -371,8 +370,7 @@ implements CompareFilter.CompareOpcompareOp,
byte[]value,
Putput)
-Atomically checks if a row/family/qualifier value matches 
the expected
- value.
+Deprecated.
 
 
 
@@ -546,7 +544,7 @@ implements 
 HTableDescriptor
 getTableDescriptor()
-Gets the table descriptor for 
this table.
+Deprecated.
 
 
 
@@ -615,15 +613,13 @@ implements 
 void
 setOperationTimeout(intoperationTimeout)
-Set timeout (millisecond) of each operation in this Table 
instance, will override the value
- of hbase.client.operation.timeout in configuration.
+Deprecated.
 
 
 
 void
 setReadRpcTimeout(intreadRpcTimeout)
-Set timeout (millisecond) of each rpc read request in 
operations of this Table instance, will
- override the value of hbase.rpc.read.timeout in configuration.
+Deprecated.
 
 
 
@@ -635,8 +631,7 @@ implements 
 void
 setWriteRpcTimeout(intwriteRpcTimeout)
-Set timeout (millisecond) of each rpc write request in 
operations of this Table instance, will
- override the value of hbase.rpc.write.timeout in configuration.
+Deprecated.
 
 
 
@@ -866,8 +861,10 @@ implements 
 
 getTableDescriptor
-publicHTableDescriptorgetTableDescriptor()
-throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
+http://docs.oracle.com/javase/8/docs/api/java/lang/Deprecated.html?is-external=true;
 title="class or interface in java.lang">@Deprecated
+publicHTableDescriptorgetTableDescriptor()
+throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
+Deprecated.
 Description copied from 
interface:Table
 Gets the table descriptor for 
this table.
 
@@ -884,7 +881,7 @@ implements 
 
 close
-publicvoidclose()
+publicvoidclose()
throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 Description copied from 
interface:Table
 Releases any resources held or pending changes in internal 
buffers.
@@ -906,7 +903,7 @@ implements 
 
 get
-publicResultget(Getget)
+publicResultget(Getget)
throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 Description copied from 
interface:Table
 Extracts certain cells from a given row.
@@ -930,7 +927,7 @@ implements 
 
 get
-publicResult[]get(http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListGetgets)

[18/51] [partial] hbase-site git commit: Published site at .

2017-11-29 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/fd365a2b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncBatchRpcRetryingCaller.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncBatchRpcRetryingCaller.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncBatchRpcRetryingCaller.html
index 8ba8dc9..f973938 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncBatchRpcRetryingCaller.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncBatchRpcRetryingCaller.html
@@ -37,36 +37,36 @@
 029import java.io.IOException;
 030import java.util.ArrayList;
 031import java.util.Collections;
-032import java.util.IdentityHashMap;
-033import java.util.List;
-034import java.util.Map;
-035import java.util.Optional;
-036import 
java.util.concurrent.CompletableFuture;
-037import 
java.util.concurrent.ConcurrentHashMap;
-038import 
java.util.concurrent.ConcurrentLinkedQueue;
-039import 
java.util.concurrent.ConcurrentMap;
-040import 
java.util.concurrent.ConcurrentSkipListMap;
-041import java.util.concurrent.TimeUnit;
-042import java.util.function.Supplier;
-043import java.util.stream.Collectors;
-044import java.util.stream.Stream;
-045
-046import org.apache.commons.logging.Log;
-047import 
org.apache.commons.logging.LogFactory;
-048import 
org.apache.hadoop.hbase.CellScannable;
-049import 
org.apache.hadoop.hbase.DoNotRetryIOException;
-050import 
org.apache.hadoop.hbase.HRegionLocation;
-051import 
org.apache.hadoop.hbase.ServerName;
-052import 
org.apache.hadoop.hbase.TableName;
-053import 
org.apache.yetus.audience.InterfaceAudience;
-054import 
org.apache.hadoop.hbase.client.MultiResponse.RegionResult;
-055import 
org.apache.hadoop.hbase.client.RetriesExhaustedException.ThrowableWithExtraContext;
-056import 
org.apache.hadoop.hbase.ipc.HBaseRpcController;
-057import 
org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
-058import 
org.apache.hadoop.hbase.shaded.protobuf.ResponseConverter;
-059import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
-060import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService;
-061import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
+032import java.util.HashMap;
+033import java.util.IdentityHashMap;
+034import java.util.List;
+035import java.util.Map;
+036import java.util.Optional;
+037import 
java.util.concurrent.CompletableFuture;
+038import 
java.util.concurrent.ConcurrentHashMap;
+039import 
java.util.concurrent.ConcurrentLinkedQueue;
+040import 
java.util.concurrent.ConcurrentMap;
+041import 
java.util.concurrent.ConcurrentSkipListMap;
+042import java.util.concurrent.TimeUnit;
+043import java.util.function.Supplier;
+044import java.util.stream.Collectors;
+045import java.util.stream.Stream;
+046
+047import org.apache.commons.logging.Log;
+048import 
org.apache.commons.logging.LogFactory;
+049import 
org.apache.hadoop.hbase.CellScannable;
+050import 
org.apache.hadoop.hbase.DoNotRetryIOException;
+051import 
org.apache.hadoop.hbase.HRegionLocation;
+052import 
org.apache.hadoop.hbase.ServerName;
+053import 
org.apache.hadoop.hbase.TableName;
+054import 
org.apache.yetus.audience.InterfaceAudience;
+055import 
org.apache.hadoop.hbase.client.MultiResponse.RegionResult;
+056import 
org.apache.hadoop.hbase.client.RetriesExhaustedException.ThrowableWithExtraContext;
+057import 
org.apache.hadoop.hbase.ipc.HBaseRpcController;
+058import 
org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
+059import 
org.apache.hadoop.hbase.shaded.protobuf.ResponseConverter;
+060import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
+061import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService;
 062import 
org.apache.hadoop.hbase.util.Bytes;
 063import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 064
@@ -240,212 +240,208 @@
 232  }
 233
 234  private ClientProtos.MultiRequest 
buildReq(Mapbyte[], RegionRequest actionsByRegion,
-235  ListCellScannable cells) 
throws IOException {
+235  ListCellScannable cells, 
MapInteger, Integer rowMutationsIndexMap) throws IOException {
 236ClientProtos.MultiRequest.Builder 
multiRequestBuilder = ClientProtos.MultiRequest.newBuilder();
 237ClientProtos.RegionAction.Builder 
regionActionBuilder = ClientProtos.RegionAction.newBuilder();
 238ClientProtos.Action.Builder 
actionBuilder = ClientProtos.Action.newBuilder();
 239ClientProtos.MutationProto.Builder 
mutationBuilder = ClientProtos.MutationProto.newBuilder();
 240for (Map.Entrybyte[], 
RegionRequest entry : actionsByRegion.entrySet()) {
-241  // TODO: remove the extra for loop 
as we will iterate it in mutationBuilder.
-242  if 
(!multiRequestBuilder.hasNonceGroup()) {
-243for (Action action : 
entry.getValue().actions) {
-244  if (action.hasNonce()) {
-245 

[09/51] [partial] hbase-site git commit: Published site at .

2017-11-29 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/fd365a2b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.MergeTableRegionsFuture.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.MergeTableRegionsFuture.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.MergeTableRegionsFuture.html
index d438f22..7c59e27 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.MergeTableRegionsFuture.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.MergeTableRegionsFuture.html
@@ -1290,8 +1290,8 @@
 1282   CompactType 
compactType) throws IOException {
 1283switch (compactType) {
 1284  case MOB:
-1285
compact(this.connection.getAdminForMaster(), getMobRegionInfo(tableName), 
major,
-1286  columnFamily);
+1285
compact(this.connection.getAdminForMaster(), 
RegionInfo.createMobRegionInfo(tableName),
+1286major, columnFamily);
 1287break;
 1288  case NORMAL:
 1289checkTableExists(tableName);
@@ -3248,7 +3248,7 @@
 3240  new 
CallableAdminProtos.GetRegionInfoResponse.CompactionState() {
 3241@Override
 3242public 
AdminProtos.GetRegionInfoResponse.CompactionState call() throws Exception {
-3243  RegionInfo info = 
getMobRegionInfo(tableName);
+3243  RegionInfo info = 
RegionInfo.createMobRegionInfo(tableName);
 3244  GetRegionInfoRequest 
request =
 3245
RequestConverter.buildGetRegionInfoRequest(info.getRegionName(), true);
 3246  GetRegionInfoResponse 
response = masterAdmin.getRegionInfo(rpcController, request);
@@ -3312,7 +3312,7 @@
 3304}
 3305break;
 3306  default:
-3307throw new 
IllegalArgumentException("Unknowne compactType: " + compactType);
+3307throw new 
IllegalArgumentException("Unknown compactType: " + compactType);
 3308}
 3309if (state != null) {
 3310  return 
ProtobufUtil.createCompactionState(state);
@@ -3847,325 +3847,320 @@
 3839});
 3840  }
 3841
-3842  private RegionInfo 
getMobRegionInfo(TableName tableName) {
-3843return 
RegionInfoBuilder.newBuilder(tableName).setStartKey(Bytes.toBytes(".mob")).setRegionId(0)
-3844.build();
-3845  }
-3846
-3847  private RpcControllerFactory 
getRpcControllerFactory() {
-3848return this.rpcControllerFactory;
-3849  }
-3850
-3851  @Override
-3852  public void addReplicationPeer(String 
peerId, ReplicationPeerConfig peerConfig, boolean enabled)
-3853  throws IOException {
-3854executeCallable(new 
MasterCallableVoid(getConnection(), getRpcControllerFactory()) {
-3855  @Override
-3856  protected Void rpcCall() throws 
Exception {
-3857
master.addReplicationPeer(getRpcController(),
-3858  
RequestConverter.buildAddReplicationPeerRequest(peerId, peerConfig, 
enabled));
-3859return null;
-3860  }
-3861});
-3862  }
-3863
-3864  @Override
-3865  public void 
removeReplicationPeer(String peerId) throws IOException {
-3866executeCallable(new 
MasterCallableVoid(getConnection(), getRpcControllerFactory()) {
-3867  @Override
-3868  protected Void rpcCall() throws 
Exception {
-3869
master.removeReplicationPeer(getRpcController(),
-3870  
RequestConverter.buildRemoveReplicationPeerRequest(peerId));
-3871return null;
-3872  }
-3873});
-3874  }
-3875
-3876  @Override
-3877  public void 
enableReplicationPeer(final String peerId) throws IOException {
-3878executeCallable(new 
MasterCallableVoid(getConnection(), getRpcControllerFactory()) {
-3879  @Override
-3880  protected Void rpcCall() throws 
Exception {
-3881
master.enableReplicationPeer(getRpcController(),
-3882  
RequestConverter.buildEnableReplicationPeerRequest(peerId));
-3883return null;
-3884  }
-3885});
-3886  }
-3887
-3888  @Override
-3889  public void 
disableReplicationPeer(final String peerId) throws IOException {
-3890executeCallable(new 
MasterCallableVoid(getConnection(), getRpcControllerFactory()) {
-3891  @Override
-3892  protected Void rpcCall() throws 
Exception {
-3893
master.disableReplicationPeer(getRpcController(),
-3894  
RequestConverter.buildDisableReplicationPeerRequest(peerId));
-3895return null;
-3896  }
-3897});
-3898  }
-3899
-3900  @Override
-3901  public ReplicationPeerConfig 
getReplicationPeerConfig(final String peerId) throws IOException {
-3902return executeCallable(new 
MasterCallableReplicationPeerConfig(getConnection(),
-3903getRpcControllerFactory()) {
-3904  @Override
-3905  protected ReplicationPeerConfig 
rpcCall() throws Exception {
-3906GetReplicationPeerConfigResponse 
response = master.getReplicationPeerConfig(
-3907  getRpcController(), 

[20/51] [partial] hbase-site git commit: Published site at .

2017-11-29 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/fd365a2b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncBatchRpcRetryingCaller.RegionRequest.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncBatchRpcRetryingCaller.RegionRequest.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncBatchRpcRetryingCaller.RegionRequest.html
index 8ba8dc9..f973938 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncBatchRpcRetryingCaller.RegionRequest.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncBatchRpcRetryingCaller.RegionRequest.html
@@ -37,36 +37,36 @@
 029import java.io.IOException;
 030import java.util.ArrayList;
 031import java.util.Collections;
-032import java.util.IdentityHashMap;
-033import java.util.List;
-034import java.util.Map;
-035import java.util.Optional;
-036import 
java.util.concurrent.CompletableFuture;
-037import 
java.util.concurrent.ConcurrentHashMap;
-038import 
java.util.concurrent.ConcurrentLinkedQueue;
-039import 
java.util.concurrent.ConcurrentMap;
-040import 
java.util.concurrent.ConcurrentSkipListMap;
-041import java.util.concurrent.TimeUnit;
-042import java.util.function.Supplier;
-043import java.util.stream.Collectors;
-044import java.util.stream.Stream;
-045
-046import org.apache.commons.logging.Log;
-047import 
org.apache.commons.logging.LogFactory;
-048import 
org.apache.hadoop.hbase.CellScannable;
-049import 
org.apache.hadoop.hbase.DoNotRetryIOException;
-050import 
org.apache.hadoop.hbase.HRegionLocation;
-051import 
org.apache.hadoop.hbase.ServerName;
-052import 
org.apache.hadoop.hbase.TableName;
-053import 
org.apache.yetus.audience.InterfaceAudience;
-054import 
org.apache.hadoop.hbase.client.MultiResponse.RegionResult;
-055import 
org.apache.hadoop.hbase.client.RetriesExhaustedException.ThrowableWithExtraContext;
-056import 
org.apache.hadoop.hbase.ipc.HBaseRpcController;
-057import 
org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
-058import 
org.apache.hadoop.hbase.shaded.protobuf.ResponseConverter;
-059import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
-060import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService;
-061import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
+032import java.util.HashMap;
+033import java.util.IdentityHashMap;
+034import java.util.List;
+035import java.util.Map;
+036import java.util.Optional;
+037import 
java.util.concurrent.CompletableFuture;
+038import 
java.util.concurrent.ConcurrentHashMap;
+039import 
java.util.concurrent.ConcurrentLinkedQueue;
+040import 
java.util.concurrent.ConcurrentMap;
+041import 
java.util.concurrent.ConcurrentSkipListMap;
+042import java.util.concurrent.TimeUnit;
+043import java.util.function.Supplier;
+044import java.util.stream.Collectors;
+045import java.util.stream.Stream;
+046
+047import org.apache.commons.logging.Log;
+048import 
org.apache.commons.logging.LogFactory;
+049import 
org.apache.hadoop.hbase.CellScannable;
+050import 
org.apache.hadoop.hbase.DoNotRetryIOException;
+051import 
org.apache.hadoop.hbase.HRegionLocation;
+052import 
org.apache.hadoop.hbase.ServerName;
+053import 
org.apache.hadoop.hbase.TableName;
+054import 
org.apache.yetus.audience.InterfaceAudience;
+055import 
org.apache.hadoop.hbase.client.MultiResponse.RegionResult;
+056import 
org.apache.hadoop.hbase.client.RetriesExhaustedException.ThrowableWithExtraContext;
+057import 
org.apache.hadoop.hbase.ipc.HBaseRpcController;
+058import 
org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
+059import 
org.apache.hadoop.hbase.shaded.protobuf.ResponseConverter;
+060import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
+061import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService;
 062import 
org.apache.hadoop.hbase.util.Bytes;
 063import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 064
@@ -240,212 +240,208 @@
 232  }
 233
 234  private ClientProtos.MultiRequest 
buildReq(Mapbyte[], RegionRequest actionsByRegion,
-235  ListCellScannable cells) 
throws IOException {
+235  ListCellScannable cells, 
MapInteger, Integer rowMutationsIndexMap) throws IOException {
 236ClientProtos.MultiRequest.Builder 
multiRequestBuilder = ClientProtos.MultiRequest.newBuilder();
 237ClientProtos.RegionAction.Builder 
regionActionBuilder = ClientProtos.RegionAction.newBuilder();
 238ClientProtos.Action.Builder 
actionBuilder = ClientProtos.Action.newBuilder();
 239ClientProtos.MutationProto.Builder 
mutationBuilder = ClientProtos.MutationProto.newBuilder();
 240for (Map.Entrybyte[], 
RegionRequest entry : actionsByRegion.entrySet()) {
-241  // TODO: remove the extra for loop 
as we will iterate it in mutationBuilder.
-242  if 
(!multiRequestBuilder.hasNonceGroup()) {
-243for (Action action : 

[29/51] [partial] hbase-site git commit: Published site at .

2017-11-29 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/fd365a2b/devapidocs/org/apache/hadoop/hbase/client/MultiServerCallable.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/MultiServerCallable.html 
b/devapidocs/org/apache/hadoop/hbase/client/MultiServerCallable.html
index 1a8ad60..df460a8 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/MultiServerCallable.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/MultiServerCallable.html
@@ -133,7 +133,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Private
-class MultiServerCallable
+class MultiServerCallable
 extends CancellableRegionServerCallableMultiResponse
 Callable that handles the multi method call 
going against a single
  regionserver; i.e. A RegionServerCallable for the multi call (It is NOT a
@@ -296,7 +296,7 @@ extends 
 
 multiAction
-privateMultiAction multiAction
+privateMultiAction multiAction
 
 
 
@@ -305,7 +305,7 @@ extends 
 
 cellBlock
-privateboolean cellBlock
+privateboolean cellBlock
 
 
 
@@ -322,7 +322,7 @@ extends 
 
 MultiServerCallable
-MultiServerCallable(ClusterConnectionconnection,
+MultiServerCallable(ClusterConnectionconnection,
 TableNametableName,
 ServerNamelocation,
 MultiActionmulti,
@@ -346,7 +346,7 @@ extends 
 
 reset
-publicvoidreset(ServerNamelocation,
+publicvoidreset(ServerNamelocation,
   MultiActionmultiAction)
 
 
@@ -356,7 +356,7 @@ extends 
 
 getLocation
-protectedHRegionLocationgetLocation()
+protectedHRegionLocationgetLocation()
 
 Overrides:
 getLocationin
 classRegionServerCallableMultiResponse,org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService.BlockingInterface
@@ -369,7 +369,7 @@ extends 
 
 getHRegionInfo
-publicHRegionInfogetHRegionInfo()
+publicHRegionInfogetHRegionInfo()
 
 Overrides:
 getHRegionInfoin
 classRegionServerCallableMultiResponse,org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService.BlockingInterface
@@ -384,7 +384,7 @@ extends 
 
 getMulti
-MultiActiongetMulti()
+MultiActiongetMulti()
 
 
 
@@ -393,7 +393,7 @@ extends 
 
 rpcCall
-protectedMultiResponserpcCall()
+protectedMultiResponserpcCall()
  throws http://docs.oracle.com/javase/8/docs/api/java/lang/Exception.html?is-external=true;
 title="class or interface in java.lang">Exception
 Description copied from 
class:RegionServerCallable
 Run the RPC call. Implement this method. To get at the 
rpcController that has been created
@@ -415,7 +415,7 @@ extends 
 
 isCellBlock
-privatebooleanisCellBlock()
+privatebooleanisCellBlock()
 
 Returns:
 True if we should send data in cellblocks.  This is an expensive call.  
Cache the
@@ -429,7 +429,7 @@ extends 
 
 prepare
-publicvoidprepare(booleanreload)
+publicvoidprepare(booleanreload)
  throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 Description copied from 
interface:RetryingCallable
 Prepare by setting up any connections to servers, etc., 
ahead of call invocation.
@@ -452,7 +452,7 @@ extends 
 
 getServerName
-ServerNamegetServerName()
+ServerNamegetServerName()
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/fd365a2b/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.AddColumnFamilyProcedureBiConsumer.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.AddColumnFamilyProcedureBiConsumer.html
 
b/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.AddColumnFamilyProcedureBiConsumer.html
index 3557e34..96dc671 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.AddColumnFamilyProcedureBiConsumer.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.AddColumnFamilyProcedureBiConsumer.html
@@ -127,7 +127,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-private class RawAsyncHBaseAdmin.AddColumnFamilyProcedureBiConsumer
+private class RawAsyncHBaseAdmin.AddColumnFamilyProcedureBiConsumer
 extends RawAsyncHBaseAdmin.TableProcedureBiConsumer
 
 
@@ -232,7 +232,7 @@ extends 
 
 AddColumnFamilyProcedureBiConsumer
-AddColumnFamilyProcedureBiConsumer(TableNametableName)
+AddColumnFamilyProcedureBiConsumer(TableNametableName)
 
 
 
@@ -249,7 +249,7 @@ extends 
 
 getOperationType
-http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringgetOperationType()
+http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringgetOperationType()
 
 Specified by:
 getOperationTypein
 classRawAsyncHBaseAdmin.TableProcedureBiConsumer


[16/51] [partial] hbase-site git commit: Published site at .

2017-11-29 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/fd365a2b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncTable.CheckAndMutateBuilder.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncTable.CheckAndMutateBuilder.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncTable.CheckAndMutateBuilder.html
index c2c122a..7cece5c 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncTable.CheckAndMutateBuilder.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncTable.CheckAndMutateBuilder.html
@@ -439,11 +439,11 @@
 431  }
 432
 433  /**
-434   * Method that does a batch call on 
Deletes, Gets, Puts, Increments and Appends. The ordering of
-435   * execution of the actions is not 
defined. Meaning if you do a Put and a Get in the same
-436   * {@link #batch} call, you will not 
necessarily be guaranteed that the Get returns what the Put
-437   * had put.
-438   * @param actions list of Get, Put, 
Delete, Increment, Append objects
+434   * Method that does a batch call on 
Deletes, Gets, Puts, Increments, Appends and RowMutations. The
+435   * ordering of execution of the actions 
is not defined. Meaning if you do a Put and a Get in the
+436   * same {@link #batch} call, you will 
not necessarily be guaranteed that the Get returns what the
+437   * Put had put.
+438   * @param actions list of Get, Put, 
Delete, Increment, Append, and RowMutations objects
 439   * @return A list of {@link 
CompletableFuture}s that represent the result for each action.
 440   */
 441  T 
ListCompletableFutureT batch(List? extends Row 
actions);
@@ -451,7 +451,7 @@
 443  /**
 444   * A simple version of batch. It will 
fail if there are any failures and you will get the whole
 445   * result list at once if the operation 
is succeeded.
-446   * @param actions list of Get, Put, 
Delete, Increment, Append objects
+446   * @param actions list of Get, Put, 
Delete, Increment, Append and RowMutations objects
 447   * @return A list of the result for the 
actions. Wrapped by a {@link CompletableFuture}.
 448   */
 449  default T 
CompletableFutureListT batchAll(List? extends Row 
actions) {

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/fd365a2b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncTable.CoprocessorCallback.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncTable.CoprocessorCallback.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncTable.CoprocessorCallback.html
index c2c122a..7cece5c 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncTable.CoprocessorCallback.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncTable.CoprocessorCallback.html
@@ -439,11 +439,11 @@
 431  }
 432
 433  /**
-434   * Method that does a batch call on 
Deletes, Gets, Puts, Increments and Appends. The ordering of
-435   * execution of the actions is not 
defined. Meaning if you do a Put and a Get in the same
-436   * {@link #batch} call, you will not 
necessarily be guaranteed that the Get returns what the Put
-437   * had put.
-438   * @param actions list of Get, Put, 
Delete, Increment, Append objects
+434   * Method that does a batch call on 
Deletes, Gets, Puts, Increments, Appends and RowMutations. The
+435   * ordering of execution of the actions 
is not defined. Meaning if you do a Put and a Get in the
+436   * same {@link #batch} call, you will 
not necessarily be guaranteed that the Get returns what the
+437   * Put had put.
+438   * @param actions list of Get, Put, 
Delete, Increment, Append, and RowMutations objects
 439   * @return A list of {@link 
CompletableFuture}s that represent the result for each action.
 440   */
 441  T 
ListCompletableFutureT batch(List? extends Row 
actions);
@@ -451,7 +451,7 @@
 443  /**
 444   * A simple version of batch. It will 
fail if there are any failures and you will get the whole
 445   * result list at once if the operation 
is succeeded.
-446   * @param actions list of Get, Put, 
Delete, Increment, Append objects
+446   * @param actions list of Get, Put, 
Delete, Increment, Append and RowMutations objects
 447   * @return A list of the result for the 
actions. Wrapped by a {@link CompletableFuture}.
 448   */
 449  default T 
CompletableFutureListT batchAll(List? extends Row 
actions) {

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/fd365a2b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncTable.CoprocessorServiceBuilder.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncTable.CoprocessorServiceBuilder.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncTable.CoprocessorServiceBuilder.html
index c2c122a..7cece5c 100644
--- 

[26/51] [partial] hbase-site git commit: Published site at .

2017-11-29 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/fd365a2b/devapidocs/org/apache/hadoop/hbase/client/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/package-tree.html 
b/devapidocs/org/apache/hadoop/hbase/client/package-tree.html
index 97e977c..c8967e5 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/package-tree.html
@@ -543,23 +543,23 @@
 java.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true;
 title="class or interface in java.lang">EnumE (implements java.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true;
 title="class or interface in java.lang">ComparableT, java.io.http://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true;
 title="class or interface in java.io">Serializable)
 
 org.apache.hadoop.hbase.client.ScannerCallable.MoreResults
-org.apache.hadoop.hbase.client.SnapshotType
-org.apache.hadoop.hbase.client.AsyncRequestFutureImpl.Retry
-org.apache.hadoop.hbase.client.RequestController.ReturnCode
-org.apache.hadoop.hbase.client.MobCompactPartitionPolicy
-org.apache.hadoop.hbase.client.RegionLocateType
+org.apache.hadoop.hbase.client.MasterSwitchType
 org.apache.hadoop.hbase.client.CompactType
 org.apache.hadoop.hbase.client.Scan.ReadType
-org.apache.hadoop.hbase.client.IsolationLevel
-org.apache.hadoop.hbase.client.CompactionState
+org.apache.hadoop.hbase.client.RegionLocateType
+org.apache.hadoop.hbase.client.SnapshotType
+org.apache.hadoop.hbase.client.MobCompactPartitionPolicy
+org.apache.hadoop.hbase.client.AsyncScanSingleRegionRpcRetryingCaller.ScanResumerState
+org.apache.hadoop.hbase.client.Durability
 org.apache.hadoop.hbase.client.AsyncScanSingleRegionRpcRetryingCaller.ScanControllerState
+org.apache.hadoop.hbase.client.CompactionState
+org.apache.hadoop.hbase.client.RequestController.ReturnCode
+org.apache.hadoop.hbase.client.IsolationLevel
 org.apache.hadoop.hbase.client.Consistency
-org.apache.hadoop.hbase.client.AsyncScanSingleRegionRpcRetryingCaller.ScanResumerState
 org.apache.hadoop.hbase.client.AbstractResponse.ResponseType
-org.apache.hadoop.hbase.client.MasterSwitchType
 org.apache.hadoop.hbase.client.TableState.State
 org.apache.hadoop.hbase.client.AsyncProcessTask.SubmittedRows
-org.apache.hadoop.hbase.client.Durability
+org.apache.hadoop.hbase.client.AsyncRequestFutureImpl.Retry
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/fd365a2b/devapidocs/org/apache/hadoop/hbase/coordination/ZkSplitLogWorkerCoordination.GetDataAsyncCallback.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/coordination/ZkSplitLogWorkerCoordination.GetDataAsyncCallback.html
 
b/devapidocs/org/apache/hadoop/hbase/coordination/ZkSplitLogWorkerCoordination.GetDataAsyncCallback.html
index f6dbdb5..ebf2337 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/coordination/ZkSplitLogWorkerCoordination.GetDataAsyncCallback.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/coordination/ZkSplitLogWorkerCoordination.GetDataAsyncCallback.html
@@ -117,7 +117,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-class ZkSplitLogWorkerCoordination.GetDataAsyncCallback
+class ZkSplitLogWorkerCoordination.GetDataAsyncCallback
 extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
 implements org.apache.zookeeper.AsyncCallback.DataCallback
 Asynchronous handler for zk get-data-set-watch on node 
results.
@@ -226,7 +226,7 @@ implements 
org.apache.zookeeper.AsyncCallback.DataCallback
 
 
 LOG
-private finalorg.apache.commons.logging.Log LOG
+private finalorg.apache.commons.logging.Log LOG
 
 
 
@@ -243,7 +243,7 @@ implements 
org.apache.zookeeper.AsyncCallback.DataCallback
 
 
 GetDataAsyncCallback
-GetDataAsyncCallback()
+GetDataAsyncCallback()
 
 
 
@@ -260,7 +260,7 @@ implements 
org.apache.zookeeper.AsyncCallback.DataCallback
 
 
 processResult
-publicvoidprocessResult(intrc,
+publicvoidprocessResult(intrc,
   http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringpath,
   http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Objectctx,
   byte[]data,

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/fd365a2b/devapidocs/org/apache/hadoop/hbase/coordination/ZkSplitLogWorkerCoordination.ZkSplitTaskDetails.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/coordination/ZkSplitLogWorkerCoordination.ZkSplitTaskDetails.html
 

[01/51] [partial] hbase-site git commit: Published site at .

2017-11-29 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 83b248d3f -> fd365a2bc


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/fd365a2b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.TableFuture.TableWaitForStateCallable.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.TableFuture.TableWaitForStateCallable.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.TableFuture.TableWaitForStateCallable.html
index d438f22..7c59e27 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.TableFuture.TableWaitForStateCallable.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.TableFuture.TableWaitForStateCallable.html
@@ -1290,8 +1290,8 @@
 1282   CompactType 
compactType) throws IOException {
 1283switch (compactType) {
 1284  case MOB:
-1285
compact(this.connection.getAdminForMaster(), getMobRegionInfo(tableName), 
major,
-1286  columnFamily);
+1285
compact(this.connection.getAdminForMaster(), 
RegionInfo.createMobRegionInfo(tableName),
+1286major, columnFamily);
 1287break;
 1288  case NORMAL:
 1289checkTableExists(tableName);
@@ -3248,7 +3248,7 @@
 3240  new 
CallableAdminProtos.GetRegionInfoResponse.CompactionState() {
 3241@Override
 3242public 
AdminProtos.GetRegionInfoResponse.CompactionState call() throws Exception {
-3243  RegionInfo info = 
getMobRegionInfo(tableName);
+3243  RegionInfo info = 
RegionInfo.createMobRegionInfo(tableName);
 3244  GetRegionInfoRequest 
request =
 3245
RequestConverter.buildGetRegionInfoRequest(info.getRegionName(), true);
 3246  GetRegionInfoResponse 
response = masterAdmin.getRegionInfo(rpcController, request);
@@ -3312,7 +3312,7 @@
 3304}
 3305break;
 3306  default:
-3307throw new 
IllegalArgumentException("Unknowne compactType: " + compactType);
+3307throw new 
IllegalArgumentException("Unknown compactType: " + compactType);
 3308}
 3309if (state != null) {
 3310  return 
ProtobufUtil.createCompactionState(state);
@@ -3847,325 +3847,320 @@
 3839});
 3840  }
 3841
-3842  private RegionInfo 
getMobRegionInfo(TableName tableName) {
-3843return 
RegionInfoBuilder.newBuilder(tableName).setStartKey(Bytes.toBytes(".mob")).setRegionId(0)
-3844.build();
-3845  }
-3846
-3847  private RpcControllerFactory 
getRpcControllerFactory() {
-3848return this.rpcControllerFactory;
-3849  }
-3850
-3851  @Override
-3852  public void addReplicationPeer(String 
peerId, ReplicationPeerConfig peerConfig, boolean enabled)
-3853  throws IOException {
-3854executeCallable(new 
MasterCallableVoid(getConnection(), getRpcControllerFactory()) {
-3855  @Override
-3856  protected Void rpcCall() throws 
Exception {
-3857
master.addReplicationPeer(getRpcController(),
-3858  
RequestConverter.buildAddReplicationPeerRequest(peerId, peerConfig, 
enabled));
-3859return null;
-3860  }
-3861});
-3862  }
-3863
-3864  @Override
-3865  public void 
removeReplicationPeer(String peerId) throws IOException {
-3866executeCallable(new 
MasterCallableVoid(getConnection(), getRpcControllerFactory()) {
-3867  @Override
-3868  protected Void rpcCall() throws 
Exception {
-3869
master.removeReplicationPeer(getRpcController(),
-3870  
RequestConverter.buildRemoveReplicationPeerRequest(peerId));
-3871return null;
-3872  }
-3873});
-3874  }
-3875
-3876  @Override
-3877  public void 
enableReplicationPeer(final String peerId) throws IOException {
-3878executeCallable(new 
MasterCallableVoid(getConnection(), getRpcControllerFactory()) {
-3879  @Override
-3880  protected Void rpcCall() throws 
Exception {
-3881
master.enableReplicationPeer(getRpcController(),
-3882  
RequestConverter.buildEnableReplicationPeerRequest(peerId));
-3883return null;
-3884  }
-3885});
-3886  }
-3887
-3888  @Override
-3889  public void 
disableReplicationPeer(final String peerId) throws IOException {
-3890executeCallable(new 
MasterCallableVoid(getConnection(), getRpcControllerFactory()) {
-3891  @Override
-3892  protected Void rpcCall() throws 
Exception {
-3893
master.disableReplicationPeer(getRpcController(),
-3894  
RequestConverter.buildDisableReplicationPeerRequest(peerId));
-3895return null;
-3896  }
-3897});
-3898  }
-3899
-3900  @Override
-3901  public ReplicationPeerConfig 
getReplicationPeerConfig(final String peerId) throws IOException {
-3902return executeCallable(new 
MasterCallableReplicationPeerConfig(getConnection(),
-3903getRpcControllerFactory()) {
-3904  @Override
-3905  protected ReplicationPeerConfig 

[03/51] [partial] hbase-site git commit: Published site at .

2017-11-29 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/fd365a2b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.RestoreSnapshotFuture.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.RestoreSnapshotFuture.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.RestoreSnapshotFuture.html
index d438f22..7c59e27 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.RestoreSnapshotFuture.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.RestoreSnapshotFuture.html
@@ -1290,8 +1290,8 @@
 1282   CompactType 
compactType) throws IOException {
 1283switch (compactType) {
 1284  case MOB:
-1285
compact(this.connection.getAdminForMaster(), getMobRegionInfo(tableName), 
major,
-1286  columnFamily);
+1285
compact(this.connection.getAdminForMaster(), 
RegionInfo.createMobRegionInfo(tableName),
+1286major, columnFamily);
 1287break;
 1288  case NORMAL:
 1289checkTableExists(tableName);
@@ -3248,7 +3248,7 @@
 3240  new 
CallableAdminProtos.GetRegionInfoResponse.CompactionState() {
 3241@Override
 3242public 
AdminProtos.GetRegionInfoResponse.CompactionState call() throws Exception {
-3243  RegionInfo info = 
getMobRegionInfo(tableName);
+3243  RegionInfo info = 
RegionInfo.createMobRegionInfo(tableName);
 3244  GetRegionInfoRequest 
request =
 3245
RequestConverter.buildGetRegionInfoRequest(info.getRegionName(), true);
 3246  GetRegionInfoResponse 
response = masterAdmin.getRegionInfo(rpcController, request);
@@ -3312,7 +3312,7 @@
 3304}
 3305break;
 3306  default:
-3307throw new 
IllegalArgumentException("Unknowne compactType: " + compactType);
+3307throw new 
IllegalArgumentException("Unknown compactType: " + compactType);
 3308}
 3309if (state != null) {
 3310  return 
ProtobufUtil.createCompactionState(state);
@@ -3847,325 +3847,320 @@
 3839});
 3840  }
 3841
-3842  private RegionInfo 
getMobRegionInfo(TableName tableName) {
-3843return 
RegionInfoBuilder.newBuilder(tableName).setStartKey(Bytes.toBytes(".mob")).setRegionId(0)
-3844.build();
-3845  }
-3846
-3847  private RpcControllerFactory 
getRpcControllerFactory() {
-3848return this.rpcControllerFactory;
-3849  }
-3850
-3851  @Override
-3852  public void addReplicationPeer(String 
peerId, ReplicationPeerConfig peerConfig, boolean enabled)
-3853  throws IOException {
-3854executeCallable(new 
MasterCallableVoid(getConnection(), getRpcControllerFactory()) {
-3855  @Override
-3856  protected Void rpcCall() throws 
Exception {
-3857
master.addReplicationPeer(getRpcController(),
-3858  
RequestConverter.buildAddReplicationPeerRequest(peerId, peerConfig, 
enabled));
-3859return null;
-3860  }
-3861});
-3862  }
-3863
-3864  @Override
-3865  public void 
removeReplicationPeer(String peerId) throws IOException {
-3866executeCallable(new 
MasterCallableVoid(getConnection(), getRpcControllerFactory()) {
-3867  @Override
-3868  protected Void rpcCall() throws 
Exception {
-3869
master.removeReplicationPeer(getRpcController(),
-3870  
RequestConverter.buildRemoveReplicationPeerRequest(peerId));
-3871return null;
-3872  }
-3873});
-3874  }
-3875
-3876  @Override
-3877  public void 
enableReplicationPeer(final String peerId) throws IOException {
-3878executeCallable(new 
MasterCallableVoid(getConnection(), getRpcControllerFactory()) {
-3879  @Override
-3880  protected Void rpcCall() throws 
Exception {
-3881
master.enableReplicationPeer(getRpcController(),
-3882  
RequestConverter.buildEnableReplicationPeerRequest(peerId));
-3883return null;
-3884  }
-3885});
-3886  }
-3887
-3888  @Override
-3889  public void 
disableReplicationPeer(final String peerId) throws IOException {
-3890executeCallable(new 
MasterCallableVoid(getConnection(), getRpcControllerFactory()) {
-3891  @Override
-3892  protected Void rpcCall() throws 
Exception {
-3893
master.disableReplicationPeer(getRpcController(),
-3894  
RequestConverter.buildDisableReplicationPeerRequest(peerId));
-3895return null;
-3896  }
-3897});
-3898  }
-3899
-3900  @Override
-3901  public ReplicationPeerConfig 
getReplicationPeerConfig(final String peerId) throws IOException {
-3902return executeCallable(new 
MasterCallableReplicationPeerConfig(getConnection(),
-3903getRpcControllerFactory()) {
-3904  @Override
-3905  protected ReplicationPeerConfig 
rpcCall() throws Exception {
-3906GetReplicationPeerConfigResponse 
response = master.getReplicationPeerConfig(
-3907  getRpcController(), 

[43/51] [partial] hbase-site git commit: Published site at .

2017-11-29 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/fd365a2b/apidocs/src-html/org/apache/hadoop/hbase/client/AsyncAdmin.html
--
diff --git a/apidocs/src-html/org/apache/hadoop/hbase/client/AsyncAdmin.html 
b/apidocs/src-html/org/apache/hadoop/hbase/client/AsyncAdmin.html
index 92c1c97..68f8a94 100644
--- a/apidocs/src-html/org/apache/hadoop/hbase/client/AsyncAdmin.html
+++ b/apidocs/src-html/org/apache/hadoop/hbase/client/AsyncAdmin.html
@@ -307,860 +307,919 @@
 299   * was sent to HBase and may need some 
time to finish the compact operation.
 300   * @param tableName table to compact
 301   */
-302  CompletableFutureVoid 
compact(TableName tableName);
-303
-304  /**
-305   * Compact a column family within a 
table. When the returned CompletableFuture is done, it only
-306   * means the compact request was sent 
to HBase and may need some time to finish the compact
-307   * operation.
-308   * @param tableName table to compact
-309   * @param columnFamily column family 
within a table. If not present, compact the table's all
-310   *  column families.
-311   */
-312  CompletableFutureVoid 
compact(TableName tableName, byte[] columnFamily);
-313
-314  /**
-315   * Compact an individual region. When 
the returned CompletableFuture is done, it only means the
-316   * compact request was sent to HBase 
and may need some time to finish the compact operation.
-317   * @param regionName region to 
compact
-318   */
-319  CompletableFutureVoid 
compactRegion(byte[] regionName);
-320
-321  /**
-322   * Compact a column family within a 
region. When the returned CompletableFuture is done, it only
-323   * means the compact request was sent 
to HBase and may need some time to finish the compact
-324   * operation.
-325   * @param regionName region to 
compact
-326   * @param columnFamily column family 
within a region. If not present, compact the region's all
-327   *  column families.
-328   */
-329  CompletableFutureVoid 
compactRegion(byte[] regionName, byte[] columnFamily);
-330
-331  /**
-332   * Major compact a table. When the 
returned CompletableFuture is done, it only means the compact
-333   * request was sent to HBase and may 
need some time to finish the compact operation.
-334   * @param tableName table to major 
compact
-335   */
-336  CompletableFutureVoid 
majorCompact(TableName tableName);
-337
-338  /**
-339   * Major compact a column family within 
a table. When the returned CompletableFuture is done, it
-340   * only means the compact request was 
sent to HBase and may need some time to finish the compact
-341   * operation.
-342   * @param tableName table to major 
compact
-343   * @param columnFamily column family 
within a table. If not present, major compact the table's all
-344   *  column families.
-345   */
-346  CompletableFutureVoid 
majorCompact(TableName tableName, byte[] columnFamily);
-347
-348  /**
-349   * Major compact a region. When the 
returned CompletableFuture is done, it only means the compact
-350   * request was sent to HBase and may 
need some time to finish the compact operation.
-351   * @param regionName region to major 
compact
-352   */
-353  CompletableFutureVoid 
majorCompactRegion(byte[] regionName);
-354
-355  /**
-356   * Major compact a column family within 
region. When the returned CompletableFuture is done, it
-357   * only means the compact request was 
sent to HBase and may need some time to finish the compact
-358   * operation.
-359   * @param regionName region to major 
compact
-360   * @param columnFamily column family 
within a region. If not present, major compact the region's
-361   *  all column families.
-362   */
-363  CompletableFutureVoid 
majorCompactRegion(byte[] regionName, byte[] columnFamily);
-364
-365  /**
-366   * Compact all regions on the region 
server.
-367   * @param serverName the region server 
name
-368   */
-369  CompletableFutureVoid 
compactRegionServer(ServerName serverName);
-370
-371  /**
-372   * Compact all regions on the region 
server.
-373   * @param serverName the region server 
name
-374   */
-375  CompletableFutureVoid 
majorCompactRegionServer(ServerName serverName);
-376
-377  /**
-378   * Turn the Merge switch on or off.
-379   * @param on
-380   * @return Previous switch value 
wrapped by a {@link CompletableFuture}
-381   */
-382  CompletableFutureBoolean 
mergeSwitch(boolean on);
-383
-384  /**
-385   * Query the current state of the Merge 
switch.
-386   * @return true if the switch is on, 
false otherwise. The return value will be wrapped by a
-387   * {@link CompletableFuture}
-388   */
-389  CompletableFutureBoolean 
isMergeEnabled();
-390
-391  /**
-392   * Turn the Split switch on or off.
-393   * @param on
-394   * @return Previous switch value 
wrapped by a {@link CompletableFuture}
-395   */
-396  CompletableFutureBoolean 
splitSwitch(boolean on);
-397
-398  /**
-399   * Query the current 

[30/51] [partial] hbase-site git commit: Published site at .

2017-11-29 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/fd365a2b/devapidocs/org/apache/hadoop/hbase/client/HTable.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/HTable.html 
b/devapidocs/org/apache/hadoop/hbase/client/HTable.html
index 11c857e..380c1d7 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/HTable.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/HTable.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":9,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":9,"i33":10,"i34":10,"i35":10,"i36":9,"i37":10,"i38":42,"i39":10,"i40":10,"i41":42,"i42":10,"i43":10,"i44":42,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":42,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":42,"i61":42,"i62":42,"i63":42,"i64":10,"i65":10,"i66":9};
+var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":42,"i8":10,"i9":42,"i10":10,"i11":10,"i12":42,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":9,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":9,"i33":10,"i34":10,"i35":10,"i36":9,"i37":10,"i38":42,"i39":10,"i40":10,"i41":42,"i42":10,"i43":10,"i44":42,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":42,"i51":42,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":42,"i61":42,"i62":42,"i63":42,"i64":10,"i65":10,"i66":9};
 var tabs = {65535:["t0","All Methods"],1:["t1","Static 
Methods"],2:["t2","Instance Methods"],8:["t4","Concrete 
Methods"],32:["t6","Deprecated Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -348,8 +348,7 @@ implements CompareFilter.CompareOpcompareOp,
   byte[]value,
   Deletedelete)
-Atomically checks if a row/family/qualifier value matches 
the expected
- value.
+Deprecated.
 
 
 
@@ -372,7 +371,7 @@ implements CompareFilter.CompareOpcompareOp,
   byte[]value,
   RowMutationsrm)
-Atomically checks if a row/family/qualifier value matches 
the expected value.
+Deprecated.
 
 
 
@@ -405,8 +404,7 @@ implements CompareFilter.CompareOpcompareOp,
byte[]value,
Putput)
-Atomically checks if a row/family/qualifier value matches 
the expected
- value.
+Deprecated.
 
 
 
@@ -667,7 +665,7 @@ implements 
 HTableDescriptor
 getTableDescriptor()
-Gets the table descriptor for 
this table.
+Deprecated.
 
 
 
@@ -1088,8 +1086,10 @@ public statichttp://docs.oracle.com/javase/8/docs/api/java/util/c
 
 
 getTableDescriptor
-publicHTableDescriptorgetTableDescriptor()
-throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
+http://docs.oracle.com/javase/8/docs/api/java/lang/Deprecated.html?is-external=true;
 title="class or interface in java.lang">@Deprecated
+publicHTableDescriptorgetTableDescriptor()
+throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
+Deprecated.
 Gets the table descriptor for 
this table.
 
 Specified by:
@@ -1105,7 +1105,7 @@ public statichttp://docs.oracle.com/javase/8/docs/api/java/util/c
 
 
 getDescriptor
-publicTableDescriptorgetDescriptor()
+publicTableDescriptorgetDescriptor()
   throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 Description copied from 
interface:Table
 Gets the table 
descriptor for this table.
@@ -1123,7 +1123,7 @@ public statichttp://docs.oracle.com/javase/8/docs/api/java/util/c
 
 
 getKeysAndRegionsInRange
-privatePairhttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">Listbyte[],http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListHRegionLocationgetKeysAndRegionsInRange(byte[]startKey,
+privatePairhttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">Listbyte[],http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListHRegionLocationgetKeysAndRegionsInRange(byte[]startKey,
   
byte[]endKey,
   
booleanincludeEndKey)
throws 

hbase git commit: HBASE-19290 Reduce zk request when doing split log

2017-11-29 Thread binlijin
Repository: hbase
Updated Branches:
  refs/heads/branch-2 0c4c39553 -> 64ddce303


HBASE-19290 Reduce zk request when doing split log


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/64ddce30
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/64ddce30
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/64ddce30

Branch: refs/heads/branch-2
Commit: 64ddce303ee352a3ab1eb9cedf3fad097be7569c
Parents: 0c4c395
Author: binlijin 
Authored: Wed Nov 29 18:43:41 2017 +0800
Committer: binlijin 
Committed: Wed Nov 29 18:43:41 2017 +0800

--
 .../ZkSplitLogWorkerCoordination.java   | 78 +---
 1 file changed, 51 insertions(+), 27 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/64ddce30/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkSplitLogWorkerCoordination.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkSplitLogWorkerCoordination.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkSplitLogWorkerCoordination.java
index e64907c..0540a8f 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkSplitLogWorkerCoordination.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkSplitLogWorkerCoordination.java
@@ -19,6 +19,7 @@
 
 package org.apache.hadoop.hbase.coordination;
 
+import java.util.Collections;
 import java.util.List;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.atomic.LongAdder;
@@ -199,27 +200,28 @@ public class ZkSplitLogWorkerCoordination extends 
ZKListener implements
* try to grab a 'lock' on the task zk node to own and execute the task.
* 
* @param path zk node for the task
+   * @return boolean value when grab a task success return true otherwise false
*/
-  private void grabTask(String path) {
+  private boolean grabTask(String path) {
 Stat stat = new Stat();
 byte[] data;
 synchronized (grabTaskLock) {
   currentTask = path;
   workerInGrabTask = true;
   if (Thread.interrupted()) {
-return;
+return false;
   }
 }
 try {
   try {
 if ((data = ZKUtil.getDataNoWatch(watcher, path, stat)) == null) {
   SplitLogCounters.tot_wkr_failed_to_grab_task_no_data.increment();
-  return;
+  return false;
 }
   } catch (KeeperException e) {
 LOG.warn("Failed to get data for znode " + path, e);
 SplitLogCounters.tot_wkr_failed_to_grab_task_exception.increment();
-return;
+return false;
   }
   SplitLogTask slt;
   try {
@@ -227,18 +229,18 @@ public class ZkSplitLogWorkerCoordination extends 
ZKListener implements
   } catch (DeserializationException e) {
 LOG.warn("Failed parse data for znode " + path, e);
 SplitLogCounters.tot_wkr_failed_to_grab_task_exception.increment();
-return;
+return false;
   }
   if (!slt.isUnassigned()) {
 SplitLogCounters.tot_wkr_failed_to_grab_task_owned.increment();
-return;
+return false;
   }
 
   currentVersion =
   attemptToOwnTask(true, watcher, server.getServerName(), path, 
stat.getVersion());
   if (currentVersion < 0) {
 SplitLogCounters.tot_wkr_failed_to_grab_task_lost_race.increment();
-return;
+return false;
   }
 
   if (ZKSplitLog.isRescanNode(watcher, currentTask)) {
@@ -249,7 +251,7 @@ public class ZkSplitLogWorkerCoordination extends 
ZKListener implements
 
 endTask(new SplitLogTask.Done(server.getServerName()),
   SplitLogCounters.tot_wkr_task_acquired_rescan, splitTaskDetails);
-return;
+return false;
   }
 
   LOG.info("worker " + server.getServerName() + " acquired task " + path);
@@ -266,6 +268,7 @@ public class ZkSplitLogWorkerCoordination extends 
ZKListener implements
 LOG.warn("Interrupted while yielding for other region servers", e);
 Thread.currentThread().interrupt();
   }
+  return true;
 } finally {
   synchronized (grabTaskLock) {
 workerInGrabTask = false;
@@ -316,12 +319,13 @@ public class ZkSplitLogWorkerCoordination extends 
ZKListener implements
   }
 
   /**
-   * This function calculates how many splitters it could create based on 
expected average tasks per
-   * RS and the hard limit upper bound(maxConcurrentTasks) set by 
configuration. 
+   * This function calculates how many splitters this RS should create based 
on expected average
+   * tasks per RS and the hard limit upper bound(maxConcurrentTasks) set by 
configuration. 
* At any given time, a RS allows spawn 

hbase git commit: HBASE-19290 Reduce zk request when doing split log

2017-11-29 Thread binlijin
Repository: hbase
Updated Branches:
  refs/heads/master b4a4be65e -> 8b32d3792


HBASE-19290 Reduce zk request when doing split log


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/8b32d379
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/8b32d379
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/8b32d379

Branch: refs/heads/master
Commit: 8b32d3792934507c774997cd82dc061b75410f83
Parents: b4a4be6
Author: binlijin 
Authored: Wed Nov 29 18:42:14 2017 +0800
Committer: binlijin 
Committed: Wed Nov 29 18:42:14 2017 +0800

--
 .../ZkSplitLogWorkerCoordination.java   | 78 +---
 1 file changed, 51 insertions(+), 27 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/8b32d379/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkSplitLogWorkerCoordination.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkSplitLogWorkerCoordination.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkSplitLogWorkerCoordination.java
index e64907c..0540a8f 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkSplitLogWorkerCoordination.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkSplitLogWorkerCoordination.java
@@ -19,6 +19,7 @@
 
 package org.apache.hadoop.hbase.coordination;
 
+import java.util.Collections;
 import java.util.List;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.atomic.LongAdder;
@@ -199,27 +200,28 @@ public class ZkSplitLogWorkerCoordination extends 
ZKListener implements
* try to grab a 'lock' on the task zk node to own and execute the task.
* 
* @param path zk node for the task
+   * @return boolean value when grab a task success return true otherwise false
*/
-  private void grabTask(String path) {
+  private boolean grabTask(String path) {
 Stat stat = new Stat();
 byte[] data;
 synchronized (grabTaskLock) {
   currentTask = path;
   workerInGrabTask = true;
   if (Thread.interrupted()) {
-return;
+return false;
   }
 }
 try {
   try {
 if ((data = ZKUtil.getDataNoWatch(watcher, path, stat)) == null) {
   SplitLogCounters.tot_wkr_failed_to_grab_task_no_data.increment();
-  return;
+  return false;
 }
   } catch (KeeperException e) {
 LOG.warn("Failed to get data for znode " + path, e);
 SplitLogCounters.tot_wkr_failed_to_grab_task_exception.increment();
-return;
+return false;
   }
   SplitLogTask slt;
   try {
@@ -227,18 +229,18 @@ public class ZkSplitLogWorkerCoordination extends 
ZKListener implements
   } catch (DeserializationException e) {
 LOG.warn("Failed parse data for znode " + path, e);
 SplitLogCounters.tot_wkr_failed_to_grab_task_exception.increment();
-return;
+return false;
   }
   if (!slt.isUnassigned()) {
 SplitLogCounters.tot_wkr_failed_to_grab_task_owned.increment();
-return;
+return false;
   }
 
   currentVersion =
   attemptToOwnTask(true, watcher, server.getServerName(), path, 
stat.getVersion());
   if (currentVersion < 0) {
 SplitLogCounters.tot_wkr_failed_to_grab_task_lost_race.increment();
-return;
+return false;
   }
 
   if (ZKSplitLog.isRescanNode(watcher, currentTask)) {
@@ -249,7 +251,7 @@ public class ZkSplitLogWorkerCoordination extends 
ZKListener implements
 
 endTask(new SplitLogTask.Done(server.getServerName()),
   SplitLogCounters.tot_wkr_task_acquired_rescan, splitTaskDetails);
-return;
+return false;
   }
 
   LOG.info("worker " + server.getServerName() + " acquired task " + path);
@@ -266,6 +268,7 @@ public class ZkSplitLogWorkerCoordination extends 
ZKListener implements
 LOG.warn("Interrupted while yielding for other region servers", e);
 Thread.currentThread().interrupt();
   }
+  return true;
 } finally {
   synchronized (grabTaskLock) {
 workerInGrabTask = false;
@@ -316,12 +319,13 @@ public class ZkSplitLogWorkerCoordination extends 
ZKListener implements
   }
 
   /**
-   * This function calculates how many splitters it could create based on 
expected average tasks per
-   * RS and the hard limit upper bound(maxConcurrentTasks) set by 
configuration. 
+   * This function calculates how many splitters this RS should create based 
on expected average
+   * tasks per RS and the hard limit upper bound(maxConcurrentTasks) set by 
configuration. 
* At any given time, a RS allows spawn 

hbase git commit: HBASE-19362 Remove unused imports from hbase-thrift module

2017-11-29 Thread janh
Repository: hbase
Updated Branches:
  refs/heads/master e67a3699c -> b4a4be65e


HBASE-19362 Remove unused imports from hbase-thrift module

Signed-off-by: Jan Hentschel 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/b4a4be65
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/b4a4be65
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/b4a4be65

Branch: refs/heads/master
Commit: b4a4be65ea658f02fdaa2e99532c9b8fecd521cc
Parents: e67a369
Author: Guangxu Cheng 
Authored: Wed Nov 29 14:43:17 2017 +0800
Committer: Jan Hentschel 
Committed: Wed Nov 29 11:00:59 2017 +0100

--
 .../org/apache/hadoop/hbase/thrift/TBoundedThreadPoolServer.java| 1 -
 .../src/main/java/org/apache/hadoop/hbase/thrift2/ThriftServer.java | 1 -
 .../src/test/java/org/apache/hadoop/hbase/thrift/TestCallQueue.java | 1 -
 3 files changed, 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/b4a4be65/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/TBoundedThreadPoolServer.java
--
diff --git 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/TBoundedThreadPoolServer.java
 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/TBoundedThreadPoolServer.java
index 6bcd181..59825b1 100644
--- 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/TBoundedThreadPoolServer.java
+++ 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/TBoundedThreadPoolServer.java
@@ -29,7 +29,6 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.thrift.CallQueue.Call;
 import org.apache.hadoop.hbase.util.Threads;
 import org.apache.thrift.TException;
 import org.apache.thrift.TProcessor;

http://git-wip-us.apache.org/repos/asf/hbase/blob/b4a4be65/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftServer.java
--
diff --git 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftServer.java 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftServer.java
index c590370..000c115 100644
--- 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftServer.java
+++ 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftServer.java
@@ -57,7 +57,6 @@ import org.apache.hadoop.hbase.security.SaslUtil;
 import org.apache.hadoop.hbase.security.SecurityUtil;
 import org.apache.hadoop.hbase.security.UserProvider;
 import org.apache.hadoop.hbase.thrift.CallQueue;
-import org.apache.hadoop.hbase.thrift.CallQueue.Call;
 import org.apache.hadoop.hbase.thrift.THBaseThreadPoolExecutor;
 import org.apache.hadoop.hbase.thrift.ThriftMetrics;
 import org.apache.hadoop.hbase.thrift2.generated.THBaseService;

http://git-wip-us.apache.org/repos/asf/hbase/blob/b4a4be65/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift/TestCallQueue.java
--
diff --git 
a/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift/TestCallQueue.java 
b/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift/TestCallQueue.java
index e595847..ed9ca6b 100644
--- 
a/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift/TestCallQueue.java
+++ 
b/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift/TestCallQueue.java
@@ -30,7 +30,6 @@ import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.test.MetricsAssertHelper;
 import org.apache.hadoop.hbase.testclassification.ClientTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
-import org.apache.hadoop.hbase.thrift.CallQueue.Call;
 import org.junit.experimental.categories.Category;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;