[05/20] hbase git commit: HBASE-20125 Add UT for serial replication after region split and merge

2018-04-07 Thread zhangduo
HBASE-20125 Add UT for serial replication after region split and merge


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/688287a6
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/688287a6
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/688287a6

Branch: refs/heads/HBASE-20046-branch-2
Commit: 688287a6b8ca27708f231203dbf7340a88fae173
Parents: 80e00ee
Author: zhangduo 
Authored: Tue Mar 6 21:31:05 2018 +0800
Committer: zhangduo 
Committed: Sun Apr 8 11:13:39 2018 +0800

--
 .../hbase/replication/WALEntryFilter.java   |  17 +-
 .../regionserver/ReplicationSourceShipper.java  |   4 +-
 .../ReplicationSourceWALReader.java |  54 +++--
 .../regionserver/WALEntryStream.java|  73 ---
 .../replication/TestSerialReplication.java  | 200 ---
 5 files changed, 270 insertions(+), 78 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/688287a6/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/WALEntryFilter.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/WALEntryFilter.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/WALEntryFilter.java
index 417f868..cd3f1bd 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/WALEntryFilter.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/WALEntryFilter.java
@@ -15,7 +15,6 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-
 package org.apache.hadoop.hbase.replication;
 
 import org.apache.yetus.audience.InterfaceAudience;
@@ -35,12 +34,20 @@ import org.apache.hadoop.hbase.wal.WAL.Entry;
  */
 @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.REPLICATION)
 public interface WALEntryFilter {
+
   /**
-   * Applies the filter, possibly returning a different Entry instance.
-   * If null is returned, the entry will be skipped.
+   * 
+   * Applies the filter, possibly returning a different Entry instance. If 
null is returned, the
+   * entry will be skipped.
+   * 
+   * 
+   * Notice that you are free to modify the cell list of the give entry, but 
do not change the
+   * content of the cell, it may be used by others at the same time(and 
usually you can not modify a
+   * cell unless you cast it to the implementation class, which is not a good 
idea).
+   * 
* @param entry Entry to filter
-   * @return a (possibly modified) Entry to use. Returning null or an entry 
with
-   * no cells will cause the entry to be skipped for replication.
+   * @return a (possibly modified) Entry to use. Returning null or an entry 
with no cells will cause
+   * the entry to be skipped for replication.
*/
   public Entry filter(Entry entry);
 }
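
The new javadoc pins down the filter contract: a filter may prune the cell list of an entry in place, must not mutate the cells themselves, and can skip an entry entirely by returning null (or an entry with no cells left). As a rough illustration of that contract, a hypothetical filter that drops one column family from every entry could look like the sketch below; the class name and family name are made up, only the WALEntryFilter interface comes from this change.

package org.apache.hadoop.hbase.replication;

import java.util.Iterator;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.wal.WAL.Entry;

public class FamilyDroppingFilter implements WALEntryFilter {

  // hypothetical family that should never be shipped to the peer
  private static final byte[] DROPPED_FAMILY = Bytes.toBytes("not_replicated");

  @Override
  public Entry filter(Entry entry) {
    Iterator<Cell> it = entry.getEdit().getCells().iterator();
    while (it.hasNext()) {
      Cell cell = it.next();
      if (CellUtil.matchingFamily(cell, DROPPED_FAMILY)) {
        // pruning the list is allowed, rewriting the cell contents is not
        it.remove();
      }
    }
    // if every cell was removed, the now-empty entry is skipped for replication
    return entry;
  }
}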

http://git-wip-us.apache.org/repos/asf/hbase/blob/688287a6/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceShipper.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceShipper.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceShipper.java
index d207d77..50aaf95 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceShipper.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceShipper.java
@@ -120,7 +120,7 @@ public class ReplicationSourceShipper extends Thread {
   /**
* Do the shipping logic
*/
-  protected void shipEdits(WALEntryBatch entryBatch) {
+  protected final void shipEdits(WALEntryBatch entryBatch) {
 List entries = entryBatch.getWalEntries();
 long lastReadPosition = entryBatch.getLastWalPosition();
 currentPath = entryBatch.getLastWalPath();
@@ -253,7 +253,7 @@ public class ReplicationSourceShipper extends Thread {
 return 0;
   }
 
-  protected boolean isActive() {
+  protected final boolean isActive() {
 return source.isSourceActive() && state == WorkerState.RUNNING && 
!isInterrupted();
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/688287a6/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceWALReader.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceWALReader.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceWALReader.java
index fe87aec..ad3baaf 100644
--- 

[12/20] hbase git commit: HBASE-20242 The open sequence number will grow if we fail to open a region after writing the max sequence id file

2018-04-07 Thread zhangduo
HBASE-20242 The open sequence number will grow if we fail to open a region 
after writing the max sequence id file


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/70d14c31
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/70d14c31
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/70d14c31

Branch: refs/heads/HBASE-20046-branch-2
Commit: 70d14c310245b87d09880760b8e5ae9698044936
Parents: 15e7f9d
Author: zhangduo 
Authored: Wed Mar 21 21:35:34 2018 +0800
Committer: zhangduo 
Committed: Sun Apr 8 11:21:26 2018 +0800

--
 .../hadoop/hbase/regionserver/HRegion.java  |   5 +-
 .../TestOpenSeqNumUnexpectedIncrease.java   | 111 +++
 2 files changed, 114 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/70d14c31/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index 23284be..9b9136b 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -968,7 +968,7 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
   WALSplitter.getMaxRegionSequenceId(fs.getFileSystem(), 
fs.getRegionDir());
 long nextSeqId = Math.max(maxSeqId, maxSeqIdFromFile) + 1;
 if (writestate.writesEnabled) {
-  WALSplitter.writeRegionSequenceIdFile(fs.getFileSystem(), 
fs.getRegionDir(), nextSeqId);
+  WALSplitter.writeRegionSequenceIdFile(fs.getFileSystem(), 
fs.getRegionDir(), nextSeqId - 1);
 }
 
 LOG.info("Opened {}; next sequenceid={}", 
this.getRegionInfo().getShortNameToLog(), nextSeqId);
@@ -1097,7 +1097,8 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
 return allStoreFiles;
   }
 
-  private void writeRegionOpenMarker(WAL wal, long openSeqId) throws 
IOException {
+  @VisibleForTesting
+  protected void writeRegionOpenMarker(WAL wal, long openSeqId) throws 
IOException {
 Map storeFiles = getStoreFiles();
 RegionEventDescriptor regionOpenDesc = 
ProtobufUtil.toRegionEventDescriptor(
   RegionEventDescriptor.EventType.REGION_OPEN, getRegionInfo(), openSeqId,
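
The one-line change above (writing nextSeqId - 1 instead of nextSeqId) is what stops a failed open from ratcheting the recorded sequence id upward. A worked example with made-up numbers: suppose the max sequence id recovered from the WALs and store files is 99, so nextSeqId = 99 + 1 = 100. Before the fix the open wrote 100 into the max-sequence-id file, so if the open then failed, the next attempt read 100 back, computed nextSeqId = 101 and wrote 101; every failed open grew the open sequence number by one even though nothing had been written to the region. After the fix each attempt writes nextSeqId - 1 = 99, the largest id actually observed, so a failed open leaves the recorded value unchanged and nextSeqId stays at 100 until the region really opens.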

http://git-wip-us.apache.org/repos/asf/hbase/blob/70d14c31/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestOpenSeqNumUnexpectedIncrease.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestOpenSeqNumUnexpectedIncrease.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestOpenSeqNumUnexpectedIncrease.java
new file mode 100644
index 000..14d5a98
--- /dev/null
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestOpenSeqNumUnexpectedIncrease.java
@@ -0,0 +1,111 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import static org.junit.Assert.assertEquals;
+
+import java.io.IOException;
+import java.util.concurrent.atomic.AtomicInteger;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.testclassification.RegionServerTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.wal.WAL;
+import 

[01/20] hbase git commit: HBASE-20050 Reimplement updateReplicationPositions logic in serial replication based on the newly introduced replication storage layer

2018-04-07 Thread zhangduo
Repository: hbase
Updated Branches:
  refs/heads/HBASE-20046-branch-2 [created] e821b9ac5


HBASE-20050 Reimplement updateReplicationPositions logic in serial replication 
based on the newly introduced replication storage layer

Signed-off-by: zhangduo 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/a0c53e87
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/a0c53e87
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/a0c53e87

Branch: refs/heads/HBASE-20046-branch-2
Commit: a0c53e87558779dd646e05b37001511befd29c5f
Parents: 10ed3f4
Author: huzheng 
Authored: Wed Feb 28 16:25:24 2018 +0800
Committer: zhangduo 
Committed: Sun Apr 8 11:08:03 2018 +0800

--
 .../replication/ReplicationQueueStorage.java| 15 +++-
 .../replication/ZKReplicationQueueStorage.java  | 88 ++--
 .../replication/TestReplicationStateBasic.java  | 48 ++-
 .../TestZKReplicationQueueStorage.java  |  7 +-
 .../regionserver/ReplicationSourceManager.java  |  4 +-
 5 files changed, 146 insertions(+), 16 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/a0c53e87/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueueStorage.java
--
diff --git 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueueStorage.java
 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueueStorage.java
index e774148..4c93da6 100644
--- 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueueStorage.java
+++ 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueueStorage.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hbase.replication;
 
 import java.util.List;
+import java.util.Map;
 import java.util.Set;
 import java.util.SortedSet;
 
@@ -63,9 +64,19 @@ public interface ReplicationQueueStorage {
* @param queueId a String that identifies the queue
* @param fileName name of the WAL
* @param position the current position in the file
+   * @param lastSeqIds map with {encodedRegionName, sequenceId} pairs for 
serial replication.
*/
-  void setWALPosition(ServerName serverName, String queueId, String fileName, 
long position)
-  throws ReplicationException;
+  void setWALPosition(ServerName serverName, String queueId, String fileName, 
long position,
+  Map lastSeqIds) throws ReplicationException;
+
+  /**
+   * Read the max sequence id of the specific region for a given peer. For 
serial replication, we
+   * need the max sequenced id to decide whether we can push the next entries.
+   * @param encodedRegionName the encoded region name
+   * @param peerId peer id
+   * @return the max sequence id of the specific region for a given peer.
+   */
+  long getLastSequenceId(String encodedRegionName, String peerId) throws 
ReplicationException;
 
   /**
* Get the current position for a specific WAL in a given queue for a given 
regionserver.
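
These two additions are the storage-level hooks for serial replication: setWALPosition now also records the last pushed sequence id per region, and getLastSequenceId reads it back so the shipper can decide whether an entry may be pushed yet. A minimal, hypothetical caller of the read side might look like this (the helper class below is illustrative, not part of the change):

import org.apache.hadoop.hbase.replication.ReplicationException;
import org.apache.hadoop.hbase.replication.ReplicationQueueStorage;

public class LastSequenceIdSketch {

  /**
   * Returns true if an edit with the given sequence id has already been pushed to the peer,
   * meaning it can be skipped; otherwise the caller may have to wait for earlier edits first.
   */
  static boolean alreadyPushed(ReplicationQueueStorage storage, String encodedRegionName,
      String peerId, long entrySeqId) throws ReplicationException {
    long lastPushed = storage.getLastSequenceId(encodedRegionName, peerId);
    return entrySeqId <= lastPushed;
  }
}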

http://git-wip-us.apache.org/repos/asf/hbase/blob/a0c53e87/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ZKReplicationQueueStorage.java
--
diff --git 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ZKReplicationQueueStorage.java
 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ZKReplicationQueueStorage.java
index da96c65..adbf259 100644
--- 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ZKReplicationQueueStorage.java
+++ 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ZKReplicationQueueStorage.java
@@ -23,6 +23,8 @@ import java.util.ArrayList;
 import java.util.Collections;
 import java.util.HashSet;
 import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
 import java.util.Set;
 import java.util.SortedSet;
 import java.util.TreeSet;
@@ -85,6 +87,10 @@ class ZKReplicationQueueStorage extends 
ZKReplicationStorageBase
   "zookeeper.znode.replication.hfile.refs";
   public static final String ZOOKEEPER_ZNODE_REPLICATION_HFILE_REFS_DEFAULT = 
"hfile-refs";
 
+  public static final String ZOOKEEPER_ZNODE_REPLICATION_REGIONS_KEY =
+  "zookeeper.znode.replication.regions";
+  public static final String ZOOKEEPER_ZNODE_REPLICATION_REGIONS_DEFAULT = 
"regions";
+
   /**
* The name of the znode that contains all replication queues
*/
@@ -95,6 +101,8 @@ class ZKReplicationQueueStorage extends 
ZKReplicationStorageBase
*/
   private final String hfileRefsZNode;
 
+  private final String regionsZNode;
+
   public 

[08/20] hbase git commit: HBASE-20165 Shell command to make a normal peer to be a serial replication peer

2018-04-07 Thread zhangduo
HBASE-20165 Shell command to make a normal peer to be a serial replication peer


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/d74e7f87
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/d74e7f87
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/d74e7f87

Branch: refs/heads/HBASE-20046-branch-2
Commit: d74e7f87b6959ffcff4b17192c0bdbb68a053061
Parents: 91854f4
Author: openinx 
Authored: Sat Mar 10 19:36:43 2018 +0800
Committer: zhangduo 
Committed: Sun Apr 8 11:17:32 2018 +0800

--
 .../src/main/ruby/hbase/replication_admin.rb| 11 -
 hbase-shell/src/main/ruby/shell.rb  |  1 +
 .../src/main/ruby/shell/commands/list_peers.rb  |  5 +-
 .../main/ruby/shell/commands/set_peer_serial.rb | 49 
 .../test/ruby/hbase/replication_admin_test.rb   | 23 +
 5 files changed, 86 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/d74e7f87/hbase-shell/src/main/ruby/hbase/replication_admin.rb
--
diff --git a/hbase-shell/src/main/ruby/hbase/replication_admin.rb 
b/hbase-shell/src/main/ruby/hbase/replication_admin.rb
index b9d4a0c..5b87595 100644
--- a/hbase-shell/src/main/ruby/hbase/replication_admin.rb
+++ b/hbase-shell/src/main/ruby/hbase/replication_admin.rb
@@ -284,6 +284,15 @@ module Hbase
   @admin.updateReplicationPeerConfig(id, rpc)
 end
 
+def set_peer_serial(id, peer_serial)
+  rpc = get_peer_config(id)
+  return if rpc.nil?
+  rpc_builder = org.apache.hadoop.hbase.replication.ReplicationPeerConfig
+   .newBuilder(rpc)
+  new_rpc = rpc_builder.setSerial(peer_serial).build
+  @admin.updateReplicationPeerConfig(id, new_rpc)
+end
+
 # Set exclude namespaces config for the specified peer
 def set_peer_exclude_namespaces(id, exclude_namespaces)
   return if exclude_namespaces.nil?
@@ -362,7 +371,7 @@ module Hbase
   # Create and populate a ReplicationPeerConfig
   replication_peer_config = get_peer_config(id)
   builder = org.apache.hadoop.hbase.replication.ReplicationPeerConfig
-.newBuilder(replication_peer_config)
+   .newBuilder(replication_peer_config)
   unless config.nil?
 builder.putAllConfiguration(config)
   end
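
The Ruby wrapper above simply round-trips the peer config through the Admin API: read the current config, flip the serial flag, and push the updated config back. A hedged Java equivalent, as a sketch only (the class and method names below are illustrative):

import java.io.IOException;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;

public class SetPeerSerialSketch {

  // read-modify-write of the peer config, mirroring what set_peer_serial does in the shell
  static void setPeerSerial(Admin admin, String peerId, boolean serial) throws IOException {
    ReplicationPeerConfig current = admin.getReplicationPeerConfig(peerId);
    ReplicationPeerConfig updated =
        ReplicationPeerConfig.newBuilder(current).setSerial(serial).build();
    admin.updateReplicationPeerConfig(peerId, updated);
  }
}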

http://git-wip-us.apache.org/repos/asf/hbase/blob/d74e7f87/hbase-shell/src/main/ruby/shell.rb
--
diff --git a/hbase-shell/src/main/ruby/shell.rb 
b/hbase-shell/src/main/ruby/shell.rb
index 3efe7e9..2e228f5 100644
--- a/hbase-shell/src/main/ruby/shell.rb
+++ b/hbase-shell/src/main/ruby/shell.rb
@@ -373,6 +373,7 @@ Shell.load_command_group(
 enable_peer
 disable_peer
 set_peer_replicate_all
+set_peer_serial
 set_peer_namespaces
 append_peer_namespaces
 remove_peer_namespaces

http://git-wip-us.apache.org/repos/asf/hbase/blob/d74e7f87/hbase-shell/src/main/ruby/shell/commands/list_peers.rb
--
diff --git a/hbase-shell/src/main/ruby/shell/commands/list_peers.rb 
b/hbase-shell/src/main/ruby/shell/commands/list_peers.rb
index 522d23d..eefcc42 100644
--- a/hbase-shell/src/main/ruby/shell/commands/list_peers.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/list_peers.rb
@@ -39,7 +39,8 @@ EOF
 peers = replication_admin.list_peers
 
 formatter.header(%w[PEER_ID CLUSTER_KEY ENDPOINT_CLASSNAME
-STATE REPLICATE_ALL NAMESPACES TABLE_CFS 
BANDWIDTH])
+STATE REPLICATE_ALL NAMESPACES TABLE_CFS BANDWIDTH
+SERIAL])
 
 peers.each do |peer|
   id = peer.getPeerId
@@ -55,7 +56,7 @@ EOF
   formatter.row([id, config.getClusterKey,
  config.getReplicationEndpointImpl, state,
  config.replicateAllUserTables, namespaces, tableCFs,
- config.getBandwidth])
+ config.getBandwidth, config.isSerial])
 end
 
 formatter.footer

http://git-wip-us.apache.org/repos/asf/hbase/blob/d74e7f87/hbase-shell/src/main/ruby/shell/commands/set_peer_serial.rb
--
diff --git a/hbase-shell/src/main/ruby/shell/commands/set_peer_serial.rb 
b/hbase-shell/src/main/ruby/shell/commands/set_peer_serial.rb
new file mode 100644
index 000..d556077
--- /dev/null
+++ b/hbase-shell/src/main/ruby/shell/commands/set_peer_serial.rb
@@ -0,0 +1,49 @@
+#
+# Copyright The Apache Software Foundation
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license 

[17/20] hbase git commit: HBASE-20127 Add UT for serial replication after failover

2018-04-07 Thread zhangduo
HBASE-20127 Add UT for serial replication after failover


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/f3dbc727
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/f3dbc727
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/f3dbc727

Branch: refs/heads/HBASE-20046-branch-2
Commit: f3dbc727882e013259f83edfac843a589b064915
Parents: daa5e0b
Author: zhangduo 
Authored: Mon Mar 26 16:08:20 2018 +0800
Committer: zhangduo 
Committed: Sun Apr 8 11:22:03 2018 +0800

--
 .../replication/SerialReplicationTestBase.java  |  7 ++
 .../replication/TestSerialReplication.java  |  5 --
 .../TestSerialReplicationFailover.java  | 76 
 3 files changed, 83 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/f3dbc727/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/SerialReplicationTestBase.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/SerialReplicationTestBase.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/SerialReplicationTestBase.java
index 83afd81..b5aae85 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/SerialReplicationTestBase.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/SerialReplicationTestBase.java
@@ -113,6 +113,7 @@ public class SerialReplicationTestBase {
   @BeforeClass
   public static void setUpBeforeClass() throws Exception {
 UTIL.getConfiguration().setInt("replication.source.nb.capacity", 10);
+UTIL.getConfiguration().setLong("replication.sleep.before.failover", 1000);
 UTIL.startMiniCluster(3);
 // disable balancer
 UTIL.getAdmin().balancerSwitch(false, true);
@@ -200,6 +201,11 @@ public class SerialReplicationTestBase {
 });
   }
 
+  protected final void enablePeerAndWaitUntilReplicationDone(int 
expectedEntries) throws Exception {
+UTIL.getAdmin().enableReplicationPeer(PEER_ID);
+waitUntilReplicationDone(expectedEntries);
+  }
+
   protected final void addPeer(boolean enabled) throws IOException {
 UTIL.getAdmin().addReplicationPeer(PEER_ID,
   ReplicationPeerConfig.newBuilder().setClusterKey("127.0.0.1:2181:/hbase")
@@ -221,6 +227,7 @@ public class SerialReplicationTestBase {
 assertTrue(
   "Sequence id go backwards from " + seqId + " to " + 
entry.getKey().getSequenceId(),
   entry.getKey().getSequenceId() >= seqId);
+seqId = entry.getKey().getSequenceId();
 count++;
   }
   assertEquals(expectedEntries, count);

http://git-wip-us.apache.org/repos/asf/hbase/blob/f3dbc727/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSerialReplication.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSerialReplication.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSerialReplication.java
index 94b79d9..bedb2ec 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSerialReplication.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSerialReplication.java
@@ -63,11 +63,6 @@ public class TestSerialReplication extends 
SerialReplicationTestBase {
 addPeer(false);
   }
 
-  private void enablePeerAndWaitUntilReplicationDone(int expectedEntries) 
throws Exception {
-UTIL.getAdmin().enableReplicationPeer(PEER_ID);
-waitUntilReplicationDone(expectedEntries);
-  }
-
   @Test
   public void testRegionMove() throws Exception {
 TableName tableName = TableName.valueOf(name.getMethodName());

http://git-wip-us.apache.org/repos/asf/hbase/blob/f3dbc727/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSerialReplicationFailover.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSerialReplicationFailover.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSerialReplicationFailover.java
new file mode 100644
index 000..324a69f
--- /dev/null
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSerialReplicationFailover.java
@@ -0,0 +1,76 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain 

[16/20] hbase git commit: HBASE-20227 Add UT for ReplicationUtils.contains method

2018-04-07 Thread zhangduo
HBASE-20227 Add UT for ReplicationUtils.contains method

Signed-off-by: zhangduo 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/daa5e0b9
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/daa5e0b9
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/daa5e0b9

Branch: refs/heads/HBASE-20046-branch-2
Commit: daa5e0b98de57f49be8f162e48562f01ae89d923
Parents: 5510389
Author: tianjingyun 
Authored: Sat Mar 24 18:57:48 2018 +0800
Committer: zhangduo 
Committed: Sun Apr 8 11:21:52 2018 +0800

--
 .../hbase/replication/ReplicationUtils.java |   2 +-
 .../hbase/replication/TestReplicationUtil.java  | 235 +++
 2 files changed, 236 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/daa5e0b9/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java
--
diff --git 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java
 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java
index 1c42de4..c7568bb 100644
--- 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java
+++ 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java
@@ -145,7 +145,7 @@ public final class ReplicationUtils {
   if (excludeNamespaces != null && excludeNamespaces.contains(namespace)) {
 return false;
   }
-  Map excludedTableCFs = 
peerConfig.getTableCFsMap();
+  Map excludedTableCFs = 
peerConfig.getExcludeTableCFsMap();
   // trap here, must check existence first since HashMap allows null value.
   if (excludedTableCFs == null || 
!excludedTableCFs.containsKey(tableName)) {
 return true;

http://git-wip-us.apache.org/repos/asf/hbase/blob/daa5e0b9/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationUtil.java
--
diff --git 
a/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationUtil.java
 
b/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationUtil.java
new file mode 100644
index 000..f8543fe
--- /dev/null
+++ 
b/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationUtil.java
@@ -0,0 +1,235 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.replication;
+
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.testclassification.ReplicationTests;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.junit.Assert;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category({ ReplicationTests.class, SmallTests.class })
+public class TestReplicationUtil {
+
+  @ClassRule
+  public static final HBaseClassTestRule CLASS_RULE =
+HBaseClassTestRule.forClass(TestReplicationUtil.class);
+
+  private static TableName TABLE_A = TableName.valueOf("replication", "testA");
+  private static TableName TABLE_B = TableName.valueOf("replication", "testB");
+
+  @Test
+  public void testContainsWithReplicatingAll() {
+ReplicationPeerConfig peerConfig;
+ReplicationPeerConfig.ReplicationPeerConfigBuilderImpl builder =
+  new ReplicationPeerConfig.ReplicationPeerConfigBuilderImpl();
+Map tableCfs = new HashMap<>();
+Set namespaces = new HashSet<>();
+
+// 1. replication_all flag is true, no namespaces and table-cfs config
+builder.setReplicateAllUserTables(true);
+peerConfig = builder.build();
+

[03/20] hbase git commit: HBASE-20115 Reimplement serial replication based on the new replication storage layer

2018-04-07 Thread zhangduo
HBASE-20115 Reimplement serial replication based on the new replication storage 
layer


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/79e0c40c
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/79e0c40c
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/79e0c40c

Branch: refs/heads/HBASE-20046-branch-2
Commit: 79e0c40cd137c60df7d65b0d0eded3dc3819b234
Parents: a0c53e8
Author: zhangduo 
Authored: Mon Mar 5 16:47:03 2018 +0800
Committer: zhangduo 
Committed: Sun Apr 8 11:13:25 2018 +0800

--
 .../apache/hadoop/hbase/HTableDescriptor.java   |   8 +
 .../apache/hadoop/hbase/MetaTableAccessor.java  | 211 ---
 .../hadoop/hbase/client/TableDescriptor.java|   8 +-
 .../hbase/client/TableDescriptorBuilder.java|   9 +
 .../org/apache/hadoop/hbase/HConstants.java |  12 +
 .../hadoop/hbase/master/MasterFileSystem.java   |  11 +-
 .../master/assignment/AssignmentManager.java|   3 +-
 .../master/assignment/RegionStateStore.java |  60 +++--
 .../assignment/SplitTableRegionProcedure.java   |   4 +-
 .../AbstractStateMachineTableProcedure.java |   8 +-
 .../hbase/regionserver/HRegionFileSystem.java   |  11 +-
 .../NamespaceTableCfWALEntryFilter.java |   8 +-
 .../hbase/replication/ScopeWALEntryFilter.java  |  34 ++-
 .../RecoveredReplicationSource.java |   5 +
 .../RecoveredReplicationSourceShipper.java  |  12 +-
 .../RecoveredReplicationSourceWALReader.java|   9 +-
 .../regionserver/ReplicationSource.java |   8 +
 .../ReplicationSourceInterface.java |   7 +
 .../regionserver/ReplicationSourceManager.java  |   4 +-
 .../regionserver/ReplicationSourceShipper.java  |  17 +-
 .../ReplicationSourceWALActionListener.java |  39 ++-
 .../ReplicationSourceWALReader.java | 188 +-
 .../regionserver/SerialReplicationChecker.java  | 255 +++
 .../replication/regionserver/WALEntryBatch.java | 138 ++
 .../regionserver/WALEntryStream.java|  29 +--
 .../hadoop/hbase/util/FSTableDescriptors.java   |   8 +
 .../org/apache/hadoop/hbase/util/FSUtils.java   |  28 +-
 .../org/apache/hadoop/hbase/wal/WALKeyImpl.java |  12 +-
 .../hadoop/hbase/TestMetaTableAccessor.java |  14 +-
 .../regionserver/TestHRegionFileSystem.java |  14 +-
 .../regionserver/TestRegionServerMetrics.java   |   4 +-
 .../TestReplicationDroppedTables.java   |   8 +-
 .../replication/TestSerialReplication.java  | 234 +
 .../TestReplicationSourceManager.java   |   2 +-
 .../TestSerialReplicationChecker.java   | 176 +
 .../regionserver/TestWALEntryStream.java|  19 +-
 36 files changed, 1279 insertions(+), 338 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/79e0c40c/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java
index 960b91f..ca0cb91 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java
@@ -539,6 +539,14 @@ public class HTableDescriptor implements TableDescriptor, Comparable

http://git-wip-us.apache.org/repos/asf/hbase/blob/79e0c40c/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
index d6bbf53..109f2d0 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
@@ -34,6 +34,8 @@ import java.util.SortedMap;
 import java.util.TreeMap;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Cell.Type;
 import org.apache.hadoop.hbase.client.Connection;
@@ -56,6 +58,7 @@ import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
 import org.apache.hadoop.hbase.master.RegionState;
+import org.apache.hadoop.hbase.master.RegionState.State;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
 import 

[11/20] hbase git commit: HBASE-20116 Optimize the region last pushed sequence id layout on zk

2018-04-07 Thread zhangduo
HBASE-20116 Optimize the region last pushed sequence id layout on zk


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/15e7f9d5
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/15e7f9d5
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/15e7f9d5

Branch: refs/heads/HBASE-20046-branch-2
Commit: 15e7f9d538b3043245d329d64d079c8067c7d91f
Parents: e1ec013
Author: huzheng 
Authored: Tue Mar 20 10:13:15 2018 +0800
Committer: zhangduo 
Committed: Sun Apr 8 11:21:21 2018 +0800

--
 .../replication/ZKReplicationQueueStorage.java  | 22 ++--
 .../replication/TestReplicationStateBasic.java  |  3 ++-
 .../TestZKReplicationQueueStorage.java  | 10 +
 3 files changed, 28 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/15e7f9d5/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ZKReplicationQueueStorage.java
--
diff --git 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ZKReplicationQueueStorage.java
 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ZKReplicationQueueStorage.java
index 63f43e8..6c9752a 100644
--- 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ZKReplicationQueueStorage.java
+++ 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ZKReplicationQueueStorage.java
@@ -33,6 +33,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Pair;
@@ -138,19 +139,28 @@ class ZKReplicationQueueStorage extends 
ZKReplicationStorageBase
* So the final znode path will be format like this:
*
* 
-   * /hbase/replication/regions/254/dd04e76a6966d4ffa908ed0586764767-100
+   * /hbase/replication/regions/e1/ff/dd04e76a6966d4ffa908ed0586764767-100
* 
*
-   * The 254 indicate the hash of encoded region name, the 100 indicate the 
peer id.
+   * The e1 indicate the first level hash of encoded region name, and the ff 
indicate the second
+   * level hash of encoded region name, the 100 indicate the peer id. 
+   * Note that here we use two-level hash because if only one-level hash (such 
as mod 65535), it
+   * will still lead to too many children under the /hbase/replication/regions 
znode.
* @param encodedRegionName the encoded region name.
* @param peerId peer id for replication.
* @return ZNode path to persist the max sequence id that we've pushed for 
the given region and
* peer.
*/
-  private String getSerialReplicationRegionPeerNode(String encodedRegionName, 
String peerId) {
-int hash = encodedRegionName.hashCode() & 0x;
-String hashPath = ZNodePaths.joinZNode(regionsZNode, String.valueOf(hash));
-return ZNodePaths.joinZNode(hashPath, String.format("%s-%s", 
encodedRegionName, peerId));
+  @VisibleForTesting
+  public String getSerialReplicationRegionPeerNode(String encodedRegionName, 
String peerId) {
+if (encodedRegionName == null || encodedRegionName.length() != 
RegionInfo.MD5_HEX_LENGTH) {
+  throw new IllegalArgumentException(
+  "Invalid encoded region name: " + encodedRegionName + ", length 
should be 32.");
+}
+return new 
StringBuilder(regionsZNode).append(ZNodePaths.ZNODE_PATH_SEPARATOR)
+.append(encodedRegionName.substring(0, 
2)).append(ZNodePaths.ZNODE_PATH_SEPARATOR)
+.append(encodedRegionName.substring(2, 
4)).append(ZNodePaths.ZNODE_PATH_SEPARATOR)
+.append(encodedRegionName).append("-").append(peerId).toString();
   }
 
   @Override
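
A quick, runnable check of the layout described above, using a made-up 32-character encoded region name and peer id; the scheme itself (first two hex characters, next two hex characters, then name-peerId) is taken from the StringBuilder code in this hunk:

public class RegionPeerZNodeSketch {
  public static void main(String[] args) {
    String regionsZNode = "/hbase/replication/regions";
    String encodedRegionName = "dd04e76a6966d4ffa908ed0586764767"; // MD5 hex, 32 chars
    String peerId = "100";
    String path = regionsZNode
        + "/" + encodedRegionName.substring(0, 2)   // first level: "dd"
        + "/" + encodedRegionName.substring(2, 4)   // second level: "04"
        + "/" + encodedRegionName + "-" + peerId;
    // prints /hbase/replication/regions/dd/04/dd04e76a6966d4ffa908ed0586764767-100
    System.out.println(path);
  }
}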

http://git-wip-us.apache.org/repos/asf/hbase/blob/15e7f9d5/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateBasic.java
--
diff --git 
a/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateBasic.java
 
b/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateBasic.java
index 21b09aa..3ed4121 100644
--- 
a/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateBasic.java
+++ 
b/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateBasic.java
@@ -285,7 +285,8 @@ public abstract class TestReplicationStateBasic {
 ServerName serverName1 = ServerName.valueOf("127.0.0.1", 8000, 1);
 

[09/20] hbase git commit: HBASE-20117 Cleanup the unused replication barriers in meta table

2018-04-07 Thread zhangduo
HBASE-20117 Cleanup the unused replication barriers in meta table


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/71f72f42
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/71f72f42
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/71f72f42

Branch: refs/heads/HBASE-20046-branch-2
Commit: 71f72f42a62a14d56cc38a74c02fdf8e805f136a
Parents: d74e7f8
Author: zhangduo 
Authored: Tue Mar 13 21:36:06 2018 +0800
Committer: zhangduo 
Committed: Sun Apr 8 11:18:38 2018 +0800

--
 .../apache/hadoop/hbase/MetaTableAccessor.java  |   2 +-
 .../hbase/replication/ReplicationUtils.java |  56 +++-
 .../org/apache/hadoop/hbase/master/HMaster.java |  91 +++---
 .../cleaner/ReplicationBarrierCleaner.java  | 162 ++
 .../replication/ReplicationPeerManager.java |  10 +
 .../NamespaceTableCfWALEntryFilter.java |  39 +--
 .../cleaner/TestReplicationBarrierCleaner.java  | 293 +++
 .../TestSerialReplicationChecker.java   |   2 +-
 8 files changed, 565 insertions(+), 90 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/71f72f42/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
index 2a88b56..a800c1c 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
@@ -2053,7 +2053,7 @@ public class MetaTableAccessor {
 return Bytes.toLong(c.getValueArray(), c.getValueOffset(), 
c.getValueLength());
   }
 
-  private static long[] getReplicationBarriers(Result result) {
+  public static long[] getReplicationBarriers(Result result) {
 return result.getColumnCells(HConstants.REPLICATION_BARRIER_FAMILY, 
HConstants.SEQNUM_QUALIFIER)
   
.stream().mapToLong(MetaTableAccessor::getReplicationBarrier).sorted().distinct().toArray();
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/71f72f42/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java
--
diff --git 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java
 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java
index 857b385..e2479e0 100644
--- 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java
+++ 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java
@@ -39,15 +39,6 @@ public final class ReplicationUtils {
   private ReplicationUtils() {
   }
 
-  /**
-   * @param c Configuration to look at
-   * @return True if replication for bulk load data is enabled.
-   */
-  public static boolean isReplicationForBulkLoadDataEnabled(final 
Configuration c) {
-return c.getBoolean(HConstants.REPLICATION_BULKLOAD_ENABLE_KEY,
-  HConstants.REPLICATION_BULKLOAD_ENABLE_DEFAULT);
-  }
-
   public static Configuration 
getPeerClusterConfiguration(ReplicationPeerConfig peerConfig,
   Configuration baseConf) throws ReplicationException {
 Configuration otherConf;
@@ -135,4 +126,51 @@ public final class ReplicationUtils {
 isTableCFsEqual(rpc1.getTableCFsMap(), rpc2.getTableCFsMap());
 }
   }
+
+  /**
+   * @param c Configuration to look at
+   * @return True if replication for bulk load data is enabled.
+   */
+  public static boolean isReplicationForBulkLoadDataEnabled(final 
Configuration c) {
+return c.getBoolean(HConstants.REPLICATION_BULKLOAD_ENABLE_KEY,
+  HConstants.REPLICATION_BULKLOAD_ENABLE_DEFAULT);
+  }
+
+  /**
+   * Returns whether we should replicate the given table.
+   */
+  public static boolean contains(ReplicationPeerConfig peerConfig, TableName 
tableName) {
+String namespace = tableName.getNamespaceAsString();
+if (peerConfig.replicateAllUserTables()) {
+  // replicate all user tables, but filter by exclude namespaces and 
table-cfs config
+  Set excludeNamespaces = peerConfig.getExcludeNamespaces();
+  if (excludeNamespaces != null && excludeNamespaces.contains(namespace)) {
+return false;
+  }
+  Map excludedTableCFs = 
peerConfig.getTableCFsMap();
+  // trap here, must check existence first since HashMap allows null value.
+  if (excludedTableCFs == null || 
!excludedTableCFs.containsKey(tableName)) {
+return true;
+  }
+  List cfs = excludedTableCFs.get(tableName);
+  // if cfs is null or empty then we can make 
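
For reference, a small sketch of calling the helper above; the values are illustrative and only ReplicationUtils.contains plus the ReplicationPeerConfig builder methods used elsewhere in this series are assumed:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
import org.apache.hadoop.hbase.replication.ReplicationUtils;

public class ContainsSketch {
  public static void main(String[] args) {
    // a replicate-all peer with no exclude namespaces / table-cfs: every user table matches
    ReplicationPeerConfig peerConfig = ReplicationPeerConfig.newBuilder()
        .setClusterKey("127.0.0.1:2181:/hbase")
        .setReplicateAllUserTables(true)
        .build();
    TableName table = TableName.valueOf("replication", "testA");
    System.out.println(ReplicationUtils.contains(peerConfig, table)); // expected: true
  }
}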

[18/20] hbase git commit: HBASE-20138 Find a way to deal with the conflicts when updating replication position

2018-04-07 Thread zhangduo
HBASE-20138 Find a way to deal with the conflicts when updating replication 
position


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/ce0c8377
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/ce0c8377
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/ce0c8377

Branch: refs/heads/HBASE-20046-branch-2
Commit: ce0c83772c9853dad6531a44477fad1eb8ccf15c
Parents: f3dbc72
Author: huzheng 
Authored: Wed Mar 21 17:34:10 2018 +0800
Committer: zhangduo 
Committed: Sun Apr 8 11:22:08 2018 +0800

--
 .../replication/ZKReplicationQueueStorage.java  | 110 +--
 .../replication/TestReplicationStateBasic.java  |   6 +
 .../TestZKReplicationQueueStorage.java  |  48 +++-
 .../apache/hadoop/hbase/zookeeper/ZKUtil.java   |  34 --
 .../hadoop/hbase/zookeeper/TestZKUtil.java  |  25 +
 5 files changed, 178 insertions(+), 45 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/ce0c8377/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ZKReplicationQueueStorage.java
--
diff --git 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ZKReplicationQueueStorage.java
 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ZKReplicationQueueStorage.java
index 2ab08ae..2e7a012 100644
--- 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ZKReplicationQueueStorage.java
+++ 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ZKReplicationQueueStorage.java
@@ -203,20 +203,25 @@ class ZKReplicationQueueStorage extends 
ZKReplicationStorageBase
   }
 
   private void addLastSeqIdsToOps(String queueId, Map lastSeqIds,
-  List listOfOps) throws KeeperException {
+  List listOfOps) throws KeeperException, ReplicationException {
+String peerId = new ReplicationQueueInfo(queueId).getPeerId();
 for (Entry lastSeqEntry : lastSeqIds.entrySet()) {
-  String peerId = new ReplicationQueueInfo(queueId).getPeerId();
   String path = getSerialReplicationRegionPeerNode(lastSeqEntry.getKey(), 
peerId);
-  /*
-   * Make sure the existence of path
-   * /hbase/replication/regions/<hash>/<encoded-region-name>-<peer-id>. As 
the javadoc in
-   * multiOrSequential() method said, if received a NodeExistsException, 
all operations will
-   * fail. So create the path here, and in fact, no need to add this 
operation to listOfOps,
-   * because only need to make sure that update file position and sequence 
id atomically.
-   */
-  ZKUtil.createWithParents(zookeeper, path);
-  // Persist the max sequence id of region to zookeeper.
-  listOfOps.add(ZKUtilOp.setData(path, 
ZKUtil.positionToByteArray(lastSeqEntry.getValue(;
+  Pair p = 
getLastSequenceIdWithVersion(lastSeqEntry.getKey(), peerId);
+  byte[] data = ZKUtil.positionToByteArray(lastSeqEntry.getValue());
+  if (p.getSecond() < 0) { // ZNode does not exist.
+ZKUtil.createWithParents(zookeeper,
+  path.substring(0, 
path.lastIndexOf(ZNodePaths.ZNODE_PATH_SEPARATOR)));
+listOfOps.add(ZKUtilOp.createAndFailSilent(path, data));
+continue;
+  }
+  // Perform CAS in a specific version v0 (HBASE-20138)
+  int v0 = p.getSecond();
+  long lastPushedSeqId = p.getFirst();
+  if (lastSeqEntry.getValue() <= lastPushedSeqId) {
+continue;
+  }
+  listOfOps.add(ZKUtilOp.setData(path, data, v0));
 }
   }
 
@@ -224,50 +229,85 @@ class ZKReplicationQueueStorage extends 
ZKReplicationStorageBase
   public void setWALPosition(ServerName serverName, String queueId, String 
fileName, long position,
   Map lastSeqIds) throws ReplicationException {
 try {
-  List listOfOps = new ArrayList<>();
-  if (position > 0) {
-listOfOps.add(ZKUtilOp.setData(getFileNode(serverName, queueId, 
fileName),
-  ZKUtil.positionToByteArray(position)));
+  for (int retry = 0;; retry++) {
+List listOfOps = new ArrayList<>();
+if (position > 0) {
+  listOfOps.add(ZKUtilOp.setData(getFileNode(serverName, queueId, 
fileName),
+ZKUtil.positionToByteArray(position)));
+}
+// Persist the max sequence id(s) of regions for serial replication 
atomically.
+addLastSeqIdsToOps(queueId, lastSeqIds, listOfOps);
+if (listOfOps.isEmpty()) {
+  return;
+}
+try {
+  ZKUtil.multiOrSequential(zookeeper, listOfOps, false);
+  return;
+} catch (KeeperException.BadVersionException | 
KeeperException.NodeExistsException e) {
+  LOG.warn(
+"Bad version(or node exist) when 
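
The retry loop above is a plain ZooKeeper compare-and-set: read the current value together with its znode version, skip the write if a newer sequence id is already stored, otherwise write with the version that was read and retry on BadVersionException. A stripped-down sketch of the same pattern against the raw ZooKeeper client follows; the serialization and names are illustrative, it assumes the znode already exists (the real code also handles creation), and HBase itself goes through ZKUtil/ZKUtilOp and multiOrSequential:

import java.nio.charset.StandardCharsets;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.data.Stat;

public final class ZkCasSketch {

  // advance the value stored at path to newSeqId, but only if newSeqId is still larger
  static void setIfGreater(ZooKeeper zk, String path, long newSeqId)
      throws KeeperException, InterruptedException {
    for (;;) {
      Stat stat = new Stat();
      long existing = Long.parseLong(
          new String(zk.getData(path, false, stat), StandardCharsets.UTF_8));
      if (newSeqId <= existing) {
        return; // an equal or newer sequence id is already recorded, nothing to do
      }
      try {
        // setData with the version read above fails if the znode changed in the meantime
        zk.setData(path, Long.toString(newSeqId).getBytes(StandardCharsets.UTF_8),
            stat.getVersion());
        return;
      } catch (KeeperException.BadVersionException e) {
        // lost the race; re-read and retry
      }
    }
  }
}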

[hbase] Git Push Summary

2018-04-07 Thread zhangduo
Repository: hbase
Updated Branches:
  refs/heads/HBASE-19397-branch-2 [deleted] c9bcc0b9b


[15/20] hbase git commit: HBASE-20271 ReplicationSourceWALReader.switched should use the file name instead of the path object directly

2018-04-07 Thread zhangduo
HBASE-20271 ReplicationSourceWALReader.switched should use the file name 
instead of the path object directly


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/5510389b
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/5510389b
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/5510389b

Branch: refs/heads/HBASE-20046-branch-2
Commit: 5510389bf76dfcc14afa3411502ba0c58b1bc189
Parents: 2d5c0e2
Author: zhangduo 
Authored: Sat Mar 24 16:25:20 2018 +0800
Committer: zhangduo 
Committed: Sun Apr 8 11:21:41 2018 +0800

--
 .../replication/regionserver/ReplicationSourceWALReader.java  | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/5510389b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceWALReader.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceWALReader.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceWALReader.java
index 2154856..7ba347f 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceWALReader.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceWALReader.java
@@ -174,7 +174,8 @@ class ReplicationSourceWALReader extends Thread {
   }
 
   protected static final boolean switched(WALEntryStream entryStream, Path 
path) {
-return !path.equals(entryStream.getCurrentPath());
+Path newPath = entryStream.getCurrentPath();
+return newPath == null || !path.getName().equals(newPath.getName());
   }
 
   protected WALEntryBatch readWALEntries(WALEntryStream entryStream)



[19/20] hbase git commit: HBASE-20285 Delete all last pushed sequence ids when removing a peer or removing the serial flag for a peer

2018-04-07 Thread zhangduo
HBASE-20285 Delete all last pushed sequence ids when removing a peer or 
removing the serial flag for a peer


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/eb5dd939
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/eb5dd939
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/eb5dd939

Branch: refs/heads/HBASE-20046-branch-2
Commit: eb5dd9391d87b6419e529fa010214f6b986976eb
Parents: ce0c837
Author: zhangduo 
Authored: Mon Mar 26 22:17:00 2018 +0800
Committer: zhangduo 
Committed: Sun Apr 8 11:33:27 2018 +0800

--
 .../src/main/protobuf/MasterProcedure.proto | 10 +++
 .../replication/ReplicationQueueStorage.java|  5 ++
 .../replication/ZKReplicationQueueStorage.java  | 37 ++-
 .../TestZKReplicationQueueStorage.java  | 31 -
 .../replication/DisablePeerProcedure.java   | 15 +
 .../master/replication/EnablePeerProcedure.java | 15 +
 .../master/replication/RemovePeerProcedure.java | 31 -
 .../replication/ReplicationPeerManager.java |  8 ++-
 .../replication/UpdatePeerConfigProcedure.java  |  3 +
 .../replication/SerialReplicationTestBase.java  | 19 +-
 .../TestAddToSerialReplicationPeer.java | 28 ++--
 .../replication/TestSerialReplication.java  | 68 
 12 files changed, 227 insertions(+), 43 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/eb5dd939/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
--
diff --git a/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto 
b/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
index f710759..b37557c 100644
--- a/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
@@ -421,3 +421,13 @@ message UpdatePeerConfigStateData {
   required ReplicationPeer peer_config = 1;
   optional ReplicationPeer old_peer_config = 2;
 }
+
+message RemovePeerStateData {
+  optional ReplicationPeer peer_config = 1;
+}
+
+message EnablePeerStateData {
+}
+
+message DisablePeerStateData {
+}

http://git-wip-us.apache.org/repos/asf/hbase/blob/eb5dd939/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueueStorage.java
--
diff --git 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueueStorage.java
 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueueStorage.java
index 99a1e97..cd37ac2 100644
--- 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueueStorage.java
+++ 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueueStorage.java
@@ -87,6 +87,11 @@ public interface ReplicationQueueStorage {
   void setLastSequenceIds(String peerId, Map lastSeqIds) throws 
ReplicationException;
 
   /**
+   * Remove all the max sequence id record for the given peer.
+   * @param peerId peer id
+   */
+  void removeLastSequenceIds(String peerId) throws ReplicationException;
+  /**
* Get the current position for a specific WAL in a given queue for a given 
regionserver.
* @param serverName the name of the regionserver
* @param queueId a String that identifies the queue

http://git-wip-us.apache.org/repos/asf/hbase/blob/eb5dd939/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ZKReplicationQueueStorage.java
--
diff --git 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ZKReplicationQueueStorage.java
 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ZKReplicationQueueStorage.java
index 2e7a012..96b0b91 100644
--- 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ZKReplicationQueueStorage.java
+++ 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ZKReplicationQueueStorage.java
@@ -102,7 +102,8 @@ class ZKReplicationQueueStorage extends 
ZKReplicationStorageBase
*/
   private final String hfileRefsZNode;
 
-  private final String regionsZNode;
+  @VisibleForTesting
+  final String regionsZNode;
 
   public ZKReplicationQueueStorage(ZKWatcher zookeeper, Configuration conf) {
 super(zookeeper, conf);
@@ -312,6 +313,40 @@ class ZKReplicationQueueStorage extends 
ZKReplicationStorageBase
   }
 
   @Override
+  public void removeLastSequenceIds(String peerId) throws ReplicationException 
{
+String suffix = "-" + peerId;
+try {
+  StringBuilder sb = new StringBuilder(regionsZNode);
+  int regionsZNodeLength = regionsZNode.length();
+ 

[06/20] hbase git commit: HBASE-20148 Make serial replication as a option for a peer instead of a table

2018-04-07 Thread zhangduo
HBASE-20148 Make serial replication as a option for a peer instead of a table


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/de685df1
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/de685df1
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/de685df1

Branch: refs/heads/HBASE-20046-branch-2
Commit: de685df16aeefa91044704e6332ca69abce52b84
Parents: 688287a
Author: zhangduo 
Authored: Fri Mar 9 15:00:59 2018 +0800
Committer: zhangduo 
Committed: Sun Apr 8 11:17:17 2018 +0800

--
 .../apache/hadoop/hbase/HTableDescriptor.java   |  8 -
 .../hadoop/hbase/client/TableDescriptor.java| 19 +++-
 .../hbase/client/TableDescriptorBuilder.java|  9 --
 .../replication/ReplicationPeerConfigUtil.java  |  5 +++
 .../replication/ReplicationPeerConfig.java  | 32 +++-
 .../ReplicationPeerConfigBuilder.java   | 12 
 .../org/apache/hadoop/hbase/HConstants.java |  6 
 .../src/main/protobuf/Replication.proto |  1 +
 .../hbase/replication/ReplicationUtils.java |  3 ++
 .../master/assignment/RegionStateStore.java | 14 -
 .../hbase/replication/ScopeWALEntryFilter.java  | 32 ++--
 .../regionserver/ReplicationSource.java |  4 +++
 .../ReplicationSourceWALActionListener.java | 10 +-
 .../ReplicationSourceWALReader.java |  6 ++--
 .../regionserver/SerialReplicationChecker.java  |  2 +-
 .../org/apache/hadoop/hbase/wal/WALKeyImpl.java |  8 -
 .../TestReplicationWALEntryFilters.java | 15 ++---
 .../replication/TestSerialReplication.java  |  9 +++---
 18 files changed, 104 insertions(+), 91 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/de685df1/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java
index ca0cb91..960b91f 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java
@@ -539,14 +539,6 @@ public class HTableDescriptor implements TableDescriptor, Comparable

http://git-wip-us.apache.org/repos/asf/hbase/blob/de685df1/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptor.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptor.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptor.java
index 13ad0e2..0a0683b 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptor.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptor.java
@@ -25,6 +25,7 @@ import java.util.Iterator;
 import java.util.Map;
 import java.util.Set;
 import java.util.stream.Collectors;
+import java.util.stream.Stream;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -245,11 +246,6 @@ public interface TableDescriptor {
   boolean hasRegionMemStoreReplication();
 
   /**
-   * @return true if there are at least one cf whose replication scope is 
serial.
-   */
-  boolean hasSerialReplicationScope();
-
-  /**
* Check if the compaction enable flag of the table is true. If flag is false
* then no minor/major compactions will be done in real.
*
@@ -288,6 +284,16 @@ public interface TableDescriptor {
   boolean isReadOnly();
 
   /**
+   * Check if any of the table's cfs' replication scope are set to
+   * {@link HConstants#REPLICATION_SCOPE_GLOBAL}.
+   * @return {@code true} if we have, otherwise {@code false}.
+   */
+  default boolean hasGlobalReplicationScope() {
+return Stream.of(getColumnFamilies())
+  .anyMatch(cf -> cf.getScope() == HConstants.REPLICATION_SCOPE_GLOBAL);
+  }
+
+  /**
* Check if the table's cfs' replication scope matched with the replication 
state
* @param enabled replication state
* @return true if matched, otherwise false
@@ -297,8 +303,7 @@ public interface TableDescriptor {
 boolean hasDisabled = false;
 
 for (ColumnFamilyDescriptor cf : getColumnFamilies()) {
-  if (cf.getScope() != HConstants.REPLICATION_SCOPE_GLOBAL &&
-cf.getScope() != HConstants.REPLICATION_SCOPE_SERIAL) {
+  if (cf.getScope() != HConstants.REPLICATION_SCOPE_GLOBAL) {
 hasDisabled = true;
   } else {
 hasEnabled = true;

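For context, a minimal usage sketch of the new default method using the 2.0-era builder API (illustrative only, not part of the patch; the table and family names are made up, and it assumes a build of the HBASE-20046 branch where hasGlobalReplicationScope() exists):

import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class GlobalScopeCheckSketch {
  public static void main(String[] args) {
    // "demo", "f1" and "f2" are made-up names; only f1 is replicated with global scope.
    TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("demo"))
        .addColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("f1"))
            .setScope(HConstants.REPLICATION_SCOPE_GLOBAL).build())
        .addColumnFamily(ColumnFamilyDescriptorBuilder.of(Bytes.toBytes("f2")))
        .build();
    // true: at least one family uses REPLICATION_SCOPE_GLOBAL
    System.out.println(td.hasGlobalReplicationScope());
    // false: f2 keeps the default (local) scope, so not every family is replication-enabled
    System.out.println(td.matchReplicationScope(true));
  }
}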

[07/20] hbase git commit: HBASE-20167 Optimize the implementation of ReplicationSourceWALReader

2018-04-07 Thread zhangduo
HBASE-20167 Optimize the implementation of ReplicationSourceWALReader


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/91854f4c
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/91854f4c
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/91854f4c

Branch: refs/heads/HBASE-20046-branch-2
Commit: 91854f4cd37fe87a3bb85e9d18221708abf7089b
Parents: de685df
Author: zhangduo 
Authored: Mon Mar 12 12:21:44 2018 +0800
Committer: zhangduo 
Committed: Sun Apr 8 11:17:26 2018 +0800

--
 .../RecoveredReplicationSource.java |  67 +--
 .../RecoveredReplicationSourceShipper.java  |  48 ++--
 .../RecoveredReplicationSourceWALReader.java|  56 --
 .../regionserver/ReplicationSource.java |  36 +++---
 .../regionserver/ReplicationSourceShipper.java  |  27 +++--
 .../ReplicationSourceWALReader.java | 101 +
 .../SerialReplicationSourceWALReader.java   | 112 +++
 7 files changed, 218 insertions(+), 229 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/91854f4c/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RecoveredReplicationSource.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RecoveredReplicationSource.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RecoveredReplicationSource.java
index d9506c0..169b469 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RecoveredReplicationSource.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RecoveredReplicationSource.java
@@ -20,8 +20,8 @@ package org.apache.hadoop.hbase.replication.regionserver;
 import java.io.IOException;
 import java.util.List;
 import java.util.UUID;
+import java.util.concurrent.BlockingQueue;
 import java.util.concurrent.PriorityBlockingQueue;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
@@ -59,31 +59,41 @@ public class RecoveredReplicationSource extends 
ReplicationSource {
   }
 
   @Override
-  protected void tryStartNewShipper(String walGroupId, 
PriorityBlockingQueue<Path> queue) {
-final RecoveredReplicationSourceShipper worker =
-new RecoveredReplicationSourceShipper(conf, walGroupId, queue, this,
-this.queueStorage);
-ReplicationSourceShipper extant = workerThreads.putIfAbsent(walGroupId, 
worker);
-if (extant != null) {
-  LOG.debug("Someone has beat us to start a worker thread for wal group " 
+ walGroupId);
-} else {
-  LOG.debug("Starting up worker for wal group " + walGroupId);
-  worker.startup(this::uncaughtException);
-  worker.setWALReader(
-startNewWALReader(worker.getName(), walGroupId, queue, 
worker.getStartPosition()));
-  workerThreads.put(walGroupId, worker);
-}
+  protected RecoveredReplicationSourceShipper createNewShipper(String 
walGroupId,
+  PriorityBlockingQueue<Path> queue) {
+return new RecoveredReplicationSourceShipper(conf, walGroupId, queue, 
this, queueStorage);
+  }
+
+  private void handleEmptyWALEntryBatch0(ReplicationSourceWALReader reader,
+  BlockingQueue<WALEntryBatch> entryBatchQueue, Path currentPath) throws 
InterruptedException {
+LOG.trace("Didn't read any new entries from WAL");
+// we're done with queue recovery, shut ourself down
+reader.setReaderRunning(false);
+// shuts down shipper thread immediately
+entryBatchQueue.put(new WALEntryBatch(0, currentPath));
   }
 
   @Override
-  protected ReplicationSourceWALReader startNewWALReader(String threadName, 
String walGroupId,
+  protected ReplicationSourceWALReader createNewWALReader(String walGroupId,
   PriorityBlockingQueue<Path> queue, long startPosition) {
-ReplicationSourceWALReader walReader =
-  new RecoveredReplicationSourceWALReader(fs, conf, queue, startPosition, 
walEntryFilter, this);
-Threads.setDaemonThreadRunning(walReader,
-  threadName + ".replicationSource.replicationWALReaderThread." + 
walGroupId + "," + queueId,
-  this::uncaughtException);
-return walReader;
+if (replicationPeer.getPeerConfig().isSerial()) {
+  return new SerialReplicationSourceWALReader(fs, conf, queue, 
startPosition, walEntryFilter,
+this) {
+
+@Override
+protected void handleEmptyWALEntryBatch(Path currentPath) throws 
InterruptedException {
+  handleEmptyWALEntryBatch0(this, entryBatchQueue, currentPath);
+}
+  };
+} else {
+  return new ReplicationSourceWALReader(fs, conf, queue, startPosition, 
walEntryFilter, 

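The refactor above replaces the "start a worker thread here" overrides with factory methods (createNewShipper, createNewWALReader) that the recovered source implements, so the base ReplicationSource owns thread startup. A conceptual sketch of that shape in plain Java (no HBase types; all names below are made up for illustration):

abstract class SourceBase {
  // Base class owns startup; subclasses only decide which shipper to build.
  void startShipper(String walGroup) {
    Shipper shipper = createNewShipper(walGroup);
    shipper.start();
  }
  abstract Shipper createNewShipper(String walGroup);
}

class RecoveredSource extends SourceBase {
  @Override
  Shipper createNewShipper(String walGroup) {
    return new Shipper("recovered-" + walGroup);
  }
}

class Shipper {
  private final String name;
  Shipper(String name) { this.name = name; }
  void start() { System.out.println("starting " + name); }
}

public class FactoryMethodSketch {
  public static void main(String[] args) {
    new RecoveredSource().startShipper("wal-group-1");
  }
}
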
[04/20] hbase git commit: HBASE-20129 Add UT for serial replication checker

2018-04-07 Thread zhangduo
HBASE-20129 Add UT for serial replication checker


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/80e00ee5
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/80e00ee5
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/80e00ee5

Branch: refs/heads/HBASE-20046-branch-2
Commit: 80e00ee5315266d819fcd7c3f8476d4ed6d7b5ea
Parents: 79e0c40
Author: zhangduo 
Authored: Tue Mar 6 08:40:31 2018 +0800
Committer: zhangduo 
Committed: Sun Apr 8 11:13:33 2018 +0800

--
 .../apache/hadoop/hbase/MetaTableAccessor.java  |  71 --
 .../regionserver/SerialReplicationChecker.java  |  18 +++
 .../TestSerialReplicationChecker.java   | 133 ++-
 3 files changed, 208 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/80e00ee5/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
index 109f2d0..2a88b56 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
@@ -19,12 +19,14 @@ package org.apache.hadoop.hbase;
 
 import edu.umd.cs.findbugs.annotations.NonNull;
 import edu.umd.cs.findbugs.annotations.Nullable;
+import java.io.ByteArrayOutputStream;
 import java.io.Closeable;
 import java.io.IOException;
 import java.io.InterruptedIOException;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collections;
+import java.util.Iterator;
 import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;
@@ -35,7 +37,6 @@ import java.util.TreeMap;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 import java.util.stream.Collectors;
-import java.util.stream.Stream;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Cell.Type;
 import org.apache.hadoop.hbase.client.Connection;
@@ -150,11 +151,13 @@ public class MetaTableAccessor {
   META_REGION_PREFIX, 0, len);
   }
 
-  private static final byte[] REPLICATION_PARENT_QUALIFIER = 
Bytes.toBytes("parent");
+  @VisibleForTesting
+  public static final byte[] REPLICATION_PARENT_QUALIFIER = 
Bytes.toBytes("parent");
+
+  private static final byte ESCAPE_BYTE = (byte) 0xFF;
 
-  private static final String REPLICATION_PARENT_SEPARATOR = "|";
+  private static final byte SEPARATED_BYTE = 0x00;
 
-  private static final String REPLICATION_PARENT_SEPARATOR_REGEX = "\\|";
   /**
* Lists all of the table regions currently in META.
* Deprecated, keep there until some test use this.
@@ -1921,10 +1924,51 @@ public class MetaTableAccessor {
   .build());
   }
 
+  private static void writeRegionName(ByteArrayOutputStream out, byte[] 
regionName) {
+for (byte b : regionName) {
+  if (b == ESCAPE_BYTE) {
+out.write(ESCAPE_BYTE);
+  }
+  out.write(b);
+}
+  }
+
+  @VisibleForTesting
+  public static byte[] getParentsBytes(List<RegionInfo> parents) {
+ByteArrayOutputStream bos = new ByteArrayOutputStream();
+Iterator<RegionInfo> iter = parents.iterator();
+writeRegionName(bos, iter.next().getRegionName());
+while (iter.hasNext()) {
+  bos.write(ESCAPE_BYTE);
+  bos.write(SEPARATED_BYTE);
+  writeRegionName(bos, iter.next().getRegionName());
+}
+return bos.toByteArray();
+  }
+
+  private static List<byte[]> parseParentsBytes(byte[] bytes) {
+List<byte[]> parents = new ArrayList<>();
+ByteArrayOutputStream bos = new ByteArrayOutputStream();
+for (int i = 0; i < bytes.length; i++) {
+  if (bytes[i] == ESCAPE_BYTE) {
+i++;
+if (bytes[i] == SEPARATED_BYTE) {
+  parents.add(bos.toByteArray());
+  bos.reset();
+  continue;
+}
+// fall through to append the byte
+  }
+  bos.write(bytes[i]);
+}
+if (bos.size() > 0) {
+  parents.add(bos.toByteArray());
+}
+return parents;
+  }
+
   private static void addReplicationParent(Put put, List<RegionInfo> parents) 
throws IOException {
-byte[] value = 
parents.stream().map(RegionReplicaUtil::getRegionInfoForDefaultReplica)
-  .map(RegionInfo::getRegionNameAsString).collect(Collectors
-.collectingAndThen(Collectors.joining(REPLICATION_PARENT_SEPARATOR), 
Bytes::toBytes));
+byte[] value = getParentsBytes(parents);
 
put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(put.getRow())
   
.setFamily(HConstants.REPLICATION_BARRIER_FAMILY).setQualifier(REPLICATION_PARENT_QUALIFIER)
   

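To make the escaping rule above concrete, here is a small self-contained sketch (not HBase code) that round-trips a list of byte[] names with the same scheme: a 0xFF byte is escaped by doubling it, and the two-byte sequence 0xFF 0x00 separates entries.

import java.io.ByteArrayOutputStream;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class ParentNameEncodingSketch {
  private static final byte ESCAPE_BYTE = (byte) 0xFF;
  private static final byte SEPARATOR_BYTE = 0x00;

  // Encode: double any 0xFF byte, join entries with the two-byte sequence 0xFF 0x00.
  static byte[] encode(List<byte[]> names) {
    ByteArrayOutputStream bos = new ByteArrayOutputStream();
    boolean first = true;
    for (byte[] name : names) {
      if (!first) {
        bos.write(ESCAPE_BYTE);
        bos.write(SEPARATOR_BYTE);
      }
      for (byte b : name) {
        if (b == ESCAPE_BYTE) {
          bos.write(ESCAPE_BYTE); // escape by doubling
        }
        bos.write(b);
      }
      first = false;
    }
    return bos.toByteArray();
  }

  // Decode: an escaped 0xFF yields a literal 0xFF; 0xFF 0x00 ends the current entry.
  static List<byte[]> decode(byte[] bytes) {
    List<byte[]> names = new ArrayList<>();
    ByteArrayOutputStream bos = new ByteArrayOutputStream();
    for (int i = 0; i < bytes.length; i++) {
      if (bytes[i] == ESCAPE_BYTE) {
        i++;
        if (bytes[i] == SEPARATOR_BYTE) {
          names.add(bos.toByteArray());
          bos.reset();
          continue;
        }
        // fall through: write the second byte of the doubled 0xFF pair
      }
      bos.write(bytes[i]);
    }
    if (bos.size() > 0) {
      names.add(bos.toByteArray());
    }
    return names;
  }

  public static void main(String[] args) {
    List<byte[]> in = Arrays.asList(new byte[] { 1, (byte) 0xFF, 2 }, new byte[] { 3, 4 });
    List<byte[]> out = decode(encode(in));
    System.out.println(out.size() == 2 && Arrays.equals(out.get(0), in.get(0))); // true
  }
}
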
[02/20] hbase git commit: HBASE-20115 Reimplement serial replication based on the new replication storage layer

2018-04-07 Thread zhangduo
http://git-wip-us.apache.org/repos/asf/hbase/blob/79e0c40c/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALEntryBatch.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALEntryBatch.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALEntryBatch.java
new file mode 100644
index 000..31c3ac7
--- /dev/null
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALEntryBatch.java
@@ -0,0 +1,138 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.replication.regionserver;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.wal.WAL.Entry;
+import org.apache.yetus.audience.InterfaceAudience;
+
+/**
+ * Holds a batch of WAL entries to replicate, along with some statistics
+ */
+@InterfaceAudience.Private
+class WALEntryBatch {
+  private List<Entry> walEntries;
+  // last WAL that was read
+  private Path lastWalPath;
+  // position in WAL of last entry in this batch
+  private long lastWalPosition = 0;
+  // number of distinct row keys in this batch
+  private int nbRowKeys = 0;
+  // number of HFiles
+  private int nbHFiles = 0;
+  // heap size of data we need to replicate
+  private long heapSize = 0;
+  // save the last sequenceid for each region if the table has 
serial-replication scope
+  private Map<String, Long> lastSeqIds = new HashMap<>();
+
+  /**
+   * @param lastWalPath Path of the WAL the last entry in this batch was read 
from
+   */
+  WALEntryBatch(int maxNbEntries, Path lastWalPath) {
+this.walEntries = new ArrayList<>(maxNbEntries);
+this.lastWalPath = lastWalPath;
+  }
+
+  public void addEntry(Entry entry) {
+walEntries.add(entry);
+  }
+
+  /**
+   * @return the WAL Entries.
+   */
+  public List<Entry> getWalEntries() {
+return walEntries;
+  }
+
+  /**
+   * @return the path of the last WAL that was read.
+   */
+  public Path getLastWalPath() {
+return lastWalPath;
+  }
+
+  /**
+   * @return the position in the last WAL that was read.
+   */
+  public long getLastWalPosition() {
+return lastWalPosition;
+  }
+
+  public void setLastWalPosition(long lastWalPosition) {
+this.lastWalPosition = lastWalPosition;
+  }
+
+  public int getNbEntries() {
+return walEntries.size();
+  }
+
+  /**
+   * @return the number of distinct row keys in this batch
+   */
+  public int getNbRowKeys() {
+return nbRowKeys;
+  }
+
+  /**
+   * @return the number of HFiles in this batch
+   */
+  public int getNbHFiles() {
+return nbHFiles;
+  }
+
+  /**
+   * @return total number of operations in this batch
+   */
+  public int getNbOperations() {
+return getNbRowKeys() + getNbHFiles();
+  }
+
+  /**
+   * @return the heap size of this batch
+   */
+  public long getHeapSize() {
+return heapSize;
+  }
+
+  /**
+   * @return the last sequenceid for each region if the table has 
serial-replication scope
+   */
+  public Map<String, Long> getLastSeqIds() {
+return lastSeqIds;
+  }
+
+  public void incrementNbRowKeys(int increment) {
+nbRowKeys += increment;
+  }
+
+  public void incrementNbHFiles(int increment) {
+nbHFiles += increment;
+  }
+
+  public void incrementHeapSize(long increment) {
+heapSize += increment;
+  }
+
+  public void setLastSeqId(String region, long sequenceId) {
+lastSeqIds.put(region, sequenceId);
+  }
+}

http://git-wip-us.apache.org/repos/asf/hbase/blob/79e0c40c/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALEntryStream.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALEntryStream.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALEntryStream.java
index 7c83c0c..bcab9b4 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALEntryStream.java
+++ 

[10/20] hbase git commit: HBASE-20206 WALEntryStream should not switch WAL file silently

2018-04-07 Thread zhangduo
HBASE-20206 WALEntryStream should not switch WAL file silently


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/e1ec0138
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/e1ec0138
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/e1ec0138

Branch: refs/heads/HBASE-20046-branch-2
Commit: e1ec0138c8cf9d17bab4c916ccee6754d642eb6b
Parents: 71f72f4
Author: zhangduo 
Authored: Sun Mar 18 18:09:45 2018 +0800
Committer: zhangduo 
Committed: Sun Apr 8 11:20:21 2018 +0800

--
 .../replication/ReplicationQueueStorage.java|   2 +-
 .../replication/ZKReplicationQueueStorage.java  |  39 ++--
 .../replication/TestReplicationStateBasic.java  |   3 +-
 .../TestZKReplicationQueueStorage.java  |   6 +-
 .../RecoveredReplicationSource.java |  33 
 .../RecoveredReplicationSourceShipper.java  |  13 +-
 .../regionserver/ReplicationSource.java |   2 +-
 .../regionserver/ReplicationSourceManager.java  | 100 +-
 .../regionserver/ReplicationSourceShipper.java  |  96 +-
 .../ReplicationSourceWALReader.java |  50 -
 .../SerialReplicationSourceWALReader.java   |  29 ++-
 .../replication/regionserver/WALEntryBatch.java |  22 +++
 .../regionserver/WALEntryStream.java|   5 +-
 .../TestReplicationSourceManager.java   |  17 +-
 .../regionserver/TestWALEntryStream.java| 188 ++-
 15 files changed, 384 insertions(+), 221 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/e1ec0138/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueueStorage.java
--
diff --git 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueueStorage.java
 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueueStorage.java
index 4c93da6..cfe9c9c 100644
--- 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueueStorage.java
+++ 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueueStorage.java
@@ -63,7 +63,7 @@ public interface ReplicationQueueStorage {
* @param serverName the name of the regionserver
* @param queueId a String that identifies the queue
* @param fileName name of the WAL
-   * @param position the current position in the file
+   * @param position the current position in the file. Will ignore if less 
than or equal to 0.
* @param lastSeqIds map with {encodedRegionName, sequenceId} pairs for 
serial replication.
*/
   void setWALPosition(ServerName serverName, String queueId, String fileName, 
long position, Map<String, Long> lastSeqIds) throws ReplicationException;

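As a hedged illustration of the contract documented above (a sketch, not code from the patch; the queue id, WAL file name and region hash below are made up), passing a non-positive position persists only the per-region sequence ids and leaves the stored WAL position untouched:

import java.util.Collections;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.replication.ReplicationException;
import org.apache.hadoop.hbase.replication.ReplicationQueueStorage;

public class SetWalPositionSketch {
  // Only the lastSeqIds map is persisted here: position <= 0 is ignored per the javadoc above.
  static void recordSeqIdOnly(ReplicationQueueStorage storage) throws ReplicationException {
    ServerName rs = ServerName.valueOf("rs1.example.org", 16020, 1523066000000L);
    storage.setWALPosition(rs, "1", "rs1.example.org%2C16020%2C1523066000000.1523066100000", -1,
        Collections.singletonMap("dd04e76a6966d4ffa908ed0586764767", 42L));
  }
}
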
http://git-wip-us.apache.org/repos/asf/hbase/blob/e1ec0138/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ZKReplicationQueueStorage.java
--
diff --git 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ZKReplicationQueueStorage.java
 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ZKReplicationQueueStorage.java
index adbf259..63f43e8 100644
--- 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ZKReplicationQueueStorage.java
+++ 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ZKReplicationQueueStorage.java
@@ -193,27 +193,28 @@ class ZKReplicationQueueStorage extends 
ZKReplicationStorageBase
   Map<String, Long> lastSeqIds) throws ReplicationException {
 try {
   List<ZKUtilOp> listOfOps = new ArrayList<>();
-  listOfOps.add(ZKUtilOp.setData(getFileNode(serverName, queueId, 
fileName),
-ZKUtil.positionToByteArray(position)));
+  if (position > 0) {
+listOfOps.add(ZKUtilOp.setData(getFileNode(serverName, queueId, 
fileName),
+  ZKUtil.positionToByteArray(position)));
+  }
   // Persist the max sequence id(s) of regions for serial replication 
atomically.
-  if (lastSeqIds != null && lastSeqIds.size() > 0) {
-for (Entry<String, Long> lastSeqEntry : lastSeqIds.entrySet()) {
-  String peerId = new ReplicationQueueInfo(queueId).getPeerId();
-  String path = 
getSerialReplicationRegionPeerNode(lastSeqEntry.getKey(), peerId);
-  /*
-   * Make sure the existence of path
-   * 
/hbase/replication/regions//-. As the 
javadoc in
-   * multiOrSequential() method said, if received a 
NodeExistsException, all operations will
-   * fail. So create the path here, and in fact, no need to add this 
operation to listOfOps,
-   * because only need to make sure that update file position and 
sequence id atomically.
-   */
-  

[13/20] hbase git commit: HBASE-20116 addendum fix javadoc and also a simple optimization

2018-04-07 Thread zhangduo
HBASE-20116 addendum fix javadoc and also a simple optimization


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/8f379fce
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/8f379fce
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/8f379fce

Branch: refs/heads/HBASE-20046-branch-2
Commit: 8f379fce52611a7d38e897a52f923b7b01619f24
Parents: 70d14c3
Author: zhangduo 
Authored: Thu Mar 22 08:31:20 2018 +0800
Committer: zhangduo 
Committed: Sun Apr 8 11:21:31 2018 +0800

--
 .../replication/ZKReplicationQueueStorage.java  | 22 
 .../TestZKReplicationQueueStorage.java  |  2 +-
 2 files changed, 14 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/8f379fce/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ZKReplicationQueueStorage.java
--
diff --git 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ZKReplicationQueueStorage.java
 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ZKReplicationQueueStorage.java
index 6c9752a..1a5749e 100644
--- 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ZKReplicationQueueStorage.java
+++ 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ZKReplicationQueueStorage.java
@@ -133,26 +133,30 @@ class ZKReplicationQueueStorage extends 
ZKReplicationStorageBase
   }
 
   /**
+   * 
* Put all regions under /hbase/replication/regions znode will lead to too 
many children because
-   * of the huge number of regions in real production environment. So here we 
use hash of encoded
-   * region name to distribute the znode into multiple znodes. 
+   * of the huge number of regions in real production environment. So here we 
will distribute the
+   * znodes to multiple directories.
+   * 
+   * 
* So the final znode path will be format like this:
*
* 
-   * /hbase/replication/regions/e1/ff/dd04e76a6966d4ffa908ed0586764767-100
+   * /hbase/replication/regions/dd/04/e76a6966d4ffa908ed0586764767-100
+   * /hbase/replication/regions/dd/04/e76a6966d4ffa908ed0586764767-100
* 
*
-   * The e1 indicate the first level hash of encoded region name, and the ff 
indicate the second
-   * level hash of encoded region name, the 100 indicate the peer id. 
-   * Note that here we use two-level hash because if only one-level hash (such 
as mod 65535), it
-   * will still lead to too many children under the /hbase/replication/regions 
znode.
+   * Here the full encoded region name is dd04e76a6966d4ffa908ed0586764767, 
and we use the first two
+   * characters 'dd' as the first level directory name, and use the next two 
characters '04' as the
+   * second level directory name, and the rest part as the prefix of the 
znode, and the suffix '100'
+   * is the peer id.
+   * 
* @param encodedRegionName the encoded region name.
* @param peerId peer id for replication.
* @return ZNode path to persist the max sequence id that we've pushed for 
the given region and
* peer.
*/
   @VisibleForTesting
-  public String getSerialReplicationRegionPeerNode(String encodedRegionName, 
String peerId) {
+  String getSerialReplicationRegionPeerNode(String encodedRegionName, String 
peerId) {
 if (encodedRegionName == null || encodedRegionName.length() != 
RegionInfo.MD5_HEX_LENGTH) {
   throw new IllegalArgumentException(
   "Invalid encoded region name: " + encodedRegionName + ", length 
should be 32.");
@@ -160,7 +164,7 @@ class ZKReplicationQueueStorage extends 
ZKReplicationStorageBase
 return new 
StringBuilder(regionsZNode).append(ZNodePaths.ZNODE_PATH_SEPARATOR)
 .append(encodedRegionName.substring(0, 
2)).append(ZNodePaths.ZNODE_PATH_SEPARATOR)
 .append(encodedRegionName.substring(2, 
4)).append(ZNodePaths.ZNODE_PATH_SEPARATOR)
-.append(encodedRegionName).append("-").append(peerId).toString();
+
.append(encodedRegionName.substring(4)).append("-").append(peerId).toString();
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hbase/blob/8f379fce/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestZKReplicationQueueStorage.java
--
diff --git 
a/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestZKReplicationQueueStorage.java
 
b/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestZKReplicationQueueStorage.java
index 28cdff1..ca86a05 100644
--- 
a/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestZKReplicationQueueStorage.java
+++ 
b/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestZKReplicationQueueStorage.java
@@ -257,7 +257,7 
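
The path layout described in the javadoc above can be illustrated with a small self-contained sketch (the helper and class names below are made up; ZKReplicationQueueStorage keeps its own package-private method):

public class RegionPeerZNodeSketch {
  // Mirrors the layout described above: first two chars, next two chars, then the
  // remaining 28 chars of the MD5 hex encoded region name, suffixed with "-<peerId>".
  static String regionPeerZNode(String regionsZNode, String encodedRegionName, String peerId) {
    if (encodedRegionName == null || encodedRegionName.length() != 32) {
      throw new IllegalArgumentException("Expected a 32-char MD5 hex encoded region name");
    }
    return regionsZNode + '/' + encodedRegionName.substring(0, 2) + '/'
        + encodedRegionName.substring(2, 4) + '/' + encodedRegionName.substring(4)
        + '-' + peerId;
  }

  public static void main(String[] args) {
    System.out.println(regionPeerZNode("/hbase/replication/regions",
        "dd04e76a6966d4ffa908ed0586764767", "100"));
    // -> /hbase/replication/regions/dd/04/e76a6966d4ffa908ed0586764767-100
  }
}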

[20/20] hbase git commit: HBASE-20296 Remove last pushed sequence ids when removing tables from a peer

2018-04-07 Thread zhangduo
HBASE-20296 Remove last pushed sequence ids when removing tables from a peer


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/e821b9ac
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/e821b9ac
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/e821b9ac

Branch: refs/heads/HBASE-20046-branch-2
Commit: e821b9ac5420bda2e2f964907a49b059a97525cc
Parents: eb5dd93
Author: zhangduo 
Authored: Sat Mar 31 20:25:13 2018 +0800
Committer: zhangduo 
Committed: Sun Apr 8 11:38:01 2018 +0800

--
 .../apache/hadoop/hbase/MetaTableAccessor.java  |  72 +-
 .../replication/ReplicationQueueStorage.java|   9 ++
 .../replication/ZKReplicationQueueStorage.java  |  15 +++
 .../master/replication/AddPeerProcedure.java|  14 +-
 .../master/replication/ModifyPeerProcedure.java | 134 ++-
 .../replication/UpdatePeerConfigProcedure.java  |  96 -
 .../hadoop/hbase/client/TestEnableTable.java|   4 +-
 .../TestRemoveFromSerialReplicationPeer.java| 120 +
 8 files changed, 363 insertions(+), 101 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/e821b9ac/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
index 4cc46c8..0f5ef09 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
@@ -57,6 +57,8 @@ import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
+import org.apache.hadoop.hbase.filter.Filter;
+import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter;
 import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
 import org.apache.hadoop.hbase.master.RegionState;
 import org.apache.hadoop.hbase.master.RegionState.State;
@@ -682,20 +684,19 @@ public class MetaTableAccessor {
 scanMeta(connection, null, null, QueryType.ALL, v);
   }
 
-  public static void scanMetaForTableRegions(Connection connection,
-  Visitor visitor, TableName tableName) throws IOException {
+  public static void scanMetaForTableRegions(Connection connection, Visitor 
visitor,
+  TableName tableName) throws IOException {
 scanMeta(connection, tableName, QueryType.REGION, Integer.MAX_VALUE, 
visitor);
   }
 
-  public static void scanMeta(Connection connection, TableName table,
-  QueryType type, int maxRows, final Visitor visitor) throws IOException {
+  public static void scanMeta(Connection connection, TableName table, 
QueryType type, int maxRows,
+  final Visitor visitor) throws IOException {
 scanMeta(connection, getTableStartRowForMeta(table, type), 
getTableStopRowForMeta(table, type),
-type, maxRows, visitor);
+  type, maxRows, visitor);
   }
 
-  public static void scanMeta(Connection connection,
-  @Nullable final byte[] startRow, @Nullable final byte[] stopRow,
-  QueryType type, final Visitor visitor) throws IOException {
+  public static void scanMeta(Connection connection, @Nullable final byte[] 
startRow,
+  @Nullable final byte[] stopRow, QueryType type, final Visitor visitor) 
throws IOException {
 scanMeta(connection, startRow, stopRow, type, Integer.MAX_VALUE, visitor);
   }
 
@@ -708,26 +709,19 @@ public class MetaTableAccessor {
* @param tableName  table withing we scan
* @param rowstart scan from this row
* @param rowLimit   max number of rows to return
-   * @throws IOException
*/
-  public static void scanMeta(Connection connection,
-  final Visitor visitor, final TableName tableName,
-  final byte[] row, final int rowLimit)
-  throws IOException {
-
+  public static void scanMeta(Connection connection, final Visitor visitor,
+  final TableName tableName, final byte[] row, final int rowLimit) throws 
IOException {
 byte[] startRow = null;
 byte[] stopRow = null;
 if (tableName != null) {
-  startRow =
-  getTableStartRowForMeta(tableName, QueryType.REGION);
+  startRow = getTableStartRowForMeta(tableName, QueryType.REGION);
   if (row != null) {
-RegionInfo closestRi =
-getClosestRegionInfo(connection, tableName, row);
-startRow = RegionInfo
-.createRegionName(tableName, closestRi.getStartKey(), 
HConstants.ZEROES, false);
+RegionInfo closestRi = getClosestRegionInfo(connection, tableName, 
row);
+startRow =
+ 

[14/20] hbase git commit: HBASE-20147 Serial replication will be stuck if we create a table with serial replication but add it to a peer after there are region moves

2018-04-07 Thread zhangduo
HBASE-20147 Serial replication will be stuck if we create a table with serial 
replication but add it to a peer after there are region moves


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/2d5c0e22
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/2d5c0e22
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/2d5c0e22

Branch: refs/heads/HBASE-20046-branch-2
Commit: 2d5c0e22c094456ae3e35af26c169ee726aa0939
Parents: 8f379fc
Author: zhangduo 
Authored: Wed Mar 21 21:03:14 2018 +0800
Committer: zhangduo 
Committed: Sun Apr 8 11:21:36 2018 +0800

--
 .../hadoop/hbase/AsyncMetaTableAccessor.java|  50 ++--
 .../apache/hadoop/hbase/MetaTableAccessor.java  |  26 ++-
 .../src/main/protobuf/MasterProcedure.proto |   7 +-
 .../replication/ReplicationQueueStorage.java|   8 +
 .../hbase/replication/ReplicationUtils.java |   6 +-
 .../replication/ZKReplicationQueueStorage.java  |  50 ++--
 .../master/replication/AddPeerProcedure.java|  21 +-
 .../master/replication/ModifyPeerProcedure.java | 166 +-
 .../replication/ReplicationPeerManager.java |  32 +--
 .../replication/UpdatePeerConfigProcedure.java  |  59 -
 .../regionserver/PeerProcedureHandlerImpl.java  |  17 +-
 .../regionserver/ReplicationSourceManager.java  |   4 +-
 .../replication/regionserver/WALEntryBatch.java |   8 +
 .../replication/SerialReplicationTestBase.java  | 229 +++
 .../TestAddToSerialReplicationPeer.java | 215 +
 .../replication/TestSerialReplication.java  | 191 +---
 16 files changed, 825 insertions(+), 264 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/2d5c0e22/hbase-client/src/main/java/org/apache/hadoop/hbase/AsyncMetaTableAccessor.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/AsyncMetaTableAccessor.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/AsyncMetaTableAccessor.java
index 05e60d4..13245d3 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/AsyncMetaTableAccessor.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/AsyncMetaTableAccessor.java
@@ -489,16 +489,17 @@ public class AsyncMetaTableAccessor {
   QueryType type) {
 return tableName.map((table) -> {
   switch (type) {
-  case REGION:
-byte[] startRow = new byte[table.getName().length + 2];
-System.arraycopy(table.getName(), 0, startRow, 0, 
table.getName().length);
-startRow[startRow.length - 2] = HConstants.DELIMITER;
-startRow[startRow.length - 1] = HConstants.DELIMITER;
-return startRow;
-  case ALL:
-  case TABLE:
-  default:
-return table.getName();
+case REGION:
+case REPLICATION:
+  byte[] startRow = new byte[table.getName().length + 2];
+  System.arraycopy(table.getName(), 0, startRow, 0, 
table.getName().length);
+  startRow[startRow.length - 2] = HConstants.DELIMITER;
+  startRow[startRow.length - 1] = HConstants.DELIMITER;
+  return startRow;
+case ALL:
+case TABLE:
+default:
+  return table.getName();
   }
 });
   }
@@ -512,20 +513,21 @@ public class AsyncMetaTableAccessor {
 return tableName.map((table) -> {
   final byte[] stopRow;
   switch (type) {
-  case REGION:
-stopRow = new byte[table.getName().length + 3];
-System.arraycopy(table.getName(), 0, stopRow, 0, 
table.getName().length);
-stopRow[stopRow.length - 3] = ' ';
-stopRow[stopRow.length - 2] = HConstants.DELIMITER;
-stopRow[stopRow.length - 1] = HConstants.DELIMITER;
-break;
-  case ALL:
-  case TABLE:
-  default:
-stopRow = new byte[table.getName().length + 1];
-System.arraycopy(table.getName(), 0, stopRow, 0, 
table.getName().length);
-stopRow[stopRow.length - 1] = ' ';
-break;
+case REGION:
+case REPLICATION:
+  stopRow = new byte[table.getName().length + 3];
+  System.arraycopy(table.getName(), 0, stopRow, 0, 
table.getName().length);
+  stopRow[stopRow.length - 3] = ' ';
+  stopRow[stopRow.length - 2] = HConstants.DELIMITER;
+  stopRow[stopRow.length - 1] = HConstants.DELIMITER;
+  break;
+case ALL:
+case TABLE:
+default:
+  stopRow = new byte[table.getName().length + 1];
+  System.arraycopy(table.getName(), 0, stopRow, 0, 
table.getName().length);
+  stopRow[stopRow.length - 1] = ' ';
+  break;
   }
   return stopRow;
 });
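
A self-contained sketch of the arithmetic in the REGION/REPLICATION branch above (illustrative only; the table name is made up and HConstants.DELIMITER is inlined as ','):

import java.nio.charset.StandardCharsets;
import java.util.Arrays;

public class MetaScanRowSketch {
  // Mirrors the REGION/REPLICATION case above: table name followed by two delimiter bytes.
  static byte[] startRowFor(byte[] tableName) {
    byte[] startRow = new byte[tableName.length + 2];
    System.arraycopy(tableName, 0, startRow, 0, tableName.length);
    startRow[startRow.length - 2] = (byte) ','; // HConstants.DELIMITER
    startRow[startRow.length - 1] = (byte) ',';
    return startRow;
  }

  public static void main(String[] args) {
    byte[] row = startRowFor("demo".getBytes(StandardCharsets.UTF_8));
    System.out.println(new String(row, StandardCharsets.UTF_8)); // demo,,
    System.out.println(Arrays.toString(row));
  }
}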


hbase git commit: HBASE-18828 [2.0] Generate CHANGES.txt; ADDENDUM2 Fix which JIRA we refer to in comment

2018-04-07 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/branch-2.0 d4aee75f3 -> 4ffdca6f2


HBASE-18828 [2.0] Generate CHANGES.txt; ADDENDUM2 Fix which JIRA we refer to in 
comment


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/4ffdca6f
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/4ffdca6f
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/4ffdca6f

Branch: refs/heads/branch-2.0
Commit: 4ffdca6f2b441d9de3b28531760b19b48ed7
Parents: d4aee75
Author: Michael Stack 
Authored: Sat Apr 7 11:32:47 2018 -0700
Committer: Michael Stack 
Committed: Sat Apr 7 11:32:47 2018 -0700

--
 CHANGES.md  | 4 ++--
 RELEASENOTES.md | 4 ++--
 2 files changed, 4 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/4ffdca6f/CHANGES.md
--
diff --git a/CHANGES.md b/CHANGES.md
index 62af426..5c7f90f 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -21,12 +21,12 @@
 CHANGES.md and RELEASENOTES.md were generated using yetus releasedocmaker.
 
 First make sure what is in JIRA agrees with what is in git and vice-versa
-(See HBASE-14175 for original exposition on how to do this followed by
+(See HBASE-14025 for original exposition on how to do this followed by
 the travails of various release managers. See also HBASE-18828 for a
 deriviative makes the move to yetus releasedocmaker).
 
 Then make sure that anything in current release as noted in JIRA has
-not made it out in earlier major/minor (see HBASE-14175 for how to
+not made it out in earlier major/minor (see HBASE-14025 for how to
 reconcile which).
 
 Obtain a yetus release (see HBASE-18828 for some help). To run

http://git-wip-us.apache.org/repos/asf/hbase/blob/4ffdca6f/RELEASENOTES.md
--
diff --git a/RELEASENOTES.md b/RELEASENOTES.md
index 6b11c63..41240ce 100644
--- a/RELEASENOTES.md
+++ b/RELEASENOTES.md
@@ -22,12 +22,12 @@
 CHANGES.md and RELEASENOTES.md were generated using yetus releasedocmaker.
 
 First make sure what is in JIRA agrees with what is in git and vice-versa
-(See HBASE-14175 for original exposition on how to do this followed by
+(See HBASE-14025 for original exposition on how to do this followed by
 the travails of various release managers. See also HBASE-18828 for a
 deriviative makes the move to yetus releasedocmaker).
 
 Then make sure that anything in current release as noted in JIRA has
-not made it out in earlier major/minor (see HBASE-14175 for how to
+not made it out in earlier major/minor (see HBASE-14025 for how to
 reconcile which).
 
 Obtain a yetus release (see HBASE-18828 for some help). To run



hbase git commit: HBASE-18828 [2.0] Generate CHANGES.txt; ADDENDUM that adds comment to head of CHANGES.md and RELEASENOTES.md on how they were made

2018-04-07 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/branch-2.0 d7547c615 -> d4aee75f3


HBASE-18828 [2.0] Generate CHANGES.txt; ADDENDUM that adds comment to head of 
CHANGES.md and RELEASENOTES.md on how they were made


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/d4aee75f
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/d4aee75f
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/d4aee75f

Branch: refs/heads/branch-2.0
Commit: d4aee75f3b6d4c7f896aed7a20cc75ac012a5126
Parents: d7547c6
Author: Michael Stack 
Authored: Sat Apr 7 09:54:27 2018 -0700
Committer: Michael Stack 
Committed: Sat Apr 7 09:54:27 2018 -0700

--
 CHANGES.md  | 24 
 RELEASENOTES.md | 25 +
 2 files changed, 49 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/d4aee75f/CHANGES.md
--
diff --git a/CHANGES.md b/CHANGES.md
index 493ac79..62af426 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -15,6 +15,30 @@
  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  See the License for the specific language governing permissions and
  limitations under the License.
+
+
+
+CHANGES.md and RELEASENOTES.md were generated using yetus releasedocmaker.
+
+First make sure what is in JIRA agrees with what is in git and vice-versa
+(See HBASE-14175 for original exposition on how to do this followed by
+the travails of various release managers. See also HBASE-18828 for a
+deriviative makes the move to yetus releasedocmaker).
+
+Then make sure that anything in current release as noted in JIRA has
+not made it out in earlier major/minor (see HBASE-14175 for how to
+reconcile which).
+
+Obtain a yetus release (see HBASE-18828 for some help). To run
+releasedocmaker, do as follows (below example is for hbase-2.0.0):
+
+ $ ./release-doc-maker/releasedocmaker.py -p HBASE --fileversions \
+-v 2.0.0 -l --sortorder=newer --skip-credits
+
+Rename the output as CHANGES.md and RELEASENOTES.md. Edit both to put
+document title above the apache license so markdown readers work. You
+may have to bulk import old-style CHANGES.txt on to the end in a code
+comment to preserve continuity of the CHANGELOG.
 -->
 
 ## Release 2.0.0 - Unreleased (as of 2018-04-06)

http://git-wip-us.apache.org/repos/asf/hbase/blob/d4aee75f/RELEASENOTES.md
--
diff --git a/RELEASENOTES.md b/RELEASENOTES.md
index 7776c08..6b11c63 100644
--- a/RELEASENOTES.md
+++ b/RELEASENOTES.md
@@ -16,6 +16,31 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+
+
+
+CHANGES.md and RELEASENOTES.md were generated using yetus releasedocmaker.
+
+First make sure what is in JIRA agrees with what is in git and vice-versa
+(See HBASE-14175 for original exposition on how to do this followed by
+the travails of various release managers. See also HBASE-18828 for a
+deriviative makes the move to yetus releasedocmaker).
+
+Then make sure that anything in current release as noted in JIRA has
+not made it out in earlier major/minor (see HBASE-14175 for how to
+reconcile which).
+
+Obtain a yetus release (see HBASE-18828 for some help). To run
+releasedocmaker, do as follows (below example is for hbase-2.0.0):
+
+ $ ./release-doc-maker/releasedocmaker.py -p HBASE --fileversions \
+-v 2.0.0 -l --sortorder=newer --skip-credits
+
+Rename the output as CHANGES.md and RELEASENOTES.md. Edit both to put
+document title above the apache license so markdown readers work. You
+may have to bulk import old-style CHANGES.txt on to the end in a code
+comment to preserve continuity of the CHANGELOG.
+
 -->
 
 These release notes cover new developer and user-facing incompatibilities, 
important issues, features, and major improvements.



[4/5] hbase git commit: HBASE-18828 [2.0] Generate CHANGES.txt

2018-04-07 Thread stack
http://git-wip-us.apache.org/repos/asf/hbase/blob/d7547c61/CHANGES.md
--
diff --git a/CHANGES.md b/CHANGES.md
new file mode 100644
index 000..493ac79
--- /dev/null
+++ b/CHANGES.md
@@ -0,0 +1,6034 @@
+# HBASE Changelog
+
+
+## Release 2.0.0 - Unreleased (as of 2018-04-06)
+
+### INCOMPATIBLE CHANGES:
+
+| JIRA | Summary | Priority | Component |
+|:---- |:---- | :--- |:---- |
+| [HBASE-16459](https://issues.apache.org/jira/browse/HBASE-16459) | Remove 
unused hbase shell --format option |  Trivial | shell |
+| [HBASE-19128](https://issues.apache.org/jira/browse/HBASE-19128) | Purge 
Distributed Log Replay from codebase, configurations, text; mark the feature as 
unsupported, broken. |  Major | documentation |
+| [HBASE-19504](https://issues.apache.org/jira/browse/HBASE-19504) | Add 
TimeRange support into checkAndMutate |  Major | . |
+| [HBASE-20119](https://issues.apache.org/jira/browse/HBASE-20119) | Introduce 
a pojo class to carry coprocessor information in order to make 
TableDescriptorBuilder accept multiple cp at once |  Minor | . |
+| [HBASE-19437](https://issues.apache.org/jira/browse/HBASE-19437) | Batch 
operation can't handle the null result for Append/Increment |  Critical | 
Usability |
+| [HBASE-19873](https://issues.apache.org/jira/browse/HBASE-19873) | Add a 
CategoryBasedTimeout ClassRule for all UTs |  Major | . |
+| [HBASE-19783](https://issues.apache.org/jira/browse/HBASE-19783) | Change 
replication peer cluster key/endpoint from a not-null value to null is not 
allowed |  Minor | Replication |
+| [HBASE-19483](https://issues.apache.org/jira/browse/HBASE-19483) | Add 
proper privilege check for rsgroup commands |  Major | rsgroup, security |
+| [HBASE-19492](https://issues.apache.org/jira/browse/HBASE-19492) | Add 
EXCLUDE\_NAMESPACE and EXCLUDE\_TABLECFS support to replication peer config |  
Major | . |
+| [HBASE-19357](https://issues.apache.org/jira/browse/HBASE-19357) | Bucket 
cache no longer L2 for LRU cache |  Major | . |
+| [HBASE-19359](https://issues.apache.org/jira/browse/HBASE-19359) | Revisit 
the default config of hbase client retries number |  Major | . |
+| [HBASE-19092](https://issues.apache.org/jira/browse/HBASE-19092) | Make Tag 
IA.LimitedPrivate and expose for CPs |  Critical | Coprocessors |
+| [HBASE-19187](https://issues.apache.org/jira/browse/HBASE-19187) | Remove 
option to create on heap bucket cache |  Minor | regionserver |
+| [HBASE-19033](https://issues.apache.org/jira/browse/HBASE-19033) | Allow CP 
users to change versions and TTL before opening StoreScanner |  Blocker | 
Coprocessors |
+| [HBASE-19047](https://issues.apache.org/jira/browse/HBASE-19047) | CP 
exposed Scanner types should not extend Shipper |  Critical | Coprocessors |
+| [HBASE-18905](https://issues.apache.org/jira/browse/HBASE-18905) | Allow CPs 
to request flush on Region and know the completion of the requested flush |  
Major | Coprocessors |
+| [HBASE-18410](https://issues.apache.org/jira/browse/HBASE-18410) | 
FilterList  Improvement. |  Major | Filters |
+| [HBASE-18893](https://issues.apache.org/jira/browse/HBASE-18893) | Remove 
Add/Modify/DeleteColumnFamilyProcedure in favor of using ModifyTableProcedure | 
 Major | Coprocessors, master |
+| [HBASE-19067](https://issues.apache.org/jira/browse/HBASE-19067) | Do not 
expose getHDFSBlockDistribution in StoreFile |  Major | Coprocessors |
+| [HBASE-18989](https://issues.apache.org/jira/browse/HBASE-18989) | Polish 
the compaction related CP hooks |  Major | Compaction, Coprocessors |
+| [HBASE-19046](https://issues.apache.org/jira/browse/HBASE-19046) | 
RegionObserver#postCompactSelection  Avoid passing shaded ImmutableList param | 
 Major | Coprocessors |
+| [HBASE-19001](https://issues.apache.org/jira/browse/HBASE-19001) | Remove 
the hooks in RegionObserver which are designed to construct a StoreScanner 
which is marked as IA.Private |  Major | Coprocessors |
+| [HBASE-14247](https://issues.apache.org/jira/browse/HBASE-14247) | Separate 
the old WALs into different regionserver directories |  Critical | wal |
+| [HBASE-18183](https://issues.apache.org/jira/browse/HBASE-18183) | Region 
interface cleanup for CP expose |  Major | Coprocessors |
+| [HBASE-18878](https://issues.apache.org/jira/browse/HBASE-18878) | Use 
Optional\<T\> return types when T can be null |  Major | Coprocessors |
+| [HBASE-18649](https://issues.apache.org/jira/browse/HBASE-18649) | Deprecate 
KV Usage in MR to move to Cells in 3.0 |  Major | API, mapreduce |
+| [HBASE-18897](https://issues.apache.org/jira/browse/HBASE-18897) | 
Substitute MemStore for Memstore |  Major | . |
+| [HBASE-18883](https://issues.apache.org/jira/browse/HBASE-18883) | Upgrade 
to Curator 4.0 |  Major | Client, dependencies |
+| [HBASE-18839](https://issues.apache.org/jira/browse/HBASE-18839) | Apply 
RegionInfo to code base |  Major | Coprocessors |
+| 

[1/5] hbase git commit: HBASE-18828 [2.0] Generate CHANGES.txt

2018-04-07 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/branch-2.0 57aaee5bb -> d7547c615


http://git-wip-us.apache.org/repos/asf/hbase/blob/d7547c61/hbase-assembly/src/main/assembly/components.xml
--
diff --git a/hbase-assembly/src/main/assembly/components.xml 
b/hbase-assembly/src/main/assembly/components.xml
index e6c45e0..fe57078 100644
--- a/hbase-assembly/src/main/assembly/components.xml
+++ b/hbase-assembly/src/main/assembly/components.xml
@@ -32,7 +32,8 @@
   <directory>${project.basedir}/..</directory>
   <outputDirectory>.</outputDirectory>
   <includes>
-    <include>CHANGES.txt</include>
+    <include>CHANGES.md</include>
+    <include>RELEASENOTES.md</include>
     <include>README.txt</include>
   </includes>
   <fileMode>0644</fileMode>

http://git-wip-us.apache.org/repos/asf/hbase/blob/d7547c61/pom.xml
--
diff --git a/pom.xml b/pom.xml
index dbe6026..3fe436b 100755
--- a/pom.xml
+++ b/pom.xml
@@ -830,7 +830,6 @@
   **/0016310
   **/.idea/**
   **/*.iml
-  **/CHANGES.txt
   **/generated/**
   **/gen-*/**
   

http://git-wip-us.apache.org/repos/asf/hbase/blob/d7547c61/src/main/asciidoc/_chapters/developer.adoc
--
diff --git a/src/main/asciidoc/_chapters/developer.adoc 
b/src/main/asciidoc/_chapters/developer.adoc
index 6d959c2..4e35fd2 100644
--- a/src/main/asciidoc/_chapters/developer.adoc
+++ b/src/main/asciidoc/_chapters/developer.adoc
@@ -578,8 +578,9 @@ You could also set this in an environment variable or alias 
in your shell.
 The script _dev-support/make_rc.sh_ automates many of the below steps.
 It will checkout a tag, clean the checkout, build src and bin tarballs,
 and deploy the built jars to repository.apache.org.
-It does NOT do the modification of the _CHANGES.txt_ for the release,
-the checking of the produced artifacts to ensure they are 'good' --
+It does NOT do the modification of the _CHANGES.md_ and _RELEASENOTES.md_
+(See HBASE-18828 for how to generate these files)
+for the release, the checking of the produced artifacts to ensure they are 
'good' --
 e.g. extracting the produced tarballs, verifying that they
 look right, then starting HBase and checking that everything is running
 correctly -- or the signing and pushing of the tarballs to
@@ -588,9 +589,9 @@ Take a look. Modify/improve as you see fit.
 
 
 .Procedure: Release Procedure
-. Update the _CHANGES.txt_ file and the POM files.
+. Update the _CHANGES.md and _RELEASENOTES.md_ files (See HBASE-18828 for 
how)_ and the POM files.
 +
-Update _CHANGES.txt_ with the changes since the last release.
+Update _CHANGES.md and _RELEASENOTES.md_ with the changes since the last 
release.
 Make sure the URL to the JIRA points to the proper location which lists fixes 
for this release.
 Adjust the version in all the POM files appropriately.
 If you are making a release candidate, you must remove the `-SNAPSHOT` label 
from all versions
@@ -604,7 +605,8 @@ To set a version in all the many poms of the hbase 
multi-module project, use a c
 $ mvn clean org.codehaus.mojo:versions-maven-plugin:2.5:set 
-DnewVersion=2.1.0-SNAPSHOT
 
 +
-Make sure all versions in poms are changed! Checkin the _CHANGES.txt_ and any 
maven version changes.
+Make sure all versions in poms are changed! Checkin the _CHANGES.md_ and 
_RELEASENOTES.md_
+and any maven version changes.
 
 . Update the documentation.
 +



[2/5] hbase git commit: HBASE-18828 [2.0] Generate CHANGES.txt

2018-04-07 Thread stack
http://git-wip-us.apache.org/repos/asf/hbase/blob/d7547c61/RELEASENOTES.md
--
diff --git a/RELEASENOTES.md b/RELEASENOTES.md
new file mode 100644
index 000..7776c08
--- /dev/null
+++ b/RELEASENOTES.md
@@ -0,0 +1,8204 @@
+# HBASE  2.0.0 Release Notes
+
+
+
+These release notes cover new developer and user-facing incompatibilities, 
important issues, features, and major improvements.
+
+
+---
+
+* [HBASE-14175](https://issues.apache.org/jira/browse/HBASE-14175) | 
*Critical* | **Adopt releasedocmaker for better generated release notes**
+
+We will use yetus releasedocmaker to make our changes doc from here on out. A 
CHANGELOG.md will replace our current CHANGES.txt. Adjacent, we'll keep up a 
RELEASENOTES.md doc courtesy of releasedocmaker.
+
+Over in HBASE-18828 is where we are working through steps for the RM 
integrating this new tooling.
+
+
+---
+
+* [HBASE-16499](https://issues.apache.org/jira/browse/HBASE-16499) | 
*Critical* | **slow replication for small HBase clusters**
+
+Changed the default value for replication.source.ratio from 0.1 to 0.5. Which 
means now by default 50% of the total RegionServers in peer cluster(s) will 
participate in replication.
+
+
+---
+
+* [HBASE-16459](https://issues.apache.org/jira/browse/HBASE-16459) | *Trivial* 
| **Remove unused hbase shell --format option**
+
+
+
+The HBase `shell` command no longer recognizes the option `--format`. 
Previously this option only recognized the default value of 'console'. The 
default value is now always used.
+
+
+---
+
+* [HBASE-20259](https://issues.apache.org/jira/browse/HBASE-20259) | 
*Critical* | **Doc configs for in-memory-compaction and add detail to 
in-memory-compaction logging**
+
+Disables in-memory compaction as default.
+
+Adds logging of in-memory compaction configuration on creation.
+
+Adds a chapter to the refguide on this new feature.
+
+
+---
+
+* [HBASE-20282](https://issues.apache.org/jira/browse/HBASE-20282) | *Major* | 
**Provide short name invocations for useful tools**
+
+\`hbase regionsplitter\` is a new short invocation for \`hbase 
org.apache.hadoop.hbase.util.RegionSplitter\`
+
+
+---
+
+* [HBASE-20314](https://issues.apache.org/jira/browse/HBASE-20314) | *Major* | 
**Precommit build for master branch fails because of surefire fork fails**
+
+Upgrade surefire plugin to 2.21.0.
+
+
+---
+
+* [HBASE-20130](https://issues.apache.org/jira/browse/HBASE-20130) | 
*Critical* | **Use defaults (16020 & 16030) as base ports when the RS is bound 
to localhost**
+
+
+When region servers bind to localhost (mostly in pseudo distributed mode), 
default ports (16020 & 16030) are used as base ports. This will support up to 9 
instances of region servers by default with `local-regionservers.sh` script. If 
additional instances are needed, see the reference guide on how to deploy with 
a different range using the environment variables `HBASE_RS_BASE_PORT` and 
`HBASE_RS_INFO_BASE_PORT`.
+
+
+---
+
+* [HBASE-20111](https://issues.apache.org/jira/browse/HBASE-20111) | 
*Critical* | **Able to split region explicitly even on shouldSplit return false 
from split policy**
+
+When a split is requested on a Region, the RegionServer hosting that Region 
will now consult the configured SplitPolicy for that table when determining if 
a split of that Region is allowed. When a split is disallowed (due to the 
Region not being OPEN or the SplitPolicy denying the request), the operation 
will \*not\* be implicitly retried as it has previously done. Users will need 
to guard against and explicitly retry region split requests which are denied by 
the system.
+
+
+---
+
+* [HBASE-20223](https://issues.apache.org/jira/browse/HBASE-20223) | *Blocker* 
| **Use hbase-thirdparty 2.1.0**
+
+Moves commons-cli and commons-collections4 into the HBase thirdparty shaded 
jar which means that these are no longer generally available for users on the 
classpath.
+
+
+---
+
+* [HBASE-19128](https://issues.apache.org/jira/browse/HBASE-19128) | *Major* | 
**Purge Distributed Log Replay from codebase, configurations, text; mark the 
feature as unsupported, broken.**
+
+Removes Distributed Log Replay feature. Disable the feature before upgrading.
+
+
+---
+
+* [HBASE-19504](https://issues.apache.org/jira/browse/HBASE-19504) | *Major* | 
**Add TimeRange support into checkAndMutate**
+
+1) checkAndMutate accept a TimeRange to query the specified cell
+2) remove writeToWAL flag from Region#checkAndMutate since it is useless (this 
is a incompatible change)
+
+
+---
+
+* [HBASE-20224](https://issues.apache.org/jira/browse/HBASE-20224) | *Blocker* 
| **Web UI is broken in standalone mode**
+
+Standalone webui was broken inadvertently by HBASE-20027.
+
+
+---
+
+* [HBASE-20237](https://issues.apache.org/jira/browse/HBASE-20237) | 
*Critical* | **Put back getClosestRowBefore and throw UnknownProtocolException 
instead... for asynchbase client**
+
+Throw UnknownProtocolException if a client 

[5/5] hbase git commit: HBASE-18828 [2.0] Generate CHANGES.txt

2018-04-07 Thread stack
HBASE-18828 [2.0] Generate CHANGES.txt

Made sure what is in JIRA agreed with what is in git and vice-versa.
Then made it so issues in 1.0.0 and earlier were not counted as part
of the 2.0.0 release.

Then ran the yetus releasedocmaker like so:

$ ./release-doc-maker/releasedocmaker.py -p HBASE --fileversions -v 2.0.0
-l --sortorder=newer --skip-credits

... and renamed the output as CHANGES.md and RELEASENOTES.md. I edited
both to put the document title above the apache license so markdown
readers would work. I also bulk imported the 1.0.0 CHANGES.txt on to the
end of the CHANGES.md file.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/d7547c61
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/d7547c61
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/d7547c61

Branch: refs/heads/branch-2.0
Commit: d7547c6157a9c44e9d6b6ebf1672cc3fa592972d
Parents: 57aaee5
Author: Michael Stack 
Authored: Fri Apr 6 11:43:20 2018 -0700
Committer: Michael Stack 
Committed: Sat Apr 7 09:37:09 2018 -0700

--
 CHANGES.md  | 6034 +
 CHANGES.txt | 4455 --
 RELEASENOTES.md | 8204 ++
 hbase-assembly/src/main/assembly/components.xml |3 +-
 pom.xml |1 -
 src/main/asciidoc/_chapters/developer.adoc  |   12 +-
 6 files changed, 14247 insertions(+), 4462 deletions(-)
--




[3/5] hbase git commit: HBASE-18828 [2.0] Generate CHANGES.txt

2018-04-07 Thread stack
http://git-wip-us.apache.org/repos/asf/hbase/blob/d7547c61/CHANGES.txt
--
diff --git a/CHANGES.txt b/CHANGES.txt
deleted file mode 100755
index 52d2120..000
--- a/CHANGES.txt
+++ /dev/null
@@ -1,4455 +0,0 @@
-HBase Change Log
-Release 0.93.0 - Unreleased
-  *DO NOT ADD ISSUES HERE ON COMMIT ANY MORE.  WE'LL GENERATE THE LIST
-  FROM JIRA INSTEAD WHEN WE MAKE A RELEASE*
-
-Release 0.92.1 - Unreleased
-  BUG FIXES
-   HBASE-5176  AssignmentManager#getRegion: logging nit  adds a redundant '+' 
(Karthik K)
-   HBASE-5237  Addendum for HBASE-5160 and HBASE-4397 (Ram)
-   HBASE-5235  HLogSplitter writer thread's streams not getting closed when 
any 
-   of the writer threads has exceptions. (Ram)
-   HBASE-5243  LogSyncerThread not getting shutdown waiting for the 
interrupted flag (Ram)
-   HBASE-5255  Use singletons for OperationStatus to save memory (Benoit)
-   HBASE-5345  CheckAndPut doesn't work when value is empty byte[] (Evert 
Arckens)
-   HBASE-5466  Opening a table also opens the metatable and never closes it
-   (Ashley Taylor)
-
-  TESTS
-   HBASE-5223  TestMetaReaderEditor is missing call to CatalogTracker.stop()
-
-Release 0.92.0 - 01/23/2012
-  INCOMPATIBLE CHANGES
-   HBASE-2002  Coprocessors: Client side support; Support RPC interface
-   changes at runtime (Gary Helmling via Andrew Purtell)
-   HBASE-3677  Generate a globally unique cluster ID (changed
-   ClusterStatus serialization)
-   HBASE-3762  HTableFactory.releaseHTableInterface() should throw IOException
-   instead of wrapping in RuntimeException (Ted Yu via garyh)
-   HBASE-3629  Update our thrift to 0.6 (Moaz Reyad)
-   HBASE-1502  Remove need for heartbeats in HBase
-   HBASE-451   Remove HTableDescriptor from HRegionInfo (Subbu M Iyer)
-   HBASE-451   Remove HTableDescriptor from HRegionInfo
-   addendum that fixes TestTableMapReduce
-   HBASE-3534  Action should not store or serialize regionName (Ted Yu)
-   HBASE-4197  RegionServer expects all scanner to be subclasses of
-   HRegion.RegionScanner (Lars Hofhansl)
-   HBASE-4233  Update protobuf dependency to 2.4.0a (todd)
-   HBASE-4299  Update to Avro 1.5.3 and use Avro Maven plugin to generate
-   Avro classes. (Alejandro Abdelnur)
-   HBASE-4369  Deprecate HConnection#getZookeeperWatcher in prep for HBASE-1762
-   HBASE-4247  Add isAborted method to the Abortable interface
-   (Akash Ashok)
-   HBASE-4503  Purge deprecated HBaseClusterTestCase
-   HBASE-4374  Up default regions size from 256M to 1G
-   HBASE-4648  Bytes.toBigDecimal() doesn't use offset (Bryan Keller via Lars 
H)
-   HBASE-4715  Remove stale broke .rb scripts from bin dir
-   HBASE-3433  Remove the KV copy of every KV in Scan; introduced by 
HBASE-3232 (Lars H)
-   HBASE-5017  Bump the default hfile.block.cache.size because of HFileV2
-
-  BUG FIXES
-   HBASE-3280  YouAreDeadException being swallowed in HRS getMaster
-   HBASE-3282  Need to retain DeadServers to ensure we don't allow
-   previously expired RS instances to rejoin cluster
-   HBASE-3283  NPE in AssignmentManager if processing shutdown of RS who
-   doesn't have any regions assigned to it
-   HBASE-3173  HBase 2984 breaks ability to specify BLOOMFILTER &
-   COMPRESSION via shell
-   HBASE-3310  Failing creating/altering table with compression agrument from
-   the HBase shell (Igor Ranitovic via Stack)
-   HBASE-3317  Javadoc and Throws Declaration for Bytes.incrementBytes() is
-   Wrong (Ed Kohlwey via Stack)
-   HBASE-1888  KeyValue methods throw NullPointerException instead of
-   IllegalArgumentException during parameter sanity check
-   HBASE-3337  Restore HBCK fix of unassignment and dupe assignment for new
-   master
-   HBASE-3332  Regions stuck in transition after RS failure
-   HBASE-3418  Increment operations can break when qualifiers are split
-   between memstore/snapshot and storefiles
-   HBASE-3403  Region orphaned after failure during split
-   HBASE-3492  NPE while splitting table with empty column family store
-   HBASE-3400  Coprocessor Support for Generic Interfaces
-   (Ed Kohlwey via Gary Helmling)
-   HBASE-3552  Coprocessors are unable to load if RegionServer is launched
-   using a different classloader than system default
-   HBASE-3578  TableInputFormat does not setup the configuration for HBase
-   mapreduce jobs correctly (Dan Harvey via Stack)
-   HBASE-3601  TestMasterFailover broken in TRUNK
-   HBASE-3605  Fix balancer log message
-   HBASE-3538  Column families allow to have slashes in name (Ian Knome via 
Stack)
-   HBASE-3313  Table name isn't checked in isTableEnabled/isTableDisabled
-   (Ted Yu via Stack)
-   HBASE-3514  Speedup HFile.Writer append (Matteo Bertozzi via Ryan)
-   

hbase git commit: HBASE-20295 fix NullPointException in TableOutputFormat.checkOutputSpecs

2018-04-07 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/branch-2 2cfa344ab -> 10ed3f41f


HBASE-20295 fix NullPointException in TableOutputFormat.checkOutputSpecs


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/10ed3f41
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/10ed3f41
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/10ed3f41

Branch: refs/heads/branch-2
Commit: 10ed3f41f5c19f996a33dd606950e647aebda7c6
Parents: 2cfa344
Author: michael.jin 
Authored: Thu Mar 29 07:06:10 2018 +0800
Committer: Michael Stack 
Committed: Sat Apr 7 09:30:52 2018 -0700

--
 .../org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java | 8 ++--
 1 file changed, 6 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/10ed3f41/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java
--
diff --git 
a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java
 
b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java
index 7598520..0a1928b 100644
--- 
a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java
+++ 
b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java
@@ -174,9 +174,13 @@ implements Configurable {
   @Override
   public void checkOutputSpecs(JobContext context) throws IOException,
   InterruptedException {
+Configuration hConf = context.getConfiguration();
+if(hConf == null) {
+  hConf = this.conf;
+}
 
-try (Admin admin = 
ConnectionFactory.createConnection(getConf()).getAdmin()) {
-  TableName tableName = TableName.valueOf(this.conf.get(OUTPUT_TABLE));
+try (Admin admin = ConnectionFactory.createConnection(hConf).getAdmin()) {
+  TableName tableName = TableName.valueOf(hConf.get(OUTPUT_TABLE));
   if (!admin.tableExists(tableName)) {
 throw new TableNotFoundException("Can't write, table does not exist:" +
 tableName.getNameAsString());

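The patch above amounts to preferring the job context's Configuration and falling back to the format's own conf only when the context returns none, before verifying that the output table exists. A minimal, self-contained sketch of that fallback-and-check pattern follows; the class name, table name and standalone main() are illustrative and not part of the patch, and the literal key simply mirrors TableOutputFormat's OUTPUT_TABLE constant.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class OutputTableCheck {

  // Verifies that the configured output table exists, preferring contextConf and
  // falling back to fallbackConf when the job context carries no configuration.
  static void checkOutputTable(Configuration contextConf, Configuration fallbackConf,
      String outputTableKey) throws IOException {
    Configuration conf = (contextConf != null) ? contextConf : fallbackConf;
    TableName tableName = TableName.valueOf(conf.get(outputTableKey));
    // try-with-resources closes both the Connection and the Admin.
    try (Connection connection = ConnectionFactory.createConnection(conf);
        Admin admin = connection.getAdmin()) {
      if (!admin.tableExists(tableName)) {
        throw new TableNotFoundException("Can't write, table does not exist: " + tableName);
      }
    }
  }

  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.mapred.outputtable", "my_output_table"); // illustrative table name
    // Passing null as the context configuration exercises the fallback path the fix guards.
    checkOutputTable(null, conf, "hbase.mapred.outputtable");
  }
}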


hbase git commit: HBASE-20295 fix NullPointException in TableOutputFormat.checkOutputSpecs

2018-04-07 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/branch-2.0 d396e4bc0 -> 57aaee5bb


HBASE-20295 fix NullPointException in TableOutputFormat.checkOutputSpecs


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/57aaee5b
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/57aaee5b
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/57aaee5b

Branch: refs/heads/branch-2.0
Commit: 57aaee5bbb846e2679b857329d2cb2d91cd29827
Parents: d396e4b
Author: michael.jin 
Authored: Thu Mar 29 07:06:10 2018 +0800
Committer: Michael Stack 
Committed: Sat Apr 7 09:30:23 2018 -0700

--
 .../org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java | 8 ++--
 1 file changed, 6 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/57aaee5b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java
--
diff --git 
a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java
 
b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java
index 7598520..0a1928b 100644
--- 
a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java
+++ 
b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java
@@ -174,9 +174,13 @@ implements Configurable {
   @Override
   public void checkOutputSpecs(JobContext context) throws IOException,
   InterruptedException {
+Configuration hConf = context.getConfiguration();
+if(hConf == null) {
+  hConf = this.conf;
+}
 
-try (Admin admin = 
ConnectionFactory.createConnection(getConf()).getAdmin()) {
-  TableName tableName = TableName.valueOf(this.conf.get(OUTPUT_TABLE));
+try (Admin admin = ConnectionFactory.createConnection(hConf).getAdmin()) {
+  TableName tableName = TableName.valueOf(hConf.get(OUTPUT_TABLE));
   if (!admin.tableExists(tableName)) {
 throw new TableNotFoundException("Can't write, table does not exist:" +
 tableName.getNameAsString());



hbase git commit: HBASE-20287 After cluster startup list_regions command fails on disabled table

2018-04-07 Thread psomogyi
Repository: hbase
Updated Branches:
  refs/heads/branch-2.0 dd94fe9f1 -> d396e4bc0


HBASE-20287 After cluster startup list_regions command fails on disabled table

Add is_enabled check for list_regions command

Signed-off-by: Mike Drob 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/d396e4bc
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/d396e4bc
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/d396e4bc

Branch: refs/heads/branch-2.0
Commit: d396e4bc075df271edf451bf1a24266f25f9aaf6
Parents: dd94fe9
Author: Peter Somogyi 
Authored: Thu Apr 5 12:12:14 2018 +0200
Committer: Peter Somogyi 
Committed: Sat Apr 7 18:12:46 2018 +0200

--
 .../src/main/ruby/shell/commands/list_regions.rb|  2 ++
 hbase-shell/src/test/ruby/hbase/admin_test.rb   | 12 
 2 files changed, 14 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/d396e4bc/hbase-shell/src/main/ruby/shell/commands/list_regions.rb
--
diff --git a/hbase-shell/src/main/ruby/shell/commands/list_regions.rb 
b/hbase-shell/src/main/ruby/shell/commands/list_regions.rb
index bcc0c4a..0ce569c 100644
--- a/hbase-shell/src/main/ruby/shell/commands/list_regions.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/list_regions.rb
@@ -50,6 +50,8 @@ EOF
   options = { SERVER_NAME => options }
 end
 
+raise "Table #{table_name} must be enabled." unless 
admin.enabled?(table_name)
+
 size_hash = {}
 if cols.nil?
   size_hash = { 'SERVER_NAME' => 12, 'REGION_NAME' => 12, 'START_KEY' 
=> 10, 'END_KEY' => 10, 'SIZE' => 5, 'REQ' => 5, 'LOCALITY' => 10 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/d396e4bc/hbase-shell/src/test/ruby/hbase/admin_test.rb
--
diff --git a/hbase-shell/src/test/ruby/hbase/admin_test.rb 
b/hbase-shell/src/test/ruby/hbase/admin_test.rb
index 929484c..a27bbc5 100644
--- a/hbase-shell/src/test/ruby/hbase/admin_test.rb
+++ b/hbase-shell/src/test/ruby/hbase/admin_test.rb
@@ -319,6 +319,18 @@ module Hbase
   admin.truncate_preserve(@create_test_name, 
$TEST_CLUSTER.getConfiguration)
   assert_equal(splits, table(@create_test_name)._get_splits_internal())
 end
+
+
#---
+
+define_test "list_regions should fail for disabled table" do
+  drop_test_table(@create_test_name)
+  admin.create(@create_test_name, 'a')
+  command(:disable, @create_test_name)
+  assert(:is_disabled, @create_test_name)
+  assert_raise(RuntimeError) do
+command(:list_regions, @create_test_name)
+  end
+end
   end
 
   # Simple administration methods tests


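The shell-side guard added above has a direct client-side analogue: check that the table is enabled before asking for its regions. A minimal Java sketch of that pre-check against the Admin API follows; the class name and table name are illustrative, and it assumes a reachable cluster configured through hbase-site.xml on the classpath.

import java.io.IOException;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotEnabledException;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionInfo;

public class ListRegionsPreCheck {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("my_table"); // illustrative table name
    try (Connection connection = ConnectionFactory.createConnection(conf);
        Admin admin = connection.getAdmin()) {
      // Mirror the shell guard: refuse to list regions of a disabled table.
      if (!admin.isTableEnabled(table)) {
        throw new TableNotEnabledException("Table " + table + " must be enabled.");
      }
      List<RegionInfo> regions = admin.getRegions(table);
      for (RegionInfo region : regions) {
        System.out.println(region.getRegionNameAsString());
      }
    }
  }
}

In the shell itself the same condition now surfaces as the RuntimeError that the new admin_test.rb case asserts.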

hbase git commit: HBASE-20287 After cluster startup list_regions command fails on disabled table

2018-04-07 Thread psomogyi
Repository: hbase
Updated Branches:
  refs/heads/branch-2 dcc1d9e36 -> 2cfa344ab


HBASE-20287 After cluster startup list_regions command fails on disabled table

Add is_enabled check for list_regions command

Signed-off-by: Mike Drob 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/2cfa344a
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/2cfa344a
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/2cfa344a

Branch: refs/heads/branch-2
Commit: 2cfa344ab15f52441c9cf8f48d6f3cdca565a067
Parents: dcc1d9e
Author: Peter Somogyi 
Authored: Thu Apr 5 12:12:14 2018 +0200
Committer: Peter Somogyi 
Committed: Sat Apr 7 18:12:04 2018 +0200

--
 .../src/main/ruby/shell/commands/list_regions.rb|  2 ++
 hbase-shell/src/test/ruby/hbase/admin_test.rb   | 12 
 2 files changed, 14 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/2cfa344a/hbase-shell/src/main/ruby/shell/commands/list_regions.rb
--
diff --git a/hbase-shell/src/main/ruby/shell/commands/list_regions.rb 
b/hbase-shell/src/main/ruby/shell/commands/list_regions.rb
index bcc0c4a..0ce569c 100644
--- a/hbase-shell/src/main/ruby/shell/commands/list_regions.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/list_regions.rb
@@ -50,6 +50,8 @@ EOF
   options = { SERVER_NAME => options }
 end
 
+raise "Table #{table_name} must be enabled." unless 
admin.enabled?(table_name)
+
 size_hash = {}
 if cols.nil?
   size_hash = { 'SERVER_NAME' => 12, 'REGION_NAME' => 12, 'START_KEY' 
=> 10, 'END_KEY' => 10, 'SIZE' => 5, 'REQ' => 5, 'LOCALITY' => 10 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/2cfa344a/hbase-shell/src/test/ruby/hbase/admin_test.rb
--
diff --git a/hbase-shell/src/test/ruby/hbase/admin_test.rb 
b/hbase-shell/src/test/ruby/hbase/admin_test.rb
index 929484c..a27bbc5 100644
--- a/hbase-shell/src/test/ruby/hbase/admin_test.rb
+++ b/hbase-shell/src/test/ruby/hbase/admin_test.rb
@@ -319,6 +319,18 @@ module Hbase
   admin.truncate_preserve(@create_test_name, 
$TEST_CLUSTER.getConfiguration)
   assert_equal(splits, table(@create_test_name)._get_splits_internal())
 end
+
+
#---
+
+define_test "list_regions should fail for disabled table" do
+  drop_test_table(@create_test_name)
+  admin.create(@create_test_name, 'a')
+  command(:disable, @create_test_name)
+  assert(:is_disabled, @create_test_name)
+  assert_raise(RuntimeError) do
+command(:list_regions, @create_test_name)
+  end
+end
   end
 
   # Simple administration methods tests



hbase git commit: HBASE-20287 After cluster startup list_regions command fails on disabled table

2018-04-07 Thread psomogyi
Repository: hbase
Updated Branches:
  refs/heads/master adc0e85e8 -> bdc0d3a4c


HBASE-20287 After cluster startup list_regions command fails on disabled table

Add is_enabled check for list_regions command

Signed-off-by: Mike Drob 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/bdc0d3a4
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/bdc0d3a4
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/bdc0d3a4

Branch: refs/heads/master
Commit: bdc0d3a4c612289af8e0472f4ae30b1c39c09c7f
Parents: adc0e85
Author: Peter Somogyi 
Authored: Thu Apr 5 12:12:14 2018 +0200
Committer: Peter Somogyi 
Committed: Sat Apr 7 18:11:29 2018 +0200

--
 .../src/main/ruby/shell/commands/list_regions.rb|  2 ++
 hbase-shell/src/test/ruby/hbase/admin_test.rb   | 12 
 2 files changed, 14 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/bdc0d3a4/hbase-shell/src/main/ruby/shell/commands/list_regions.rb
--
diff --git a/hbase-shell/src/main/ruby/shell/commands/list_regions.rb 
b/hbase-shell/src/main/ruby/shell/commands/list_regions.rb
index bcc0c4a..0ce569c 100644
--- a/hbase-shell/src/main/ruby/shell/commands/list_regions.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/list_regions.rb
@@ -50,6 +50,8 @@ EOF
   options = { SERVER_NAME => options }
 end
 
+raise "Table #{table_name} must be enabled." unless 
admin.enabled?(table_name)
+
 size_hash = {}
 if cols.nil?
   size_hash = { 'SERVER_NAME' => 12, 'REGION_NAME' => 12, 'START_KEY' 
=> 10, 'END_KEY' => 10, 'SIZE' => 5, 'REQ' => 5, 'LOCALITY' => 10 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/bdc0d3a4/hbase-shell/src/test/ruby/hbase/admin_test.rb
--
diff --git a/hbase-shell/src/test/ruby/hbase/admin_test.rb 
b/hbase-shell/src/test/ruby/hbase/admin_test.rb
index 929484c..a27bbc5 100644
--- a/hbase-shell/src/test/ruby/hbase/admin_test.rb
+++ b/hbase-shell/src/test/ruby/hbase/admin_test.rb
@@ -319,6 +319,18 @@ module Hbase
   admin.truncate_preserve(@create_test_name, 
$TEST_CLUSTER.getConfiguration)
   assert_equal(splits, table(@create_test_name)._get_splits_internal())
 end
+
+
#---
+
+define_test "list_regions should fail for disabled table" do
+  drop_test_table(@create_test_name)
+  admin.create(@create_test_name, 'a')
+  command(:disable, @create_test_name)
+  assert(:is_disabled, @create_test_name)
+  assert_raise(RuntimeError) do
+command(:list_regions, @create_test_name)
+  end
+end
   end
 
   # Simple administration methods tests



hbase-site git commit: INFRA-10751 Empty commit

2018-04-07 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 1ee650677 -> 3ef91fc43


INFRA-10751 Empty commit


Project: http://git-wip-us.apache.org/repos/asf/hbase-site/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase-site/commit/3ef91fc4
Tree: http://git-wip-us.apache.org/repos/asf/hbase-site/tree/3ef91fc4
Diff: http://git-wip-us.apache.org/repos/asf/hbase-site/diff/3ef91fc4

Branch: refs/heads/asf-site
Commit: 3ef91fc43b5107e3f116231be99a2e347e33a9a4
Parents: 1ee6506
Author: jenkins 
Authored: Sat Apr 7 14:48:15 2018 +
Committer: jenkins 
Committed: Sat Apr 7 14:48:15 2018 +

--

--




[14/17] hbase-site git commit: Published site at adc0e85e8532870fa83cb21a44061c83ae77ec34.

2018-04-07 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1ee65067/devapidocs/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.html
--
[Diff of the regenerated Javadoc page devapidocs/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.html; the extracted HTML is garbled and the message is truncated in this archive. The substantive change it records is the replacement of the private onScanComplete(TableName, LocateRequest, List<Result>, Throwable) method with a boolean-returning onScanNext(TableName, LocateRequest, Result, Throwable); the remainder is line-anchor churn from regeneration.]

[07/17] hbase-site git commit: Published site at adc0e85e8532870fa83cb21a44061c83ae77ec34.

2018-04-07 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1ee65067/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.MasterServiceStubMaker.html
--
[Diff of the regenerated source-HTML page devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.MasterServiceStubMaker.html; the extracted body is a garbled, truncated dump of the page's embedded import block and records only regeneration churn.]

[12/17] hbase-site git commit: Published site at adc0e85e8532870fa83cb21a44061c83ae77ec34.

2018-04-07 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1ee65067/devapidocs/org/apache/hadoop/hbase/client/Durability.html
--
[Diffs of the regenerated Javadoc pages Durability.html, Scan.ReadType.html, SnapshotType.html and class-use/AsyncNonMetaRegionLocator.LocateRequest.html; the extracted HTML is garbled and truncated and shows only line-anchor churn in the values()/valueOf() sections plus the onScanNext signature change already noted.]

[08/17] hbase-site git commit: Published site at adc0e85e8532870fa83cb21a44061c83ae77ec34.

2018-04-07 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1ee65067/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.MasterServiceState.html
--
[Diff of the regenerated source-HTML page devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.MasterServiceState.html; the extracted body is the same garbled, truncated import-block dump as the MasterServiceStubMaker page above.]

[11/17] hbase-site git commit: Published site at adc0e85e8532870fa83cb21a44061c83ae77ec34.

2018-04-07 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1ee65067/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.LocateRequest.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.LocateRequest.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.LocateRequest.html
index 63b8c53..88e70e2 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.LocateRequest.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.LocateRequest.html
@@ -25,460 +25,470 @@
 017 */
 018package org.apache.hadoop.hbase.client;
 019
-020import static 
org.apache.hadoop.hbase.HConstants.CATALOG_FAMILY;
-021import static 
org.apache.hadoop.hbase.HConstants.NINES;
-022import static 
org.apache.hadoop.hbase.HConstants.ZEROES;
-023import static 
org.apache.hadoop.hbase.TableName.META_TABLE_NAME;
-024import static 
org.apache.hadoop.hbase.client.ConnectionUtils.createClosestRowAfter;
-025import static 
org.apache.hadoop.hbase.client.ConnectionUtils.isEmptyStopRow;
-026import static 
org.apache.hadoop.hbase.client.RegionInfo.createRegionName;
-027import static 
org.apache.hadoop.hbase.util.Bytes.BYTES_COMPARATOR;
-028import static 
org.apache.hadoop.hbase.util.CollectionUtils.computeIfAbsent;
-029
-030import java.io.IOException;
-031import java.util.Arrays;
-032import java.util.HashSet;
-033import java.util.Iterator;
-034import java.util.LinkedHashMap;
-035import java.util.List;
-036import java.util.Map;
-037import java.util.Optional;
-038import java.util.Set;
-039import 
java.util.concurrent.CompletableFuture;
-040import 
java.util.concurrent.ConcurrentHashMap;
-041import 
java.util.concurrent.ConcurrentMap;
-042import 
java.util.concurrent.ConcurrentNavigableMap;
-043import 
java.util.concurrent.ConcurrentSkipListMap;
-044
-045import 
org.apache.hadoop.hbase.DoNotRetryIOException;
-046import 
org.apache.hadoop.hbase.HRegionLocation;
-047import 
org.apache.hadoop.hbase.MetaTableAccessor;
-048import 
org.apache.hadoop.hbase.RegionLocations;
-049import 
org.apache.hadoop.hbase.TableName;
-050import 
org.apache.hadoop.hbase.TableNotFoundException;
-051import 
org.apache.hadoop.hbase.util.Bytes;
-052import 
org.apache.yetus.audience.InterfaceAudience;
-053import org.slf4j.Logger;
-054import org.slf4j.LoggerFactory;
-055
-056/**
-057 * The asynchronous locator for regions 
other than meta.
-058 */
-059@InterfaceAudience.Private
-060class AsyncNonMetaRegionLocator {
-061
-062  private static final Logger LOG = 
LoggerFactory.getLogger(AsyncNonMetaRegionLocator.class);
-063
-064  static final String 
MAX_CONCURRENT_LOCATE_REQUEST_PER_TABLE =
-065
"hbase.client.meta.max.concurrent.locate.per.table";
-066
-067  private static final int 
DEFAULT_MAX_CONCURRENT_LOCATE_REQUEST_PER_TABLE = 8;
-068
-069  private final AsyncConnectionImpl 
conn;
-070
-071  private final int 
maxConcurrentLocateRequestPerTable;
-072
-073  private final 
ConcurrentMapTableName, TableCache cache = new 
ConcurrentHashMap();
-074
-075  private static final class 
LocateRequest {
-076
-077public final byte[] row;
-078
-079public final RegionLocateType 
locateType;
-080
-081public LocateRequest(byte[] row, 
RegionLocateType locateType) {
-082  this.row = row;
-083  this.locateType = locateType;
-084}
-085
-086@Override
-087public int hashCode() {
-088  return Bytes.hashCode(row) ^ 
locateType.hashCode();
-089}
-090
-091@Override
-092public boolean equals(Object obj) {
-093  if (obj == null || obj.getClass() 
!= LocateRequest.class) {
-094return false;
-095  }
-096  LocateRequest that = 
(LocateRequest) obj;
-097  return 
locateType.equals(that.locateType)  Bytes.equals(row, that.row);
-098}
-099  }
-100
-101  private static final class TableCache 
{
-102
-103public final 
ConcurrentNavigableMapbyte[], HRegionLocation cache =
-104  new 
ConcurrentSkipListMap(BYTES_COMPARATOR);
-105
-106public final SetLocateRequest 
pendingRequests = new HashSet();
-107
-108public final MapLocateRequest, 
CompletableFutureHRegionLocation allRequests =
-109  new LinkedHashMap();
-110
-111public boolean hasQuota(int max) {
-112  return pendingRequests.size()  
max;
-113}
-114
-115public boolean 
isPending(LocateRequest req) {
-116  return 
pendingRequests.contains(req);
-117}
-118
-119public void send(LocateRequest req) 
{
-120  pendingRequests.add(req);
-121}
-122
-123public OptionalLocateRequest 
getCandidate() {
-124  return 
allRequests.keySet().stream().filter(r - !isPending(r)).findFirst();
-125}
-126
-127public void 
clearCompletedRequests(OptionalHRegionLocation location) {
-128  for 
(IteratorMap.EntryLocateRequest, 
CompletableFutureHRegionLocation iter =
-129

[13/17] hbase-site git commit: Published site at adc0e85e8532870fa83cb21a44061c83ae77ec34.

2018-04-07 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1ee65067/devapidocs/org/apache/hadoop/hbase/client/ConnectionImplementation.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/ConnectionImplementation.html 
b/devapidocs/org/apache/hadoop/hbase/client/ConnectionImplementation.html
index 1416209..9889c69 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/ConnectionImplementation.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/ConnectionImplementation.html
@@ -118,7 +118,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Private
-class ConnectionImplementation
+class ConnectionImplementation
 extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
 implements ClusterConnection, https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.html?is-external=true;
 title="class or interface in java.io">Closeable
 Main implementation of Connection 
and ClusterConnection interfaces.
@@ -724,7 +724,10 @@ implements Search the hbase:meta table for the HRegionLocation info 
that contains the table and row we're
+ seeking.
+
 
 
 https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListHRegionLocation
@@ -844,7 +847,7 @@ implements 
 
 RETRIES_BY_SERVER_KEY
-public static finalhttps://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String RETRIES_BY_SERVER_KEY
+public static finalhttps://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String RETRIES_BY_SERVER_KEY
 
 See Also:
 Constant
 Field Values
@@ -857,7 +860,7 @@ implements 
 
 LOG
-private static finalorg.slf4j.Logger LOG
+private static finalorg.slf4j.Logger LOG
 
 
 
@@ -866,7 +869,7 @@ implements 
 
 RESOLVE_HOSTNAME_ON_FAIL_KEY
-private static finalhttps://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String RESOLVE_HOSTNAME_ON_FAIL_KEY
+private static finalhttps://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String RESOLVE_HOSTNAME_ON_FAIL_KEY
 
 See Also:
 Constant
 Field Values
@@ -879,7 +882,7 @@ implements 
 
 hostnamesCanChange
-private finalboolean hostnamesCanChange
+private finalboolean hostnamesCanChange
 
 
 
@@ -888,7 +891,7 @@ implements 
 
 pause
-private finallong pause
+private finallong pause
 
 
 
@@ -897,7 +900,7 @@ implements 
 
 pauseForCQTBE
-private finallong pauseForCQTBE
+private finallong pauseForCQTBE
 
 
 
@@ -906,7 +909,7 @@ implements 
 
 useMetaReplicas
-privateboolean useMetaReplicas
+privateboolean useMetaReplicas
 
 
 
@@ -915,7 +918,7 @@ implements 
 
 metaReplicaCallTimeoutScanInMicroSecond
-private finalint metaReplicaCallTimeoutScanInMicroSecond
+private finalint metaReplicaCallTimeoutScanInMicroSecond
 
 
 
@@ -924,7 +927,7 @@ implements 
 
 numTries
-private finalint numTries
+private finalint numTries
 
 
 
@@ -933,7 +936,7 @@ implements 
 
 rpcTimeout
-finalint rpcTimeout
+finalint rpcTimeout
 
 
 
@@ -942,7 +945,7 @@ implements 
 
 nonceGenerator
-private static volatileNonceGenerator nonceGenerator
+private static volatileNonceGenerator nonceGenerator
 Global nonceGenerator shared per client.Currently there's 
no reason to limit its scope.
  Once it's set under nonceGeneratorCreateLock, it is never unset or 
changed.
 
@@ -953,7 +956,7 @@ implements 
 
 nonceGeneratorCreateLock
-private static finalhttps://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object nonceGeneratorCreateLock
+private static finalhttps://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object nonceGeneratorCreateLock
 The nonce generator lock. Only taken when creating 
Connection, which gets a private copy.
 
 
@@ -963,7 +966,7 @@ implements 
 
 asyncProcess
-private finalAsyncProcess asyncProcess
+private finalAsyncProcess asyncProcess
 
 
 
@@ -972,7 +975,7 @@ implements 
 
 stats
-private finalServerStatisticTracker stats
+private finalServerStatisticTracker stats
 
 
 
@@ -981,7 +984,7 @@ implements 
 
 closed
-private volatileboolean closed
+private volatileboolean closed
 
 
 
@@ -990,7 +993,7 @@ implements 
 
 aborted
-private volatileboolean aborted
+private volatileboolean aborted
 
 
 
@@ -999,7 +1002,7 @@ implements 
 
 clusterStatusListener
-ClusterStatusListener clusterStatusListener
+ClusterStatusListener clusterStatusListener
 
 
 
@@ -1008,7 +1011,7 @@ implements 
 
 metaRegionLock
-private finalhttps://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object metaRegionLock
+private 

[05/17] hbase-site git commit: Published site at adc0e85e8532870fa83cb21a44061c83ae77ec34.

2018-04-07 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1ee65067/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.ServerErrorTracker.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.ServerErrorTracker.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.ServerErrorTracker.html
index 98a45a0..4f02ded 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.ServerErrorTracker.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.ServerErrorTracker.html
@@ -66,1947 +66,1940 @@
 058import 
org.apache.hadoop.hbase.TableNotEnabledException;
 059import 
org.apache.hadoop.hbase.TableNotFoundException;
 060import 
org.apache.hadoop.hbase.ZooKeeperConnectionException;
-061import 
org.apache.hadoop.hbase.client.backoff.ClientBackoffPolicy;
-062import 
org.apache.hadoop.hbase.client.backoff.ClientBackoffPolicyFactory;
-063import 
org.apache.hadoop.hbase.exceptions.ClientExceptionsUtil;
-064import 
org.apache.hadoop.hbase.exceptions.RegionMovedException;
-065import 
org.apache.hadoop.hbase.ipc.RpcClient;
-066import 
org.apache.hadoop.hbase.ipc.RpcClientFactory;
-067import 
org.apache.hadoop.hbase.ipc.RpcControllerFactory;
-068import 
org.apache.hadoop.hbase.log.HBaseMarkers;
-069import 
org.apache.hadoop.hbase.regionserver.RegionServerStoppedException;
-070import 
org.apache.hadoop.hbase.security.User;
-071import 
org.apache.hadoop.hbase.util.Bytes;
-072import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-073import 
org.apache.hadoop.hbase.util.ExceptionUtil;
-074import 
org.apache.hadoop.hbase.util.Pair;
-075import 
org.apache.hadoop.hbase.util.ReflectionUtils;
-076import 
org.apache.hadoop.hbase.util.Threads;
-077import 
org.apache.hadoop.ipc.RemoteException;
-078import 
org.apache.yetus.audience.InterfaceAudience;
-079import 
org.apache.zookeeper.KeeperException;
-080import org.slf4j.Logger;
-081import org.slf4j.LoggerFactory;
-082
-083import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-084import 
org.apache.hbase.thirdparty.com.google.common.base.Throwables;
-085import 
org.apache.hbase.thirdparty.com.google.protobuf.BlockingRpcChannel;
-086import 
org.apache.hbase.thirdparty.com.google.protobuf.RpcController;
-087import 
org.apache.hbase.thirdparty.com.google.protobuf.ServiceException;
-088import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-089import 
org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
-090import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos;
-091import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
-092import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService.BlockingInterface;
-093import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos;
-094import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DecommissionRegionServersRequest;
-095import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DecommissionRegionServersResponse;
-096import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsBalancerEnabledRequest;
-097import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsBalancerEnabledResponse;
-098import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsNormalizerEnabledRequest;
-099import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsNormalizerEnabledResponse;
-100import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDecommissionedRegionServersRequest;
-101import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDecommissionedRegionServersResponse;
-102import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.NormalizeRequest;
-103import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.NormalizeResponse;
-104import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RecommissionRegionServerRequest;
-105import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RecommissionRegionServerResponse;
-106import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest;
-107import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse;
-108import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetNormalizerRunningRequest;
-109import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetNormalizerRunningResponse;
-110import 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetQuotaStatesRequest;
-111import 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetQuotaStatesResponse;
-112import 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaRegionSizesRequest;
-113import 

[10/17] hbase-site git commit: Published site at adc0e85e8532870fa83cb21a44061c83ae77ec34.

2018-04-07 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1ee65067/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.TableCache.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.TableCache.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.TableCache.html
index 63b8c53..88e70e2 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.TableCache.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.TableCache.html
@@ -25,460 +25,470 @@
 017 */
 018package org.apache.hadoop.hbase.client;
 019
-020import static 
org.apache.hadoop.hbase.HConstants.CATALOG_FAMILY;
-021import static 
org.apache.hadoop.hbase.HConstants.NINES;
-022import static 
org.apache.hadoop.hbase.HConstants.ZEROES;
-023import static 
org.apache.hadoop.hbase.TableName.META_TABLE_NAME;
-024import static 
org.apache.hadoop.hbase.client.ConnectionUtils.createClosestRowAfter;
-025import static 
org.apache.hadoop.hbase.client.ConnectionUtils.isEmptyStopRow;
-026import static 
org.apache.hadoop.hbase.client.RegionInfo.createRegionName;
-027import static 
org.apache.hadoop.hbase.util.Bytes.BYTES_COMPARATOR;
-028import static 
org.apache.hadoop.hbase.util.CollectionUtils.computeIfAbsent;
-029
-030import java.io.IOException;
-031import java.util.Arrays;
-032import java.util.HashSet;
-033import java.util.Iterator;
-034import java.util.LinkedHashMap;
-035import java.util.List;
-036import java.util.Map;
-037import java.util.Optional;
-038import java.util.Set;
-039import 
java.util.concurrent.CompletableFuture;
-040import 
java.util.concurrent.ConcurrentHashMap;
-041import 
java.util.concurrent.ConcurrentMap;
-042import 
java.util.concurrent.ConcurrentNavigableMap;
-043import 
java.util.concurrent.ConcurrentSkipListMap;
-044
-045import 
org.apache.hadoop.hbase.DoNotRetryIOException;
-046import 
org.apache.hadoop.hbase.HRegionLocation;
-047import 
org.apache.hadoop.hbase.MetaTableAccessor;
-048import 
org.apache.hadoop.hbase.RegionLocations;
-049import 
org.apache.hadoop.hbase.TableName;
-050import 
org.apache.hadoop.hbase.TableNotFoundException;
-051import 
org.apache.hadoop.hbase.util.Bytes;
-052import 
org.apache.yetus.audience.InterfaceAudience;
-053import org.slf4j.Logger;
-054import org.slf4j.LoggerFactory;
-055
-056/**
-057 * The asynchronous locator for regions 
other than meta.
-058 */
-059@InterfaceAudience.Private
-060class AsyncNonMetaRegionLocator {
-061
-062  private static final Logger LOG = 
LoggerFactory.getLogger(AsyncNonMetaRegionLocator.class);
-063
-064  static final String 
MAX_CONCURRENT_LOCATE_REQUEST_PER_TABLE =
-065
"hbase.client.meta.max.concurrent.locate.per.table";
-066
-067  private static final int 
DEFAULT_MAX_CONCURRENT_LOCATE_REQUEST_PER_TABLE = 8;
-068
-069  private final AsyncConnectionImpl 
conn;
-070
-071  private final int 
maxConcurrentLocateRequestPerTable;
-072
-073  private final 
ConcurrentMapTableName, TableCache cache = new 
ConcurrentHashMap();
-074
-075  private static final class 
LocateRequest {
-076
-077public final byte[] row;
-078
-079public final RegionLocateType 
locateType;
-080
-081public LocateRequest(byte[] row, 
RegionLocateType locateType) {
-082  this.row = row;
-083  this.locateType = locateType;
-084}
-085
-086@Override
-087public int hashCode() {
-088  return Bytes.hashCode(row) ^ 
locateType.hashCode();
-089}
-090
-091@Override
-092public boolean equals(Object obj) {
-093  if (obj == null || obj.getClass() 
!= LocateRequest.class) {
-094return false;
-095  }
-096  LocateRequest that = 
(LocateRequest) obj;
-097  return 
locateType.equals(that.locateType)  Bytes.equals(row, that.row);
-098}
-099  }
-100
-101  private static final class TableCache 
{
-102
-103public final 
ConcurrentNavigableMapbyte[], HRegionLocation cache =
-104  new 
ConcurrentSkipListMap(BYTES_COMPARATOR);
-105
-106public final SetLocateRequest 
pendingRequests = new HashSet();
-107
-108public final MapLocateRequest, 
CompletableFutureHRegionLocation allRequests =
-109  new LinkedHashMap();
-110
-111public boolean hasQuota(int max) {
-112  return pendingRequests.size()  
max;
-113}
-114
-115public boolean 
isPending(LocateRequest req) {
-116  return 
pendingRequests.contains(req);
-117}
-118
-119public void send(LocateRequest req) 
{
-120  pendingRequests.add(req);
-121}
-122
-123public OptionalLocateRequest 
getCandidate() {
-124  return 
allRequests.keySet().stream().filter(r - !isPending(r)).findFirst();
-125}
-126
-127public void 
clearCompletedRequests(OptionalHRegionLocation location) {
-128  for 
(IteratorMap.EntryLocateRequest, 
CompletableFutureHRegionLocation iter =
-129
allRequests.entrySet().iterator(); 

[06/17] hbase-site git commit: Published site at adc0e85e8532870fa83cb21a44061c83ae77ec34.

2018-04-07 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1ee65067/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.ServerErrorTracker.ServerErrors.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.ServerErrorTracker.ServerErrors.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.ServerErrorTracker.ServerErrors.html
index 98a45a0..4f02ded 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.ServerErrorTracker.ServerErrors.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.ServerErrorTracker.ServerErrors.html
@@ -66,1947 +66,1940 @@
 058import org.apache.hadoop.hbase.TableNotEnabledException;
 059import org.apache.hadoop.hbase.TableNotFoundException;
 060import org.apache.hadoop.hbase.ZooKeeperConnectionException;
-061import org.apache.hadoop.hbase.client.backoff.ClientBackoffPolicy;
-062import org.apache.hadoop.hbase.client.backoff.ClientBackoffPolicyFactory;
-063import org.apache.hadoop.hbase.exceptions.ClientExceptionsUtil;
-064import org.apache.hadoop.hbase.exceptions.RegionMovedException;
-065import org.apache.hadoop.hbase.ipc.RpcClient;
-066import org.apache.hadoop.hbase.ipc.RpcClientFactory;
-067import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
-068import org.apache.hadoop.hbase.log.HBaseMarkers;
-069import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException;
-070import org.apache.hadoop.hbase.security.User;
-071import org.apache.hadoop.hbase.util.Bytes;
-072import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-073import org.apache.hadoop.hbase.util.ExceptionUtil;
-074import org.apache.hadoop.hbase.util.Pair;
-075import org.apache.hadoop.hbase.util.ReflectionUtils;
-076import org.apache.hadoop.hbase.util.Threads;
-077import org.apache.hadoop.ipc.RemoteException;
-078import org.apache.yetus.audience.InterfaceAudience;
-079import org.apache.zookeeper.KeeperException;
-080import org.slf4j.Logger;
-081import org.slf4j.LoggerFactory;
-082
-083import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-084import org.apache.hbase.thirdparty.com.google.common.base.Throwables;
-085import org.apache.hbase.thirdparty.com.google.protobuf.BlockingRpcChannel;
-086import org.apache.hbase.thirdparty.com.google.protobuf.RpcController;
-087import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException;
-088import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-089import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
-090import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos;
-091import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
-092import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService.BlockingInterface;
-093import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos;
-094import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DecommissionRegionServersRequest;
-095import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DecommissionRegionServersResponse;
-096import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsBalancerEnabledRequest;
-097import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsBalancerEnabledResponse;
-098import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsNormalizerEnabledRequest;
-099import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsNormalizerEnabledResponse;
-100import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDecommissionedRegionServersRequest;
-101import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDecommissionedRegionServersResponse;
-102import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.NormalizeRequest;
-103import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.NormalizeResponse;
-104import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RecommissionRegionServerRequest;
-105import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RecommissionRegionServerResponse;
-106import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest;
-107import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse;
-108import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetNormalizerRunningRequest;
-109import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetNormalizerRunningResponse;
-110import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetQuotaStatesRequest;
-111import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetQuotaStatesResponse;
-112import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaRegionSizesRequest;

[17/17] hbase-site git commit: Published site at adc0e85e8532870fa83cb21a44061c83ae77ec34.

2018-04-07 Thread git-site-role
Published site at adc0e85e8532870fa83cb21a44061c83ae77ec34.


Project: http://git-wip-us.apache.org/repos/asf/hbase-site/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase-site/commit/1ee65067
Tree: http://git-wip-us.apache.org/repos/asf/hbase-site/tree/1ee65067
Diff: http://git-wip-us.apache.org/repos/asf/hbase-site/diff/1ee65067

Branch: refs/heads/asf-site
Commit: 1ee650677527dfb0c57571c3599b1568e4528f18
Parents: 40396e2
Author: jenkins 
Authored: Sat Apr 7 14:47:55 2018 +
Committer: jenkins 
Committed: Sat Apr 7 14:47:55 2018 +

--
 acid-semantics.html | 4 +-
 apache_hbase_reference_guide.pdf| 42977 +
 book.html   |   203 +-
 bulk-loads.html | 4 +-
 checkstyle-aggregate.html   |88 +-
 checkstyle.rss  |20 +-
 coc.html| 4 +-
 dependencies.html   | 4 +-
 dependency-convergence.html | 4 +-
 dependency-info.html| 4 +-
 dependency-management.html  | 4 +-
 devapidocs/constant-values.html | 6 +-
 devapidocs/index-all.html   | 9 +-
 .../hadoop/hbase/ClusterMetrics.Option.html | 4 +-
 .../apache/hadoop/hbase/CompareOperator.html| 4 +-
 .../hadoop/hbase/class-use/RegionLocations.html | 5 +-
 .../hadoop/hbase/class-use/TableName.html   |15 +-
 ...AsyncNonMetaRegionLocator.LocateRequest.html |12 +-
 .../AsyncNonMetaRegionLocator.TableCache.html   |22 +-
 .../hbase/client/AsyncNonMetaRegionLocator.html |64 +-
 .../apache/hadoop/hbase/client/CompactType.html | 4 +-
 .../hadoop/hbase/client/CompactionState.html| 4 +-
 ...ectionImplementation.MasterServiceState.html |18 +-
 ...onImplementation.MasterServiceStubMaker.html |10 +-
 ...ntation.ServerErrorTracker.ServerErrors.html |10 +-
 ...ectionImplementation.ServerErrorTracker.html |20 +-
 .../hbase/client/ConnectionImplementation.html  |   239 +-
 .../apache/hadoop/hbase/client/Durability.html  | 4 +-
 .../hadoop/hbase/client/Scan.ReadType.html  | 4 +-
 .../hadoop/hbase/client/SnapshotType.html   | 4 +-
 ...AsyncNonMetaRegionLocator.LocateRequest.html |10 +-
 .../hadoop/hbase/client/class-use/Result.html   |34 +-
 .../hbase/filter/CompareFilter.CompareOp.html   | 4 +-
 .../hadoop/hbase/filter/Filter.ReturnCode.html  | 4 +-
 .../security/access/Permission.Action.html  | 4 +-
 .../org/apache/hadoop/hbase/Version.html| 6 +-
 ...AsyncNonMetaRegionLocator.LocateRequest.html |   888 +-
 .../AsyncNonMetaRegionLocator.TableCache.html   |   888 +-
 .../hbase/client/AsyncNonMetaRegionLocator.html |   888 +-
 ...ectionImplementation.MasterServiceState.html |  3559 +-
 ...onImplementation.MasterServiceStubMaker.html |  3559 +-
 ...ntation.ServerErrorTracker.ServerErrors.html |  3559 +-
 ...ectionImplementation.ServerErrorTracker.html |  3559 +-
 .../hbase/client/ConnectionImplementation.html  |  3559 +-
 export_control.html | 4 +-
 index.html  | 4 +-
 integration.html| 4 +-
 issue-tracking.html | 4 +-
 license.html| 4 +-
 mail-lists.html | 4 +-
 metrics.html| 4 +-
 old_news.html   | 4 +-
 plugin-management.html  | 4 +-
 plugins.html| 4 +-
 poweredbyhbase.html | 4 +-
 project-info.html   | 4 +-
 project-reports.html| 4 +-
 project-summary.html| 4 +-
 pseudo-distributed.html | 4 +-
 replication.html| 4 +-
 resources.html  | 4 +-
 source-repository.html  | 4 +-
 sponsors.html   | 4 +-
 supportingprojects.html | 4 +-
 team-list.html  | 4 +-
 testdevapidocs/allclasses-frame.html| 1 +
 testdevapidocs/allclasses-noframe.html  | 1 +
 testdevapidocs/index-all.html   |22 +-
 .../org/apache/hadoop/hbase/TestSize.html   | 4 +-
 .../org/apache/hadoop/hbase/TestSplitMerge.html |   362 +
 .../hbase/TestStochasticBalancerJmxMetrics.html | 4 +-
 .../hadoop/hbase/backup/package-tree.html   | 2 +-
 

[04/17] hbase-site git commit: Published site at adc0e85e8532870fa83cb21a44061c83ae77ec34.

2018-04-07 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1ee65067/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.html
index 98a45a0..4f02ded 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.html
@@ -66,1947 +66,1940 @@
 058import org.apache.hadoop.hbase.TableNotEnabledException;
 059import org.apache.hadoop.hbase.TableNotFoundException;
 060import org.apache.hadoop.hbase.ZooKeeperConnectionException;
-061import org.apache.hadoop.hbase.client.backoff.ClientBackoffPolicy;
-062import org.apache.hadoop.hbase.client.backoff.ClientBackoffPolicyFactory;
-063import org.apache.hadoop.hbase.exceptions.ClientExceptionsUtil;
-064import org.apache.hadoop.hbase.exceptions.RegionMovedException;
-065import org.apache.hadoop.hbase.ipc.RpcClient;
-066import org.apache.hadoop.hbase.ipc.RpcClientFactory;
-067import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
-068import org.apache.hadoop.hbase.log.HBaseMarkers;
-069import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException;
-070import org.apache.hadoop.hbase.security.User;
-071import org.apache.hadoop.hbase.util.Bytes;
-072import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-073import org.apache.hadoop.hbase.util.ExceptionUtil;
-074import org.apache.hadoop.hbase.util.Pair;
-075import org.apache.hadoop.hbase.util.ReflectionUtils;
-076import org.apache.hadoop.hbase.util.Threads;
-077import org.apache.hadoop.ipc.RemoteException;
-078import org.apache.yetus.audience.InterfaceAudience;
-079import org.apache.zookeeper.KeeperException;
-080import org.slf4j.Logger;
-081import org.slf4j.LoggerFactory;
-082
-083import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-084import org.apache.hbase.thirdparty.com.google.common.base.Throwables;
-085import org.apache.hbase.thirdparty.com.google.protobuf.BlockingRpcChannel;
-086import org.apache.hbase.thirdparty.com.google.protobuf.RpcController;
-087import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException;
-088import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-089import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
-090import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos;
-091import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
-092import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService.BlockingInterface;
-093import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos;
-094import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DecommissionRegionServersRequest;
-095import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DecommissionRegionServersResponse;
-096import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsBalancerEnabledRequest;
-097import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsBalancerEnabledResponse;
-098import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsNormalizerEnabledRequest;
-099import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsNormalizerEnabledResponse;
-100import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDecommissionedRegionServersRequest;
-101import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDecommissionedRegionServersResponse;
-102import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.NormalizeRequest;
-103import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.NormalizeResponse;
-104import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RecommissionRegionServerRequest;
-105import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RecommissionRegionServerResponse;
-106import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest;
-107import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse;
-108import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetNormalizerRunningRequest;
-109import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetNormalizerRunningResponse;
-110import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetQuotaStatesRequest;
-111import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetQuotaStatesResponse;
-112import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaRegionSizesRequest;
-113import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaRegionSizesResponse;
-114import

[09/17] hbase-site git commit: Published site at adc0e85e8532870fa83cb21a44061c83ae77ec34.

2018-04-07 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1ee65067/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.html
index 63b8c53..88e70e2 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.html
@@ -25,460 +25,470 @@
 017 */
 018package org.apache.hadoop.hbase.client;
 019
-020import static org.apache.hadoop.hbase.HConstants.CATALOG_FAMILY;
-021import static org.apache.hadoop.hbase.HConstants.NINES;
-022import static org.apache.hadoop.hbase.HConstants.ZEROES;
-023import static org.apache.hadoop.hbase.TableName.META_TABLE_NAME;
-024import static org.apache.hadoop.hbase.client.ConnectionUtils.createClosestRowAfter;
-025import static org.apache.hadoop.hbase.client.ConnectionUtils.isEmptyStopRow;
-026import static org.apache.hadoop.hbase.client.RegionInfo.createRegionName;
-027import static org.apache.hadoop.hbase.util.Bytes.BYTES_COMPARATOR;
-028import static org.apache.hadoop.hbase.util.CollectionUtils.computeIfAbsent;
-029
-030import java.io.IOException;
-031import java.util.Arrays;
-032import java.util.HashSet;
-033import java.util.Iterator;
-034import java.util.LinkedHashMap;
-035import java.util.List;
-036import java.util.Map;
-037import java.util.Optional;
-038import java.util.Set;
-039import java.util.concurrent.CompletableFuture;
-040import java.util.concurrent.ConcurrentHashMap;
-041import java.util.concurrent.ConcurrentMap;
-042import java.util.concurrent.ConcurrentNavigableMap;
-043import java.util.concurrent.ConcurrentSkipListMap;
-044
-045import org.apache.hadoop.hbase.DoNotRetryIOException;
-046import org.apache.hadoop.hbase.HRegionLocation;
-047import org.apache.hadoop.hbase.MetaTableAccessor;
-048import org.apache.hadoop.hbase.RegionLocations;
-049import org.apache.hadoop.hbase.TableName;
-050import org.apache.hadoop.hbase.TableNotFoundException;
-051import org.apache.hadoop.hbase.util.Bytes;
-052import org.apache.yetus.audience.InterfaceAudience;
-053import org.slf4j.Logger;
-054import org.slf4j.LoggerFactory;
-055
-056/**
-057 * The asynchronous locator for regions other than meta.
-058 */
-059@InterfaceAudience.Private
-060class AsyncNonMetaRegionLocator {
-061
-062  private static final Logger LOG = LoggerFactory.getLogger(AsyncNonMetaRegionLocator.class);
-063
-064  static final String MAX_CONCURRENT_LOCATE_REQUEST_PER_TABLE =
-065    "hbase.client.meta.max.concurrent.locate.per.table";
-066
-067  private static final int DEFAULT_MAX_CONCURRENT_LOCATE_REQUEST_PER_TABLE = 8;
-068
-069  private final AsyncConnectionImpl conn;
-070
-071  private final int maxConcurrentLocateRequestPerTable;
-072
-073  private final ConcurrentMap<TableName, TableCache> cache = new ConcurrentHashMap<>();
-074
-075  private static final class LocateRequest {
-076
-077    public final byte[] row;
-078
-079    public final RegionLocateType locateType;
-080
-081    public LocateRequest(byte[] row, RegionLocateType locateType) {
-082      this.row = row;
-083      this.locateType = locateType;
-084    }
-085
-086    @Override
-087    public int hashCode() {
-088      return Bytes.hashCode(row) ^ locateType.hashCode();
-089    }
-090
-091    @Override
-092    public boolean equals(Object obj) {
-093      if (obj == null || obj.getClass() != LocateRequest.class) {
-094        return false;
-095      }
-096      LocateRequest that = (LocateRequest) obj;
-097      return locateType.equals(that.locateType) && Bytes.equals(row, that.row);
-098    }
-099  }
-100
-101  private static final class TableCache {
-102
-103    public final ConcurrentNavigableMap<byte[], HRegionLocation> cache =
-104      new ConcurrentSkipListMap<>(BYTES_COMPARATOR);
-105
-106    public final Set<LocateRequest> pendingRequests = new HashSet<>();
-107
-108    public final Map<LocateRequest, CompletableFuture<HRegionLocation>> allRequests =
-109      new LinkedHashMap<>();
-110
-111    public boolean hasQuota(int max) {
-112      return pendingRequests.size() < max;
-113    }
-114
-115    public boolean isPending(LocateRequest req) {
-116      return pendingRequests.contains(req);
-117    }
-118
-119    public void send(LocateRequest req) {
-120      pendingRequests.add(req);
-121    }
-122
-123    public Optional<LocateRequest> getCandidate() {
-124      return allRequests.keySet().stream().filter(r -> !isPending(r)).findFirst();
-125    }
-126
-127    public void clearCompletedRequests(Optional<HRegionLocation> location) {
-128      for (Iterator<Map.Entry<LocateRequest, CompletableFuture<HRegionLocation>>> iter =
-129          allRequests.entrySet().iterator(); iter.hasNext();) {
-130        Map.Entry<LocateRequest,
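The TableCache bookkeeping shown above is what bounds concurrent meta lookups per table: pendingRequests holds the in-flight lookups, allRequests keeps every outstanding caller in insertion order, and getCandidate() picks the next queued request once a slot frees up. A minimal, self-contained sketch of that quota/promotion flow (simplified to plain strings; the class and field names here are illustrative, not HBase's own):

import java.util.HashSet;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.CompletableFuture;

// Simplified sketch of per-table locate-request throttling: at most maxInFlight
// lookups are "sent"; everything else waits in an insertion-ordered map.
final class LocateQuotaSketch {
  private final Set<String> pending = new HashSet<>();
  private final Map<String, CompletableFuture<String>> all = new LinkedHashMap<>();
  private final int maxInFlight;

  LocateQuotaSketch(int maxInFlight) {
    this.maxInFlight = maxInFlight;
  }

  // Ask for a row's location; only issues a new lookup if we are under quota.
  synchronized CompletableFuture<String> locate(String row) {
    CompletableFuture<String> future = all.computeIfAbsent(row, r -> new CompletableFuture<>());
    if (pending.size() < maxInFlight) {
      pending.add(row); // "send" the request; a real locator would scan meta here
    }
    return future;
  }

  // Called when a lookup finishes: complete the future, then promote the oldest waiter.
  synchronized void complete(String row, String location) {
    pending.remove(row);
    CompletableFuture<String> future = all.remove(row);
    if (future != null) {
      future.complete(location);
    }
    all.keySet().stream().filter(r -> !pending.contains(r)).findFirst().ifPresent(pending::add);
  }

  public static void main(String[] args) {
    LocateQuotaSketch sketch = new LocateQuotaSketch(2);
    sketch.locate("row1");
    sketch.locate("row2");
    sketch.locate("row3");                 // queued: two lookups already in flight
    sketch.complete("row1", "server-a");   // row3 is promoted once a slot frees up
    sketch.locate("row2").thenAccept(loc -> System.out.println("row2 @ " + loc));
    sketch.complete("row2", "server-b");
  }
}

In the real locator the key is a (row, locateType) pair and the futures carry HRegionLocation, but the send/quota/promotion shape is the same as far as the excerpt above shows.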

[02/17] hbase-site git commit: Published site at adc0e85e8532870fa83cb21a44061c83ae77ec34.

2018-04-07 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1ee65067/testdevapidocs/org/apache/hadoop/hbase/class-use/HBaseClassTestRule.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/class-use/HBaseClassTestRule.html b/testdevapidocs/org/apache/hadoop/hbase/class-use/HBaseClassTestRule.html
index 5bd1dcc..2edbeb0 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/class-use/HBaseClassTestRule.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/class-use/HBaseClassTestRule.html
@@ -596,126 +596,130 @@
 
 
 static HBaseClassTestRule
-TestNamespace.CLASS_RULE
+TestSplitMerge.CLASS_RULE
 
 
 static HBaseClassTestRule
-TestMetaTableAccessor.CLASS_RULE
+TestNamespace.CLASS_RULE
 
 
 static HBaseClassTestRule
-TestServerLoad.CLASS_RULE
+TestMetaTableAccessor.CLASS_RULE
 
 
 static HBaseClassTestRule
-TestInfoServers.CLASS_RULE
+TestServerLoad.CLASS_RULE
 
 
 static HBaseClassTestRule
-TestClientClusterStatus.CLASS_RULE
+TestInfoServers.CLASS_RULE
 
 
 static HBaseClassTestRule
-TestMetaTableLocator.CLASS_RULE
+TestClientClusterStatus.CLASS_RULE
 
 
 static HBaseClassTestRule
-TestFSTableDescriptorForceCreation.CLASS_RULE
+TestMetaTableLocator.CLASS_RULE
 
 
 static HBaseClassTestRule
-TestAcidGuaranteesWithNoInMemCompaction.CLASS_RULE
+TestFSTableDescriptorForceCreation.CLASS_RULE
 
 
 static HBaseClassTestRule
-TestClusterBootOrder.CLASS_RULE
+TestAcidGuaranteesWithNoInMemCompaction.CLASS_RULE
 
 
 static HBaseClassTestRule
-TestAcidGuaranteesWithAdaptivePolicy.CLASS_RULE
+TestClusterBootOrder.CLASS_RULE
 
 
 static HBaseClassTestRule
-TestMetaTableAccessorNoCluster.CLASS_RULE
+TestAcidGuaranteesWithAdaptivePolicy.CLASS_RULE
 
 
 static HBaseClassTestRule
-TestSerialization.CLASS_RULE
+TestMetaTableAccessorNoCluster.CLASS_RULE
 
 
 static HBaseClassTestRule
-TestServerMetrics.CLASS_RULE
+TestSerialization.CLASS_RULE
 
 
 static HBaseClassTestRule
-TestTagRewriteCell.CLASS_RULE
+TestServerMetrics.CLASS_RULE
 
 
 static HBaseClassTestRule
-TestHBaseOnOtherDfsCluster.CLASS_RULE
+TestTagRewriteCell.CLASS_RULE
 
 
 static HBaseClassTestRule
-TestGlobalMemStoreSize.CLASS_RULE
+TestHBaseOnOtherDfsCluster.CLASS_RULE
 
 
 static HBaseClassTestRule
-TestHBaseTestingUtility.CLASS_RULE
+TestGlobalMemStoreSize.CLASS_RULE
 
 
 static HBaseClassTestRule
-TestServerName.CLASS_RULE
+TestHBaseTestingUtility.CLASS_RULE
 
 
 static HBaseClassTestRule
-TestJMXConnectorServer.CLASS_RULE
+TestServerName.CLASS_RULE
 
 
 static HBaseClassTestRule
-TestMovedRegionsCleaner.CLASS_RULE
+TestJMXConnectorServer.CLASS_RULE
 
 
 static HBaseClassTestRule
-TestSize.CLASS_RULE
+TestMovedRegionsCleaner.CLASS_RULE
 
 
 static HBaseClassTestRule
-TestHRegionLocation.CLASS_RULE
+TestSize.CLASS_RULE
 
 
 static HBaseClassTestRule
-TestMultiVersions.CLASS_RULE
+TestHRegionLocation.CLASS_RULE
 
 
 static HBaseClassTestRule
-TestClusterPortAssignment.CLASS_RULE
+TestMultiVersions.CLASS_RULE
 
 
 static HBaseClassTestRule
-TestHColumnDescriptorDefaultVersions.CLASS_RULE
+TestClusterPortAssignment.CLASS_RULE
 
 
 static HBaseClassTestRule
-TestLocalHBaseCluster.CLASS_RULE
+TestHColumnDescriptorDefaultVersions.CLASS_RULE
 
 
 static HBaseClassTestRule
-TestZooKeeper.CLASS_RULE
+TestLocalHBaseCluster.CLASS_RULE
 
 
 static HBaseClassTestRule
-TestNodeHealthCheckChore.CLASS_RULE
+TestZooKeeper.CLASS_RULE
 
 
 static HBaseClassTestRule
-TestRegionRebalancing.CLASS_RULE
+TestNodeHealthCheckChore.CLASS_RULE
 
 
 static HBaseClassTestRule
-TestPerformanceEvaluation.CLASS_RULE
+TestRegionRebalancing.CLASS_RULE
 
 
 static HBaseClassTestRule
+TestPerformanceEvaluation.CLASS_RULE
+
+
+static HBaseClassTestRule
 TestIntegrationTestBase.CLASS_RULE
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1ee65067/testdevapidocs/org/apache/hadoop/hbase/class-use/HBaseTestingUtility.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/class-use/HBaseTestingUtility.html b/testdevapidocs/org/apache/hadoop/hbase/class-use/HBaseTestingUtility.html
index eff8609..be083b0 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/class-use/HBaseTestingUtility.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/class-use/HBaseTestingUtility.html
@@ -429,49 +429,53 @@
 
 
 private static HBaseTestingUtility
-TestMetaTableAccessor.UTIL
+TestSplitMerge.UTIL
 
 
 private static HBaseTestingUtility
-TestInfoServers.UTIL
+TestMetaTableAccessor.UTIL
 
 
 private static HBaseTestingUtility
-TestClientClusterStatus.UTIL
+TestInfoServers.UTIL
 
 
 private static HBaseTestingUtility
-TestMetaTableLocator.UTIL
+TestClientClusterStatus.UTIL
 
 
 private static HBaseTestingUtility
-TestFSTableDescriptorForceCreation.UTIL
+TestMetaTableLocator.UTIL
 
 
 private static HBaseTestingUtility
-TestMetaTableAccessorNoCluster.UTIL
+TestFSTableDescriptorForceCreation.UTIL
 
 
 private static HBaseTestingUtility
-TestJMXConnectorServer.UTIL

[16/17] hbase-site git commit: Published site at adc0e85e8532870fa83cb21a44061c83ae77ec34.

2018-04-07 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1ee65067/apache_hbase_reference_guide.pdf
--
diff --git a/apache_hbase_reference_guide.pdf b/apache_hbase_reference_guide.pdf
index 027a235..5cc953e 100644
--- a/apache_hbase_reference_guide.pdf
+++ b/apache_hbase_reference_guide.pdf
@@ -5,16 +5,16 @@
 /Author (Apache HBase Team)
 /Creator (Asciidoctor PDF 1.5.0.alpha.15, based on Prawn 2.2.2)
 /Producer (Apache HBase Team)
-/ModDate (D:20180406144614+00'00')
-/CreationDate (D:20180406144614+00'00')
+/ModDate (D:20180407144604+00'00')
+/CreationDate (D:20180407144604+00'00')
 >>
 endobj
 2 0 obj
 << /Type /Catalog
 /Pages 3 0 R
 /Names 26 0 R
-/Outlines 4577 0 R
-/PageLabels 4803 0 R
+/Outlines 4588 0 R
+/PageLabels 4814 0 R
 /PageMode /UseOutlines
 /OpenAction [7 0 R /FitH 842.89]
 /ViewerPreferences << /DisplayDocTitle true
@@ -23,8 +23,8 @@ endobj
 endobj
 3 0 obj
 << /Type /Pages
-/Count 713
-/Kids [7 0 R 12 0 R 14 0 R 16 0 R ... (several hundred PDF page object references elided) ...]

[01/17] hbase-site git commit: Published site at adc0e85e8532870fa83cb21a44061c83ae77ec34.

2018-04-07 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 40396e2de -> 1ee650677


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1ee65067/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncNonMetaRegionLocatorConcurrenyLimit.CountingRegionObserver.html
--
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncNonMetaRegionLocatorConcurrenyLimit.CountingRegionObserver.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncNonMetaRegionLocatorConcurrenyLimit.CountingRegionObserver.html
index 37f3bd1..04be717 100644
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncNonMetaRegionLocatorConcurrenyLimit.CountingRegionObserver.html
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncNonMetaRegionLocatorConcurrenyLimit.CountingRegionObserver.html
@@ -95,9 +95,9 @@
 087    }
 088
 089    @Override
-090    public void preScannerOpen(ObserverContext<RegionCoprocessorEnvironment> e, Scan scan)
-091        throws IOException {
-092      if (e.getEnvironment().getRegionInfo().isMetaRegion()) {
+090    public boolean preScannerNext(ObserverContext<RegionCoprocessorEnvironment> c,
+091        InternalScanner s, List<Result> result, int limit, boolean hasNext) throws IOException {
+092      if (c.getEnvironment().getRegionInfo().isMetaRegion()) {
 093        int concurrency = CONCURRENCY.incrementAndGet();
 094        for (;;) {
 095          int max = MAX_CONCURRENCY.get();
@@ -110,68 +110,71 @@
 102          }
 103          Threads.sleepWithoutInterrupt(10);
 104        }
-105    }
-106
-107    @Override
-108    public void postScannerClose(ObserverContext<RegionCoprocessorEnvironment> e, InternalScanner s)
-109        throws IOException {
-110      if (e.getEnvironment().getRegionInfo().isMetaRegion()) {
-111        CONCURRENCY.decrementAndGet();
-112      }
-113    }
-114  }
-115
-116  @BeforeClass
-117  public static void setUp() throws Exception {
-118    Configuration conf = TEST_UTIL.getConfiguration();
-119    conf.set(REGION_COPROCESSOR_CONF_KEY, CountingRegionObserver.class.getName());
-120    conf.setInt(MAX_CONCURRENT_LOCATE_REQUEST_PER_TABLE, MAX_ALLOWED);
-121    TEST_UTIL.startMiniCluster(3);
-122    TEST_UTIL.getAdmin().setBalancerRunning(false, true);
-123    AsyncRegistry registry = AsyncRegistryFactory.getRegistry(TEST_UTIL.getConfiguration());
-124    CONN = new AsyncConnectionImpl(TEST_UTIL.getConfiguration(), registry,
-125        registry.getClusterId().get(), User.getCurrent());
-126    LOCATOR = new AsyncNonMetaRegionLocator(CONN);
-127    SPLIT_KEYS = IntStream.range(1, 256).mapToObj(i -> Bytes.toBytes(String.format("%02x", i)))
-128        .toArray(byte[][]::new);
-129    TEST_UTIL.createTable(TABLE_NAME, FAMILY, SPLIT_KEYS);
-130    TEST_UTIL.waitTableAvailable(TABLE_NAME);
-131  }
-132
-133  @AfterClass
-134  public static void tearDown() throws Exception {
-135    IOUtils.closeQuietly(CONN);
-136    TEST_UTIL.shutdownMiniCluster();
-137  }
-138
-139  private void assertLocs(List<CompletableFuture<HRegionLocation>> futures)
-140      throws InterruptedException, ExecutionException {
-141    assertEquals(256, futures.size());
-142    for (int i = 0; i < futures.size(); i++) {
-143      HRegionLocation loc = futures.get(i).get();
-144      if (i == 0) {
-145        assertTrue(isEmptyStartRow(loc.getRegionInfo().getStartKey()));
-146      } else {
-147        assertEquals(String.format("%02x", i), Bytes.toString(loc.getRegionInfo().getStartKey()));
-148      }
-149      if (i == futures.size() - 1) {
-150        assertTrue(isEmptyStopRow(loc.getRegionInfo().getEndKey()));
-151      } else {
-152        assertEquals(String.format("%02x", i + 1), Bytes.toString(loc.getRegionInfo().getEndKey()));
-153      }
-154    }
-155  }
-156
-157  @Test
-158  public void test() throws InterruptedException, ExecutionException {
-159    List<CompletableFuture<HRegionLocation>> futures =
-160        IntStream.range(0, 256).mapToObj(i -> Bytes.toBytes(String.format("%02x", i)))
-161            .map(r -> LOCATOR.getRegionLocation(TABLE_NAME, r, RegionLocateType.CURRENT, false))
-162            .collect(toList());
-163    assertLocs(futures);
-164    assertTrue(MAX_CONCURRENCY.get() <= MAX_ALLOWED);
-165  }
-166}
+105      return hasNext;
+106    }
+107
+108    @Override
+109    public boolean postScannerNext(ObserverContext<RegionCoprocessorEnvironment> c,
+110        InternalScanner s, List<Result> result, int limit, boolean hasNext) throws IOException {
+111      if (c.getEnvironment().getRegionInfo().isMetaRegion()) {
+112        CONCURRENCY.decrementAndGet();
+113      }
+114      return hasNext;
+115    }
+116  }
+117
+118  @BeforeClass
+119  public static void setUp() throws Exception {
+120    Configuration conf = TEST_UTIL.getConfiguration();
+121    conf.set(REGION_COPROCESSOR_CONF_KEY,
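The change above moves the meta concurrency accounting from scanner open/close into the scanner next hooks, returning hasNext so the scan proceeds normally. A hedged sketch of that counting-observer pattern against the HBase 2.x coprocessor API (the class and counter names are illustrative, and the high-water-mark bookkeeping is simplified compared to the test's spin loop):

import java.io.IOException;
import java.util.List;
import java.util.Optional;
import java.util.concurrent.atomic.AtomicInteger;

import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.RegionObserver;
import org.apache.hadoop.hbase.regionserver.InternalScanner;

// Counts how many scanner "next" calls against the meta region are in flight at once.
public class MetaScanConcurrencyObserver implements RegionCoprocessor, RegionObserver {

  public static final AtomicInteger CONCURRENCY = new AtomicInteger(0);
  public static final AtomicInteger MAX_CONCURRENCY = new AtomicInteger(0);

  @Override
  public Optional<RegionObserver> getRegionObserver() {
    return Optional.of(this);
  }

  @Override
  public boolean preScannerNext(ObserverContext<RegionCoprocessorEnvironment> c, InternalScanner s,
      List<Result> result, int limit, boolean hasNext) throws IOException {
    if (c.getEnvironment().getRegionInfo().isMetaRegion()) {
      int cur = CONCURRENCY.incrementAndGet();
      MAX_CONCURRENCY.accumulateAndGet(cur, Math::max); // remember the high-water mark
    }
    return hasNext;
  }

  @Override
  public boolean postScannerNext(ObserverContext<RegionCoprocessorEnvironment> c, InternalScanner s,
      List<Result> result, int limit, boolean hasNext) throws IOException {
    if (c.getEnvironment().getRegionInfo().isMetaRegion()) {
      CONCURRENCY.decrementAndGet();
    }
    return hasNext;
  }
}

As the setUp() above does, such an observer is wired in by putting its class name into the region coprocessor configuration key before the mini cluster starts.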

[15/17] hbase-site git commit: Published site at adc0e85e8532870fa83cb21a44061c83ae77ec34.

2018-04-07 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1ee65067/book.html
--
diff --git a/book.html b/book.html
index e68863b..dba23a1 100644
--- a/book.html
+++ b/book.html
@@ -517,7 +517,7 @@ In this case, you can set JAVA_HOME to the directory containing the
 
 
 Edit conf/hbase-site.xml, which is the main HBase configuration file.
-At this time, you only need to specify the directory on the local filesystem where HBase and ZooKeeper write data.
+At this time, you need to specify the directory on the local filesystem where HBase and ZooKeeper write data and acknowledge some risks.
 By default, a new directory is created under /tmp.
 Many servers are configured to delete the contents of /tmp upon reboot, so you should store the data elsewhere.
 The following configuration will store HBase's data in the hbase directory, in the home directory of the user called testuser.
@@ -536,6 +536,21 @@ Paste the property tags beneath the configuration
     <name>hbase.zookeeper.property.dataDir</name>
     <value>/home/testuser/zookeeper</value>
   </property>
+  <property>
+    <name>hbase.unsafe.stream.capability.enforce</name>
+    <value>false</value>
+    <description>
+      Controls whether HBase will check for stream capabilities (hflush/hsync).
+
+      Disable this if you intend to run on LocalFileSystem, denoted by a rootdir
+      with the 'file://' scheme, but be mindful of the NOTE below.
+
+      WARNING: Setting this to false blinds you to potential data loss and
+      inconsistent system state in the event of process and/or node failures. If
+      HBase is complaining of an inability to use hsync or hflush it's most
+      likely not a false positive.
+    </description>
+  </property>
 </configuration>
 
 
@@ -554,15 +569,27 @@ HBase will attempt to do a migration, which is not what you want.
 
 
 The hbase.rootdir in the above example points to a directory
-in the local filesystem. The 'file:/' prefix is how we denote local filesystem.
-To home HBase on an existing instance of HDFS, set the hbase.rootdir to point at a
-directory up on your instance: e.g. hdfs://namenode.example.org:8020/hbase.
-For more on this variant, see the section below on Standalone HBase over HDFS.
+in the local filesystem. The 'file://' prefix is how we denote local
+filesystem. You should take the WARNING present in the configuration example
+to heart. In standalone mode HBase makes use of the local filesystem abstraction
+from the Apache Hadoop project. That abstraction doesn't provide the durability
+promises that HBase needs to operate safely. This is fine for local development
+and testing use cases where the cost of cluster failure is well contained. It is
+not appropriate for production deployments; eventually you will lose data.
 
 
 
 
 
+
+
+
+To home HBase on an existing instance of HDFS, set the hbase.rootdir to point at a
+directory up on your instance: e.g. hdfs://namenode.example.org:8020/hbase.
+For more on this variant, see the section below on Standalone HBase over HDFS.
+
+
+
 
 The bin/start-hbase.sh script is provided as a convenient way to start HBase.
 Issue the command, and if all goes well, a message is logged to standard output showing that HBase started successfully.
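For tests or embedded tooling that would rather set these quickstart properties programmatically than in hbase-site.xml, the same keys can be applied to a Hadoop Configuration. A minimal sketch, assuming only a client-side Configuration object is wanted; the property names and example paths mirror the prose above:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class QuickstartConfigSketch {
  public static void main(String[] args) {
    // Start from hbase-default.xml / hbase-site.xml on the classpath.
    Configuration conf = HBaseConfiguration.create();

    // Standalone on the local filesystem: not durable, development and testing only.
    conf.set("hbase.rootdir", "file:///home/testuser/hbase");
    conf.set("hbase.zookeeper.property.dataDir", "/home/testuser/zookeeper");
    conf.setBoolean("hbase.unsafe.stream.capability.enforce", false);

    // To home HBase on HDFS instead, point rootdir at the namenode and
    // re-enable the stream capability check (or simply drop the override).
    // conf.set("hbase.rootdir", "hdfs://namenode.example.org:8020/hbase");
    // conf.setBoolean("hbase.unsafe.stream.capability.enforce", true);

    System.out.println("hbase.rootdir = " + conf.get("hbase.rootdir"));
  }
}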
@@ -631,7 +658,7 @@ You must specify the table name and the ColumnFamily name.
 
 List Information About your Table
 
-Use the list command to
+Use the list command to confirm your table exists
 
 
 
@@ -643,6 +670,23 @@ test
 => ["test"]
 
 
+
+Now use the describe command to see details, including configuration defaults
+
+
+
+hbase(main):003:0> describe 'test'
+Table test is ENABLED
+test
+COLUMN FAMILIES DESCRIPTION
+{NAME => 'cf', VERSIONS => '1', EVICT_BLOCKS_ON_CLOSE => 'false', NEW_VERSION_BEHAVIOR => 'false', KEEP_DELETED_CELLS => 'FALSE', CACHE_DATA_ON_WRITE =>
+'false', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', CACHE_INDEX_ON_WRITE => 'f
+alse', IN_MEMORY => 'false', CACHE_BLOOMS_ON_WRITE => 'false', PREFETCH_BLOCKS_ON_OPEN => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE
+ => '65536'}
+1 row(s)
+Took 0.9998 seconds
+
+
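The describe output added above can also be read through the Java client. A small hedged sketch against the 2.x Admin API, assuming a reachable cluster and the quickstart's 'test' table:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;

public class DescribeTableSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
        Admin admin = conn.getAdmin()) {
      // Roughly the equivalent of: hbase> describe 'test'
      TableDescriptor desc = admin.getDescriptor(TableName.valueOf("test"));
      for (ColumnFamilyDescriptor cf : desc.getColumnFamilies()) {
        // Prints each family together with its effective settings (VERSIONS, TTL, ...).
        System.out.println(cf);
      }
    }
  }
}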
 
 
 Put data into your table.
@@ -826,7 +870,7 @@ First, add the following property which directs HBase to run in distributed mode
 
 
 Next, change the hbase.rootdir from the local filesystem to the address of your HDFS instance, using the hdfs: URI syntax.
-In this example, HDFS is running on the localhost at port 8020.
+In this example, HDFS is running on the localhost at port 8020. Be sure to either remove the entry for hbase.unsafe.stream.capability.enforce or set it to true.
 
 
 
@@ -6858,9 +6902,12 @@ Quitting...
 Special upgrading for Replication users from pre-HBase 1.4
 User running versions of HBase prior to the 1.4.0 release that make use of replication should be sure to read the instructions in the section Replication peers TableCFs config.
 
 

[03/17] hbase-site git commit: Published site at adc0e85e8532870fa83cb21a44061c83ae77ec34.

2018-04-07 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1ee65067/export_control.html
--
diff --git a/export_control.html b/export_control.html
index 8f81112..77b8415 100644
--- a/export_control.html
+++ b/export_control.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  
   Export Control
@@ -321,7 +321,7 @@ for more details.
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 2018-04-06
+  Last Published: 2018-04-07
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1ee65067/index.html
--
diff --git a/index.html b/index.html
index 6e98f65..6b7c8d5 100644
--- a/index.html
+++ b/index.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  Apache HBase™ Home
 
@@ -425,7 +425,7 @@ Apache HBase is an open-source, distributed, versioned, 
non-relational database
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 2018-04-06
+  Last Published: 2018-04-07
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1ee65067/integration.html
--
diff --git a/integration.html b/integration.html
index df36939..57cc192 100644
--- a/integration.html
+++ b/integration.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  CI Management
 
@@ -281,7 +281,7 @@
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 2018-04-06
+  Last Published: 2018-04-07
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1ee65067/issue-tracking.html
--
diff --git a/issue-tracking.html b/issue-tracking.html
index a7e36aa..f31f5fc 100644
--- a/issue-tracking.html
+++ b/issue-tracking.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  Issue Management
 
@@ -278,7 +278,7 @@
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 2018-04-06
+  Last Published: 2018-04-07
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1ee65067/license.html
--
diff --git a/license.html b/license.html
index 259efd9..e299966 100644
--- a/license.html
+++ b/license.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  Project Licenses
 
@@ -481,7 +481,7 @@
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 2018-04-06
+  Last Published: 2018-04-07
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1ee65067/mail-lists.html
--
diff --git a/mail-lists.html b/mail-lists.html
index 0046b81..173dda8 100644
--- a/mail-lists.html
+++ b/mail-lists.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  Project Mailing Lists
 
@@ -331,7 +331,7 @@
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 2018-04-06
+  Last Published: 2018-04-07
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1ee65067/metrics.html
--
diff --git a/metrics.html b/metrics.html
index 118e684..76ad903 100644
--- a/metrics.html
+++ b/metrics.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase   
   Apache HBase (TM) Metrics
@@ -449,7 +449,7 @@ export HBASE_REGIONSERVER_OPTS=$HBASE_JMX_OPTS 
-Dcom.sun.management.jmxrem
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 2018-04-06
+  Last Published: 2018-04-07
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1ee65067/old_news.html
--
diff --git a/old_news.html b/old_news.html
index