[2/3] hbase git commit: HBASE-20545 Improve performance of BaseLoadBalancer.retainAssignment

2018-05-15 Thread apurtell
HBASE-20545 Improve performance of BaseLoadBalancer.retainAssignment

Signed-off-by: Andrew Purtell 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/7e65dfaf
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/7e65dfaf
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/7e65dfaf

Branch: refs/heads/branch-1.4
Commit: 7e65dfaf4fb02368a46325cbb4b7accbdf44ba0e
Parents: 2944999
Author: Thiruvel Thirumoolan 
Authored: Tue May 8 14:16:12 2018 -0700
Committer: Andrew Purtell 
Committed: Tue May 15 18:17:56 2018 -0700

--
 .../hbase/master/balancer/BaseLoadBalancer.java | 41 ++--
 1 file changed, 29 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/7e65dfaf/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
index 97a4cb1..41cbeaa 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
@@ -1563,6 +1563,9 @@ public abstract class BaseLoadBalancer implements LoadBalancer {
     // after the cluster restart.
     Set<String> oldHostsNoLongerPresent = Sets.newTreeSet();
 
+    // If the old servers aren't present, lets assign those regions later.
+    List<HRegionInfo> randomAssignRegions = Lists.newArrayList();
+
     int numRandomAssignments = 0;
     int numRetainedAssigments = 0;
 
@@ -1576,37 +1579,51 @@ public abstract class BaseLoadBalancer implements LoadBalancer {
         localServers = serversByHostname.get(oldServerName.getHostname());
       }
       if (localServers.isEmpty()) {
-        // No servers on the new cluster match up with this hostname,
-        // assign randomly.
-        ServerName randomServer = randomAssignment(cluster, region, servers);
-        assignments.get(randomServer).add(region);
-        numRandomAssignments++;
-        if (oldServerName != null) oldHostsNoLongerPresent.add(oldServerName.getHostname());
+        // No servers on the new cluster match up with this hostname, assign randomly, later.
+        randomAssignRegions.add(region);
+        if (oldServerName != null) {
+          oldHostsNoLongerPresent.add(oldServerName.getHostname());
+        }
       } else if (localServers.size() == 1) {
         // the usual case - one new server on same host
         ServerName target = localServers.get(0);
         assignments.get(target).add(region);
-        cluster.doAssignRegion(region, target);
         numRetainedAssigments++;
       } else {
         // multiple new servers in the cluster on this same host
         if (localServers.contains(oldServerName)) {
           assignments.get(oldServerName).add(region);
-          cluster.doAssignRegion(region, oldServerName);
+          numRetainedAssigments++;
         } else {
           ServerName target = null;
-          for (ServerName tmp: localServers) {
+          for (ServerName tmp : localServers) {
             if (tmp.getPort() == oldServerName.getPort()) {
               target = tmp;
+              assignments.get(tmp).add(region);
+              numRetainedAssigments++;
               break;
             }
           }
           if (target == null) {
-            target = randomAssignment(cluster, region, localServers);
+            randomAssignRegions.add(region);
           }
-          assignments.get(target).add(region);
         }
-        numRetainedAssigments++;
+      }
+    }
+
+    // If servers from prior assignment aren't present, then lets do randomAssignment on regions.
+    if (randomAssignRegions.size() > 0) {
+      for (Map.Entry<ServerName, List<HRegionInfo>> entry : assignments.entrySet()) {
+        ServerName sn = entry.getKey();
+        for (HRegionInfo region : entry.getValue()) {
+          cluster.doAssignRegion(region, sn);
+        }
+      }
+      for (HRegionInfo region : randomAssignRegions) {
+        ServerName target = randomAssignment(cluster, region, servers);
+        assignments.get(target).add(region);
+        cluster.doAssignRegion(region, target);
+        numRandomAssignments++;
       }
     }
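What the patch buys: the old loop called cluster.doAssignRegion() and randomAssignment() inline per region, so every region whose host had vanished paid for a placement decision against a half-built cluster state. The rewrite collects those regions in randomAssignRegions, registers all retained placements first, and only then places the leftovers. Below is a minimal, self-contained sketch of that two-pass shape; regions and servers are plain strings here and placeRandomly() is an illustrative stand-in for BaseLoadBalancer.randomAssignment(), not HBase code.

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class RetainThenRandom {

  public static void main(String[] args) {
    Map<String, String> oldAssignment = new HashMap<>();
    oldAssignment.put("region-1", "hostA");
    oldAssignment.put("region-2", "hostB");
    oldAssignment.put("region-3", "hostGone");
    List<String> liveServers = List.of("hostA", "hostB");

    Map<String, List<String>> assignments = new HashMap<>();
    liveServers.forEach(s -> assignments.put(s, new ArrayList<>()));

    // Pass 1: retain what we can, defer the rest instead of deciding inline.
    List<String> deferred = new ArrayList<>();
    oldAssignment.forEach((region, oldServer) -> {
      if (liveServers.contains(oldServer)) {
        assignments.get(oldServer).add(region); // cheap, no balancing decision needed
      } else {
        deferred.add(region); // decide later, once the full picture exists
      }
    });

    // Pass 2: place the leftovers, now seeing all retained load at once.
    for (String region : deferred) {
      String target = placeRandomly(assignments, liveServers);
      assignments.get(target).add(region);
    }
    System.out.println(assignments); // e.g. {hostA=[region-1, region-3], hostB=[region-2]}
  }

  // Least-loaded pick keeps the deferred regions spread evenly.
  static String placeRandomly(Map<String, List<String>> assignments, List<String> servers) {
    return servers.stream()
        .min((a, b) -> Integer.compare(assignments.get(a).size(), assignments.get(b).size()))
        .orElseThrow();
  }
}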
 



[1/3] hbase git commit: HBASE-20545 Improve performance of BaseLoadBalancer.retainAssignment

2018-05-15 Thread apurtell
Repository: hbase
Updated Branches:
  refs/heads/branch-1 944a221b7 -> 2a309d71c
  refs/heads/branch-1.3 75e7714d2 -> 8491967f0
  refs/heads/branch-1.4 294499949 -> 7e65dfaf4


HBASE-20545 Improve performance of BaseLoadBalancer.retainAssignment

Signed-off-by: Andrew Purtell 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/2a309d71
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/2a309d71
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/2a309d71

Branch: refs/heads/branch-1
Commit: 2a309d71cccb4cd87278ad505fe6f4513b3e6547
Parents: 944a221
Author: Thiruvel Thirumoolan 
Authored: Tue May 8 14:16:12 2018 -0700
Committer: Andrew Purtell 
Committed: Tue May 15 18:17:52 2018 -0700

--
 .../hbase/master/balancer/BaseLoadBalancer.java | 41 ++--
 1 file changed, 29 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/2a309d71/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
index 97a4cb1..41cbeaa 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
@@ -1563,6 +1563,9 @@ public abstract class BaseLoadBalancer implements LoadBalancer {
     // after the cluster restart.
     Set<String> oldHostsNoLongerPresent = Sets.newTreeSet();
 
+    // If the old servers aren't present, lets assign those regions later.
+    List<HRegionInfo> randomAssignRegions = Lists.newArrayList();
+
     int numRandomAssignments = 0;
     int numRetainedAssigments = 0;
 
@@ -1576,37 +1579,51 @@ public abstract class BaseLoadBalancer implements LoadBalancer {
         localServers = serversByHostname.get(oldServerName.getHostname());
       }
       if (localServers.isEmpty()) {
-        // No servers on the new cluster match up with this hostname,
-        // assign randomly.
-        ServerName randomServer = randomAssignment(cluster, region, servers);
-        assignments.get(randomServer).add(region);
-        numRandomAssignments++;
-        if (oldServerName != null) oldHostsNoLongerPresent.add(oldServerName.getHostname());
+        // No servers on the new cluster match up with this hostname, assign randomly, later.
+        randomAssignRegions.add(region);
+        if (oldServerName != null) {
+          oldHostsNoLongerPresent.add(oldServerName.getHostname());
+        }
       } else if (localServers.size() == 1) {
         // the usual case - one new server on same host
         ServerName target = localServers.get(0);
         assignments.get(target).add(region);
-        cluster.doAssignRegion(region, target);
         numRetainedAssigments++;
       } else {
         // multiple new servers in the cluster on this same host
         if (localServers.contains(oldServerName)) {
           assignments.get(oldServerName).add(region);
-          cluster.doAssignRegion(region, oldServerName);
+          numRetainedAssigments++;
         } else {
           ServerName target = null;
-          for (ServerName tmp: localServers) {
+          for (ServerName tmp : localServers) {
             if (tmp.getPort() == oldServerName.getPort()) {
               target = tmp;
+              assignments.get(tmp).add(region);
+              numRetainedAssigments++;
               break;
             }
           }
           if (target == null) {
-            target = randomAssignment(cluster, region, localServers);
+            randomAssignRegions.add(region);
           }
-          assignments.get(target).add(region);
         }
-        numRetainedAssigments++;
+      }
+    }
+
+    // If servers from prior assignment aren't present, then lets do randomAssignment on regions.
+    if (randomAssignRegions.size() > 0) {
+      for (Map.Entry<ServerName, List<HRegionInfo>> entry : assignments.entrySet()) {
+        ServerName sn = entry.getKey();
+        for (HRegionInfo region : entry.getValue()) {
+          cluster.doAssignRegion(region, sn);
+        }
+      }
+      for (HRegionInfo region : randomAssignRegions) {
+        ServerName target = randomAssignment(cluster, region, servers);
+        assignments.get(target).add(region);
+        cluster.doAssignRegion(region, target);
+        numRandomAssignments++;
      }
    }
 



[3/3] hbase git commit: HBASE-20545 Improve performance of BaseLoadBalancer.retainAssignment

2018-05-15 Thread apurtell
HBASE-20545 Improve performance of BaseLoadBalancer.retainAssignment

Signed-off-by: Andrew Purtell 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/8491967f
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/8491967f
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/8491967f

Branch: refs/heads/branch-1.3
Commit: 8491967f0f5d764dcfdf4d7e8357b2c7f1c3f4ca
Parents: 75e7714
Author: Thiruvel Thirumoolan 
Authored: Tue May 8 14:16:12 2018 -0700
Committer: Andrew Purtell 
Committed: Tue May 15 18:18:04 2018 -0700

--
 .../hbase/master/balancer/BaseLoadBalancer.java | 41 ++--
 1 file changed, 29 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/8491967f/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
index 1770003..3e00080 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
@@ -1422,6 +1422,9 @@ public abstract class BaseLoadBalancer implements LoadBalancer {
     // after the cluster restart.
     Set<String> oldHostsNoLongerPresent = Sets.newTreeSet();
 
+    // If the old servers aren't present, lets assign those regions later.
+    List<HRegionInfo> randomAssignRegions = Lists.newArrayList();
+
     int numRandomAssignments = 0;
     int numRetainedAssigments = 0;
 
@@ -1435,37 +1438,51 @@ public abstract class BaseLoadBalancer implements LoadBalancer {
         localServers = serversByHostname.get(oldServerName.getHostname());
       }
       if (localServers.isEmpty()) {
-        // No servers on the new cluster match up with this hostname,
-        // assign randomly.
-        ServerName randomServer = randomAssignment(cluster, region, servers);
-        assignments.get(randomServer).add(region);
-        numRandomAssignments++;
-        if (oldServerName != null) oldHostsNoLongerPresent.add(oldServerName.getHostname());
+        // No servers on the new cluster match up with this hostname, assign randomly, later.
+        randomAssignRegions.add(region);
+        if (oldServerName != null) {
+          oldHostsNoLongerPresent.add(oldServerName.getHostname());
+        }
       } else if (localServers.size() == 1) {
         // the usual case - one new server on same host
         ServerName target = localServers.get(0);
         assignments.get(target).add(region);
-        cluster.doAssignRegion(region, target);
         numRetainedAssigments++;
       } else {
         // multiple new servers in the cluster on this same host
         if (localServers.contains(oldServerName)) {
           assignments.get(oldServerName).add(region);
-          cluster.doAssignRegion(region, oldServerName);
+          numRetainedAssigments++;
         } else {
           ServerName target = null;
-          for (ServerName tmp: localServers) {
+          for (ServerName tmp : localServers) {
             if (tmp.getPort() == oldServerName.getPort()) {
               target = tmp;
+              assignments.get(tmp).add(region);
+              numRetainedAssigments++;
               break;
             }
           }
           if (target == null) {
-            target = randomAssignment(cluster, region, localServers);
+            randomAssignRegions.add(region);
           }
-          assignments.get(target).add(region);
         }
-        numRetainedAssigments++;
+      }
+    }
+
+    // If servers from prior assignment aren't present, then lets do randomAssignment on regions.
+    if (randomAssignRegions.size() > 0) {
+      for (Map.Entry<ServerName, List<HRegionInfo>> entry : assignments.entrySet()) {
+        ServerName sn = entry.getKey();
+        for (HRegionInfo region : entry.getValue()) {
+          cluster.doAssignRegion(region, sn);
+        }
+      }
+      for (HRegionInfo region : randomAssignRegions) {
+        ServerName target = randomAssignment(cluster, region, servers);
+        assignments.get(target).add(region);
+        cluster.doAssignRegion(region, target);
+        numRandomAssignments++;
      }
    }
 



[20/29] hbase git commit: HBASE-20426 Give up replicating anything in S state

2018-05-15 Thread zhangduo
HBASE-20426 Give up replicating anything in S state


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/a0e6719d
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/a0e6719d
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/a0e6719d

Branch: refs/heads/HBASE-19064
Commit: a0e6719d4d28018e87cfd56b56875680b1a766d0
Parents: b1423a7
Author: zhangduo 
Authored: Thu May 3 15:51:35 2018 +0800
Committer: zhangduo 
Committed: Wed May 16 09:06:30 2018 +0800

--
 .../src/main/protobuf/MasterProcedure.proto |  13 +-
 .../replication/AbstractPeerProcedure.java  |   4 +
 .../master/replication/ModifyPeerProcedure.java |   6 -
 .../replication/ReplicationPeerManager.java |  13 +-
 ...ransitPeerSyncReplicationStateProcedure.java |  94 +++
 .../hadoop/hbase/regionserver/LogRoller.java|  11 +-
 .../regionserver/PeerProcedureHandlerImpl.java  |  63 --
 .../regionserver/ReplicationSource.java |   1 +
 .../regionserver/ReplicationSourceManager.java  | 118 ---
 .../TestDrainReplicationQueuesForStandBy.java   | 118 +++
 10 files changed, 379 insertions(+), 62 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/a0e6719d/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
--
diff --git a/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto 
b/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
index 01e4dae..f15cb04 100644
--- a/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
@@ -394,11 +394,14 @@ enum PeerSyncReplicationStateTransitionState {
   SET_PEER_NEW_SYNC_REPLICATION_STATE = 2;
   REFRESH_PEER_SYNC_REPLICATION_STATE_ON_RS_BEGIN = 3;
   REPLAY_REMOTE_WAL_IN_PEER = 4;
-  REOPEN_ALL_REGIONS_IN_PEER = 5;
-  TRANSIT_PEER_NEW_SYNC_REPLICATION_STATE = 6;
-  REFRESH_PEER_SYNC_REPLICATION_STATE_ON_RS_END = 7;
-  CREATE_DIR_FOR_REMOTE_WAL = 8;
-  POST_PEER_SYNC_REPLICATION_STATE_TRANSITION = 9;
+  REMOVE_ALL_REPLICATION_QUEUES_IN_PEER = 5;
+  REOPEN_ALL_REGIONS_IN_PEER = 6;
+  TRANSIT_PEER_NEW_SYNC_REPLICATION_STATE = 7;
+  REFRESH_PEER_SYNC_REPLICATION_STATE_ON_RS_END = 8;
+  SYNC_REPLICATION_SET_PEER_ENABLED = 9;
+  SYNC_REPLICATION_ENABLE_PEER_REFRESH_PEER_ON_RS = 10;
+  CREATE_DIR_FOR_REMOTE_WAL = 11;
+  POST_PEER_SYNC_REPLICATION_STATE_TRANSITION = 12;
 }
 
 message PeerModificationStateData {

http://git-wip-us.apache.org/repos/asf/hbase/blob/a0e6719d/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/AbstractPeerProcedure.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/AbstractPeerProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/AbstractPeerProcedure.java
index 6679d78..458e073 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/AbstractPeerProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/AbstractPeerProcedure.java
@@ -106,4 +106,8 @@ public abstract class AbstractPeerProcedure
     throw new UnsupportedOperationException();
   }
 
+  protected final void refreshPeer(MasterProcedureEnv env, PeerOperationType type) {
+    addChildProcedure(env.getMasterServices().getServerManager().getOnlineServersList().stream()
+      .map(sn -> new RefreshPeerProcedure(peerId, type, sn)).toArray(RefreshPeerProcedure[]::new));
+  }
 }
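refreshPeer(), now hoisted into AbstractPeerProcedure, fans out one RefreshPeerProcedure child per online region server and lets the parent wait on all of them. A rough standalone sketch of that fan-out shape, using plain java.util.concurrent instead of the HBase procedure framework; FanOutSketch and RefreshPeerTask are invented names:

import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class FanOutSketch {

  // Stand-in for one RefreshPeerProcedure child.
  static final class RefreshPeerTask implements Callable<Void> {
    private final String peerId;
    private final String serverName;

    RefreshPeerTask(String peerId, String serverName) {
      this.peerId = peerId;
      this.serverName = serverName;
    }

    @Override
    public Void call() {
      System.out.println("refreshing peer " + peerId + " on " + serverName);
      return null;
    }
  }

  public static void main(String[] args) throws InterruptedException {
    // Stand-in for getOnlineServersList().
    List<String> onlineServers = List.of("rs1", "rs2", "rs3");
    ExecutorService pool = Executors.newFixedThreadPool(onlineServers.size());
    // One child task per live server, then block until all finish, analogous
    // to addChildProcedure(stream().map(...).toArray(...)).
    pool.invokeAll(onlineServers.stream()
        .map(sn -> new RefreshPeerTask("peer-1", sn))
        .toList());
    pool.shutdown();
  }
}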

http://git-wip-us.apache.org/repos/asf/hbase/blob/a0e6719d/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ModifyPeerProcedure.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ModifyPeerProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ModifyPeerProcedure.java
index 32b8ea1..56462ca 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ModifyPeerProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ModifyPeerProcedure.java
@@ -108,12 +108,6 @@ public abstract class ModifyPeerProcedure extends AbstractPeerProcedure
-  protected void refreshPeer(MasterProcedureEnv env, PeerOperationType type) {
-    addChildProcedure(env.getMasterServices().getServerManager().getOnlineServersList().stream()
-      .map(sn -> new RefreshPeerProcedure(peerId, type, sn))
-      .toArray(RefreshPeerProcedure[]::new));
-  }
-
   protected ReplicationPeerConfig getOldPeerConfig() {
     return null;
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/a0e6719d/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
--
diff --git 
a/hbase-server/src/main/java

[17/29] hbase git commit: HBASE-19082 Reject read/write from client but accept write from replication in state S

2018-05-15 Thread zhangduo
HBASE-19082 Reject read/write from client but accept write from replication in state S


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/a1d92f3f
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/a1d92f3f
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/a1d92f3f

Branch: refs/heads/HBASE-19064
Commit: a1d92f3f6c9fc4f4bd5dc5c3e4d0c7e630b2504b
Parents: 0a2f4d9
Author: zhangduo 
Authored: Mon Feb 12 18:20:18 2018 +0800
Committer: zhangduo 
Committed: Wed May 16 09:06:30 2018 +0800

--
 .../org/apache/hadoop/hbase/HConstants.java |   3 -
 .../src/main/protobuf/MasterProcedure.proto |   3 +-
 .../hbase/replication/ReplicationUtils.java |   4 +
 ...ransitPeerSyncReplicationStateProcedure.java |  10 +
 .../hadoop/hbase/regionserver/HRegion.java  |   5 +-
 .../hbase/regionserver/HRegionServer.java   |   2 +-
 .../hbase/regionserver/RSRpcServices.java   |  88 ++--
 .../RejectRequestsFromClientStateChecker.java   |  44 
 .../regionserver/ReplicationSink.java   |  72 ---
 .../SyncReplicationPeerInfoProvider.java|  10 +-
 .../SyncReplicationPeerInfoProviderImpl.java|  19 +-
 .../hbase/wal/SyncReplicationWALProvider.java   |   3 +
 .../org/apache/hadoop/hbase/wal/WALFactory.java |   4 +-
 .../hbase/replication/TestSyncReplication.java  | 200 +++
 .../wal/TestSyncReplicationWALProvider.java |   8 +-
 15 files changed, 401 insertions(+), 74 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/a1d92f3f/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
--
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java 
b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
index 522c2cf..9241682 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
@@ -1355,9 +1355,6 @@ public final class HConstants {
 
   public static final String NOT_IMPLEMENTED = "Not implemented";
 
-  // TODO: need to find a better place to hold it.
-  public static final String SYNC_REPLICATION_ENABLED = "hbase.replication.sync.enabled";
-
   private HConstants() {
     // Can't be instantiated with this ctor.
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/a1d92f3f/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
--
diff --git a/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto 
b/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
index 67c1b43..e8b940e 100644
--- a/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
@@ -397,7 +397,8 @@ enum PeerSyncReplicationStateTransitionState {
   REOPEN_ALL_REGIONS_IN_PEER = 5;
   TRANSIT_PEER_NEW_SYNC_REPLICATION_STATE = 6;
   REFRESH_PEER_SYNC_REPLICATION_STATE_ON_RS_END = 7;
-  POST_PEER_SYNC_REPLICATION_STATE_TRANSITION = 8;
+  CREATE_DIR_FOR_REMOTE_WAL = 8;
+  POST_PEER_SYNC_REPLICATION_STATE_TRANSITION = 9;
 }
 
 message PeerModificationStateData {

http://git-wip-us.apache.org/repos/asf/hbase/blob/a1d92f3f/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java
--
diff --git 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java
 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java
index e4dea83..d94cb00 100644
--- 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java
+++ 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java
@@ -37,6 +37,10 @@ import org.apache.yetus.audience.InterfaceAudience;
 @InterfaceAudience.Private
 public final class ReplicationUtils {
 
+  public static final String SYNC_REPLICATION_ENABLED = "hbase.replication.sync.enabled";
+
+  public static final String REPLICATION_ATTR_NAME = "__rep__";
+
   private ReplicationUtils() {
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/a1d92f3f/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/TransitPeerSyncReplicationStateProcedure.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/TransitPeerSyncReplicationStateProcedure.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/TransitPeerSyncReplicationStateProcedure.java
index 8fc932f..69404a0 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replica
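The rule this commit enforces: in standby (S) state a region rejects normal client reads and writes but still accepts writes that arrive via replication. A toy illustration of that gate, assuming replication requests are recognizable by a request attribute; the "__rep__" key mirrors REPLICATION_ATTR_NAME from the diff above, everything else here is invented:

import java.util.Map;

public class StandbyGateSketch {

  // Mirrors ReplicationUtils.REPLICATION_ATTR_NAME from the diff.
  static final String REPLICATION_ATTR_NAME = "__rep__";

  static void checkWrite(boolean peerInStandbyState, Map<String, byte[]> requestAttributes) {
    boolean fromReplication = requestAttributes.containsKey(REPLICATION_ATTR_NAME);
    if (peerInStandbyState && !fromReplication) {
      throw new IllegalStateException("peer is in STANDBY state, client write rejected");
    }
    System.out.println(fromReplication ? "replication write accepted" : "client write accepted");
  }

  public static void main(String[] args) {
    checkWrite(true, Map.of(REPLICATION_ATTR_NAME, new byte[0])); // replication write: allowed
    try {
      checkWrite(true, Map.of()); // plain client write: rejected in state S
    } catch (IllegalStateException e) {
      System.out.println("rejected: " + e.getMessage());
    }
  }
}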

[06/29] hbase git commit: HBASE-19999 Remove the SYNC_REPLICATION_ENABLED flag

2018-05-15 Thread zhangduo
HBASE-19999 Remove the SYNC_REPLICATION_ENABLED flag


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/d303ea98
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/d303ea98
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/d303ea98

Branch: refs/heads/HBASE-19064
Commit: d303ea9853ecd462893721e6110bdf14a4d2e7f1
Parents: 35b5bca
Author: Guanghao Zhang 
Authored: Fri Mar 9 11:30:25 2018 +0800
Committer: zhangduo 
Committed: Wed May 16 09:06:30 2018 +0800

--
 .../hbase/replication/ReplicationUtils.java  |  2 --
 .../hadoop/hbase/regionserver/HRegionServer.java | 13 -
 .../hbase/wal/SyncReplicationWALProvider.java| 19 ++-
 .../org/apache/hadoop/hbase/wal/WALFactory.java  | 18 --
 .../hbase/replication/TestSyncReplication.java   |  1 -
 .../master/TestRecoverStandbyProcedure.java  |  2 --
 .../wal/TestSyncReplicationWALProvider.java  |  2 --
 7 files changed, 38 insertions(+), 19 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/d303ea98/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java
--
diff --git 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java
 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java
index e402d0f..cb22f57 100644
--- 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java
+++ 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java
@@ -37,8 +37,6 @@ import org.apache.yetus.audience.InterfaceAudience;
 @InterfaceAudience.Private
 public final class ReplicationUtils {
 
-  public static final String SYNC_REPLICATION_ENABLED = "hbase.replication.sync.enabled";
-
   public static final String REPLICATION_ATTR_NAME = "__rep__";
 
   public static final String REMOTE_WAL_DIR_NAME = "remoteWALs";

http://git-wip-us.apache.org/repos/asf/hbase/blob/d303ea98/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index 2fb4f67..af2f3b5 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -1804,10 +1804,8 @@ public class HRegionServer extends HasThread implements
   private void setupWALAndReplication() throws IOException {
     boolean isMasterNoTableOrSystemTableOnly = this instanceof HMaster &&
       (!LoadBalancer.isTablesOnMaster(conf) || LoadBalancer.isSystemTablesOnlyOnMaster(conf));
-    if (isMasterNoTableOrSystemTableOnly) {
-      conf.setBoolean(ReplicationUtils.SYNC_REPLICATION_ENABLED, false);
-    }
-    WALFactory factory = new WALFactory(conf, serverName.toString());
+    WALFactory factory =
+        new WALFactory(conf, serverName.toString(), !isMasterNoTableOrSystemTableOnly);
     if (!isMasterNoTableOrSystemTableOnly) {
       // TODO Replication make assumptions here based on the default filesystem impl
       Path oldLogDir = new Path(walRootDir, HConstants.HREGION_OLDLOGDIR_NAME);
@@ -1926,11 +1924,8 @@ public class HRegionServer extends HasThread implements
     }
     this.executorService.startExecutorService(ExecutorType.RS_REFRESH_PEER,
       conf.getInt("hbase.regionserver.executor.refresh.peer.threads", 2));
-
-    if (conf.getBoolean(ReplicationUtils.SYNC_REPLICATION_ENABLED, false)) {
-      this.executorService.startExecutorService(ExecutorType.RS_REPLAY_SYNC_REPLICATION_WAL,
-        conf.getInt("hbase.regionserver.executor.replay.sync.replication.wal.threads", 2));
-    }
+    this.executorService.startExecutorService(ExecutorType.RS_REPLAY_SYNC_REPLICATION_WAL,
+      conf.getInt("hbase.regionserver.executor.replay.sync.replication.wal.threads", 1));
 
     Threads.setDaemonThreadRunning(this.walRoller.getThread(), getName() + ".logRoller",
       uncaughtExceptionHandler);

http://git-wip-us.apache.org/repos/asf/hbase/blob/d303ea98/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/SyncReplicationWALProvider.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/SyncReplicationWALProvider.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/SyncReplicationWALProvider.java
index 282aa21..54287fe 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/SyncReplicationWALProvider.jav

[15/29] hbase git commit: HBASE-19957 General framework to transit sync replication state

2018-05-15 Thread zhangduo
http://git-wip-us.apache.org/repos/asf/hbase/blob/0a2f4d9f/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SyncReplicationPeerInfoProvider.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SyncReplicationPeerInfoProvider.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SyncReplicationPeerInfoProvider.java
new file mode 100644
index 000..92f2c52
--- /dev/null
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SyncReplicationPeerInfoProvider.java
@@ -0,0 +1,43 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.replication.regionserver;
+
+import java.util.Optional;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.replication.SyncReplicationState;
+import org.apache.hadoop.hbase.util.Pair;
+import org.apache.yetus.audience.InterfaceAudience;
+
+/**
+ * Get the information for a sync replication peer.
+ */
+@InterfaceAudience.Private
+public interface SyncReplicationPeerInfoProvider {
+
+  /**
+   * Return the peer id and remote WAL directory if the region is synchronously replicated and the
+   * state is {@link SyncReplicationState#ACTIVE}.
+   */
+  Optional<Pair<String, String>> getPeerIdAndRemoteWALDir(RegionInfo info);
+
+  /**
+   * Check whether the given region is contained in a sync replication peer which is in the given
+   * state.
+   */
+  boolean isInState(RegionInfo info, SyncReplicationState state);
+}

http://git-wip-us.apache.org/repos/asf/hbase/blob/0a2f4d9f/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SyncReplicationPeerInfoProviderImpl.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SyncReplicationPeerInfoProviderImpl.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SyncReplicationPeerInfoProviderImpl.java
new file mode 100644
index 000..32159e6
--- /dev/null
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SyncReplicationPeerInfoProviderImpl.java
@@ -0,0 +1,71 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.replication.regionserver;
+
+import java.util.Optional;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.replication.ReplicationPeer;
+import org.apache.hadoop.hbase.replication.ReplicationPeers;
+import org.apache.hadoop.hbase.replication.SyncReplicationState;
+import org.apache.hadoop.hbase.util.Pair;
+import org.apache.yetus.audience.InterfaceAudience;
+
+@InterfaceAudience.Private
+class SyncReplicationPeerInfoProviderImpl implements SyncReplicationPeerInfoProvider {
+
+  private final ReplicationPeers replicationPeers;
+
+  private final SyncReplicationPeerMappingManager mapping;
+
+  SyncReplicationPeerInfoProviderImpl(ReplicationPeers replicationPeers,
+      SyncReplicationPeerMappingManager mapping) {
+    this.replicationPeers = replicationPeers;
+    this.mapping = mapping;
+  }
+
+  @Override
+  public Optional<Pair<String, String>> getPeerIdAndRemoteWALDir(RegionInfo info) {
+    String peerId = mapping.getPeerId(info);
+    if (peerId == null) {
+      return Optional.empty();
+    }
+    ReplicationPeer peer = replication

[24/29] hbase git commit: HBASE-19782 Reject the replication request when peer is DA or A state

2018-05-15 Thread zhangduo
HBASE-19782 Reject the replication request when peer is DA or A state


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/5bc73928
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/5bc73928
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/5bc73928

Branch: refs/heads/HBASE-19064
Commit: 5bc73928ced98fb6aa750e3c4a564767f2b4d520
Parents: 2029a46
Author: huzheng 
Authored: Fri Mar 2 18:05:29 2018 +0800
Committer: zhangduo 
Committed: Wed May 16 09:06:30 2018 +0800

--
 .../hbase/protobuf/ReplicationProtbufUtil.java  |  2 +-
 .../hadoop/hbase/regionserver/HRegion.java  |  2 +-
 .../hbase/regionserver/HRegionServer.java   |  5 +--
 .../hbase/regionserver/RSRpcServices.java   | 25 +--
 .../RejectReplicationRequestStateChecker.java   | 45 
 .../ReplaySyncReplicationWALCallable.java   | 24 ++-
 .../replication/regionserver/Replication.java   |  2 +-
 .../regionserver/ReplicationSink.java   | 16 +++
 .../SyncReplicationPeerInfoProvider.java| 11 ++---
 .../SyncReplicationPeerInfoProviderImpl.java| 13 +++---
 .../SyncReplicationPeerMappingManager.java  |  5 +--
 .../hbase/wal/SyncReplicationWALProvider.java   |  7 +--
 .../replication/SyncReplicationTestBase.java| 32 ++
 .../replication/TestSyncReplicationActive.java  | 13 +-
 .../regionserver/TestReplicationSink.java   |  5 +--
 .../regionserver/TestWALEntrySinkFilter.java|  3 +-
 .../wal/TestSyncReplicationWALProvider.java |  6 +--
 17 files changed, 163 insertions(+), 53 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/5bc73928/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ReplicationProtbufUtil.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ReplicationProtbufUtil.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ReplicationProtbufUtil.java
index 81dd59e..e01f881 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ReplicationProtbufUtil.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ReplicationProtbufUtil.java
@@ -33,6 +33,7 @@ import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellScanner;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.PrivateCellUtil;
+import org.apache.hadoop.hbase.wal.WAL.Entry;
 import org.apache.hadoop.hbase.wal.WALKeyImpl;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.hbase.io.SizedCellScanner;
@@ -45,7 +46,6 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminServic
 import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos;
 import org.apache.hadoop.hbase.util.Pair;
-import org.apache.hadoop.hbase.wal.WAL.Entry;
 
 @InterfaceAudience.Private
 public class ReplicationProtbufUtil {

http://git-wip-us.apache.org/repos/asf/hbase/blob/5bc73928/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index 6aa4b27..ba487c6 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -1984,7 +1984,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
   private boolean shouldForbidMajorCompaction() {
     if (rsServices != null && rsServices.getReplicationSourceService() != null) {
       return rsServices.getReplicationSourceService().getSyncReplicationPeerInfoProvider()
-          .checkState(getRegionInfo(), ForbidMajorCompactionChecker.get());
+          .checkState(getRegionInfo().getTable(), ForbidMajorCompactionChecker.get());
     }
     return false;
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/5bc73928/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index 440a838..ab571c6 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -2478,10 +2478,9 @@ public class HRegionServer extends HasThread impleme

[01/29] hbase git commit: HBASE-20457 Return immediately for a scan rpc call when we want to switch from pread to stream [Forced Update!]

2018-05-15 Thread zhangduo
Repository: hbase
Updated Branches:
  refs/heads/HBASE-19064 5d5c2d204 -> f60a710ef (forced update)


HBASE-20457 Return immediately for a scan rpc call when we want to switch from pread to stream


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/26babcf0
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/26babcf0
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/26babcf0

Branch: refs/heads/HBASE-19064
Commit: 26babcf013de696b899d76a3c39434b794440d8d
Parents: d2daada
Author: zhangduo 
Authored: Thu Apr 26 17:54:13 2018 +0800
Committer: zhangduo 
Committed: Tue May 15 20:56:20 2018 +0800

--
 .../RpcRetryingCallerWithReadReplicas.java  |   1 +
 .../hbase/regionserver/ScannerContext.java  |  27 ++--
 .../hadoop/hbase/regionserver/StoreScanner.java |  17 ++-
 .../regionserver/TestSwitchToStreamRead.java| 141 +--
 4 files changed, 164 insertions(+), 22 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/26babcf0/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerWithReadReplicas.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerWithReadReplicas.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerWithReadReplicas.java
index 4a31cff..a0be0bf 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerWithReadReplicas.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerWithReadReplicas.java
@@ -279,6 +279,7 @@ public class RpcRetryingCallerWithReadReplicas {
       throws RetriesExhaustedException, DoNotRetryIOException {
     Throwable t = e.getCause();
     assert t != null; // That's what ExecutionException is about: holding an exception
+    t.printStackTrace();
 
     if (t instanceof RetriesExhaustedException) {
       throw (RetriesExhaustedException) t;

http://git-wip-us.apache.org/repos/asf/hbase/blob/26babcf0/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScannerContext.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScannerContext.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScannerContext.java
index 10f9b24..cc6ec84 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScannerContext.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScannerContext.java
@@ -99,6 +99,12 @@ public class ScannerContext {
 
   private Cell lastPeekedCell = null;
 
+  // Set this to true will have the same behavior with reaching the time limit.
+  // This is used when you want to make the current RSRpcService.scan returns immediately. For
+  // example, when we want to switch from pread to stream, we can only do it after the rpc call is
+  // returned.
+  private boolean returnImmediately;
+
   /**
    * Tracks the relevant server side metrics during scans. null when metrics should not be tracked
    */
@@ -247,7 +253,8 @@ public class ScannerContext {
    * @return true if the time limit can be enforced in the checker's scope
    */
   boolean hasTimeLimit(LimitScope checkerScope) {
-    return limits.canEnforceTimeLimitFromScope(checkerScope) && limits.getTime() > 0;
+    return limits.canEnforceTimeLimitFromScope(checkerScope) &&
+      (limits.getTime() > 0 || returnImmediately);
   }
 
   /**
@@ -307,7 +314,8 @@ public class ScannerContext {
    * @return true when the limit is enforceable from the checker's scope and it has been reached
    */
   boolean checkTimeLimit(LimitScope checkerScope) {
-    return hasTimeLimit(checkerScope) && (System.currentTimeMillis() >= limits.getTime());
+    return hasTimeLimit(checkerScope) &&
+      (returnImmediately || System.currentTimeMillis() >= limits.getTime());
   }
 
   /**
@@ -327,6 +335,10 @@ public class ScannerContext {
     this.lastPeekedCell = lastPeekedCell;
   }
 
+  void returnImmediately() {
+    this.returnImmediately = true;
+  }
+
   @Override
   public String toString() {
     StringBuilder sb = new StringBuilder();
@@ -539,11 +551,6 @@ public class ScannerContext {
     LimitFields() {
     }
 
-    LimitFields(int batch, LimitScope sizeScope, long size, long heapSize, LimitScope timeScope,
-        long time) {
-      setFields(batch, sizeScope, size, heapSize, timeScope, time);
-    }
-
     void copy(LimitFields limitsToCopy) {
       if (limitsToCopy != null) {
         setFields(limitsToCopy.getBatch(), limitsToCopy.getSizeScope(), limitsToCopy.getDataSize(),
@@ -691,12 +698,6 @@ public class ScannerContext {
     // such AND data cells of Cells which are in
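The mechanism in the ScannerContext changes above is a one-way flag that piggybacks on the existing time-limit check: once set, checkTimeLimit() reports the deadline as reached, the RPC returns a partial result, and the pread-to-stream switch can happen before the client's next call. A compact sketch of that short-circuit with invented names:

public class ReturnImmediatelySketch {

  private final long deadlineMillis;
  private boolean returnImmediately;

  ReturnImmediatelySketch(long timeLimitMillis) {
    this.deadlineMillis = System.currentTimeMillis() + timeLimitMillis;
  }

  // One-way switch, like ScannerContext.returnImmediately().
  void requestReturnImmediately() {
    this.returnImmediately = true;
  }

  // Mirrors the reworked checkTimeLimit(): the flag short-circuits the clock.
  boolean timeLimitReached() {
    return returnImmediately || System.currentTimeMillis() >= deadlineMillis;
  }

  public static void main(String[] args) {
    ReturnImmediatelySketch ctx = new ReturnImmediatelySketch(60_000);
    for (int row = 0; row < 1_000_000; row++) {
      if (row == 3) {
        ctx.requestReturnImmediately(); // e.g. the scanner wants to reopen as a stream read
      }
      if (ctx.timeLimitReached()) {
        System.out.println("returning partial result after " + (row + 1) + " rows");
        break; // the client's next scan call resumes with the new reader
      }
    }
  }
}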

[03/29] hbase git commit: HBASE-20456 Support removing a ReplicationSourceShipper for a special wal group

2018-05-15 Thread zhangduo
HBASE-20456 Support removing a ReplicationSourceShipper for a special wal group


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/2ae4e0bf
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/2ae4e0bf
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/2ae4e0bf

Branch: refs/heads/HBASE-19064
Commit: 2ae4e0bf5b06d5862d9cc35af2dd810c782fae6c
Parents: 9ed1f4e
Author: zhangduo 
Authored: Tue Apr 24 22:01:21 2018 +0800
Committer: zhangduo 
Committed: Wed May 16 09:06:30 2018 +0800

--
 .../hbase/regionserver/wal/AsyncFSWAL.java  |  1 +
 .../RecoveredReplicationSource.java | 13 +---
 .../RecoveredReplicationSourceShipper.java  |  7 --
 .../regionserver/ReplicationSource.java | 13 +++-
 .../regionserver/ReplicationSourceManager.java  | 19 -
 .../regionserver/ReplicationSourceShipper.java  | 20 +++--
 .../ReplicationSourceWALReader.java |  9 ++-
 .../regionserver/WALEntryStream.java|  3 +-
 .../hadoop/hbase/wal/AbstractFSWALProvider.java | 28 ---
 .../hbase/wal/SyncReplicationWALProvider.java   | 10 ++-
 .../TestReplicationSourceManager.java   |  5 +-
 .../TestSyncReplicationShipperQuit.java | 81 
 .../regionserver/TestWALEntryStream.java|  4 +-
 13 files changed, 163 insertions(+), 50 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/2ae4e0bf/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java
index 17133ed..f630e63 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java
@@ -682,6 +682,7 @@ public class AsyncFSWAL extends AbstractFSWAL {
   protected void doShutdown() throws IOException {
     waitForSafePoint();
     closeWriter(this.writer);
+    this.writer = null;
     closeExecutor.shutdown();
     try {
       if (!closeExecutor.awaitTermination(waitOnShutdownInSeconds, TimeUnit.SECONDS)) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/2ae4e0bf/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RecoveredReplicationSource.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RecoveredReplicationSource.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RecoveredReplicationSource.java
index a21ca44..f1bb538 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RecoveredReplicationSource.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RecoveredReplicationSource.java
@@ -30,7 +30,6 @@ import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.replication.ReplicationPeer;
 import org.apache.hadoop.hbase.replication.ReplicationQueueStorage;
 import org.apache.hadoop.hbase.util.FSUtils;
-import org.apache.hadoop.hbase.util.Threads;
 import org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
@@ -144,15 +143,9 @@ public class RecoveredReplicationSource extends ReplicationSource {
   }
 
   void tryFinish() {
-    // use synchronize to make sure one last thread will clean the queue
-    synchronized (workerThreads) {
-      Threads.sleep(100);// wait a short while for other worker thread to fully exit
-      boolean allTasksDone = workerThreads.values().stream().allMatch(w -> w.isFinished());
-      if (allTasksDone) {
-        this.getSourceMetrics().clear();
-        manager.removeRecoveredSource(this);
-        LOG.info("Finished recovering queue {} with the following stats: {}", queueId, getStats());
-      }
+    if (workerThreads.isEmpty()) {
+      this.getSourceMetrics().clear();
+      manager.finishRecoveredSource(this);
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/2ae4e0bf/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RecoveredReplicationSourceShipper.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RecoveredReplicationSourceShipper.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RecoveredReplicationSourceShipper.java
index 91109cf..b0d4db0 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/re
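The new tryFinish() only fires once workerThreads is empty, so the last shipper to deregister itself triggers the cleanup instead of every worker sleeping 100 ms and re-polling under a lock. A minimal sketch of that last-one-out pattern with illustrative names:

import java.util.concurrent.ConcurrentHashMap;

public class LastWorkerCleanup {

  private final ConcurrentHashMap<String, String> workerThreads = new ConcurrentHashMap<>();

  void addWorker(String walGroup) {
    workerThreads.put(walGroup, "shipper-" + walGroup);
  }

  // Called by each shipper as it exits; no sleep or poll needed.
  void removeWorker(String walGroup) {
    workerThreads.remove(walGroup);
    tryFinish();
  }

  void tryFinish() {
    if (workerThreads.isEmpty()) {
      System.out.println("all shippers gone; recovered source finished");
    }
  }

  public static void main(String[] args) {
    LastWorkerCleanup src = new LastWorkerCleanup();
    src.addWorker("wal-group-1");
    src.addWorker("wal-group-2");
    src.removeWorker("wal-group-1"); // map not yet empty, nothing happens
    src.removeWorker("wal-group-2"); // last one out triggers the finish
  }
}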

[13/29] hbase git commit: HBASE-19864 Use protobuf instead of enum.ordinal to store SyncReplicationState

2018-05-15 Thread zhangduo
HBASE-19864 Use protobuf instead of enum.ordinal to store SyncReplicationState

Signed-off-by: zhangduo 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/ae549084
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/ae549084
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/ae549084

Branch: refs/heads/HBASE-19064
Commit: ae54908464593c7593af2f36673bfab8fdfdbe62
Parents: c9ee48c
Author: Guanghao Zhang 
Authored: Fri Jan 26 16:50:48 2018 +0800
Committer: zhangduo 
Committed: Wed May 16 09:06:30 2018 +0800

--
 .../replication/ReplicationPeerConfigUtil.java  | 22 +++---
 .../hbase/replication/SyncReplicationState.java | 17 ++
 .../hbase/shaded/protobuf/RequestConverter.java |  7 +++---
 .../src/main/protobuf/Replication.proto | 13 +++
 .../replication/ZKReplicationPeerStorage.java   | 24 +---
 .../hadoop/hbase/master/MasterRpcServices.java  |  9 
 ...ransitPeerSyncReplicationStateProcedure.java |  9 
 .../TestReplicationSourceManager.java   |  2 +-
 8 files changed, 67 insertions(+), 36 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/ae549084/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java
index 6cbe05b..331795c 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java
@@ -403,7 +403,7 @@ public final class ReplicationPeerConfigUtil {
         ReplicationProtos.ReplicationState.State.ENABLED == desc.getState().getState();
     ReplicationPeerConfig config = convert(desc.getConfig());
     return new ReplicationPeerDescription(desc.getId(), enabled, config,
-        SyncReplicationState.valueOf(desc.getSyncReplicationState().getNumber()));
+        toSyncReplicationState(desc.getSyncReplicationState()));
   }
 
   public static ReplicationProtos.ReplicationPeerDescription
@@ -411,17 +411,33 @@ public final class ReplicationPeerConfigUtil {
     ReplicationProtos.ReplicationPeerDescription.Builder builder =
         ReplicationProtos.ReplicationPeerDescription.newBuilder();
     builder.setId(desc.getPeerId());
+
     ReplicationProtos.ReplicationState.Builder stateBuilder =
         ReplicationProtos.ReplicationState.newBuilder();
     stateBuilder.setState(desc.isEnabled() ? ReplicationProtos.ReplicationState.State.ENABLED :
         ReplicationProtos.ReplicationState.State.DISABLED);
     builder.setState(stateBuilder.build());
+
     builder.setConfig(convert(desc.getPeerConfig()));
-    builder.setSyncReplicationState(
-      ReplicationProtos.SyncReplicationState.forNumber(desc.getSyncReplicationState().ordinal()));
+    builder.setSyncReplicationState(toSyncReplicationState(desc.getSyncReplicationState()));
+
     return builder.build();
   }
 
+  public static ReplicationProtos.SyncReplicationState
+      toSyncReplicationState(SyncReplicationState state) {
+    ReplicationProtos.SyncReplicationState.Builder syncReplicationStateBuilder =
+        ReplicationProtos.SyncReplicationState.newBuilder();
+    syncReplicationStateBuilder
+        .setState(ReplicationProtos.SyncReplicationState.State.forNumber(state.ordinal()));
+    return syncReplicationStateBuilder.build();
+  }
+
+  public static SyncReplicationState
+      toSyncReplicationState(ReplicationProtos.SyncReplicationState state) {
+    return SyncReplicationState.valueOf(state.getState().getNumber());
+  }
+
   public static ReplicationPeerConfig appendTableCFsToReplicationPeerConfig(
       Map<TableName, List<String>> tableCfs, ReplicationPeerConfig peerConfig) {
     ReplicationPeerConfigBuilder builder = ReplicationPeerConfig.newBuilder(peerConfig);
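Both conversion directions now funnel through the two toSyncReplicationState() helpers above, so the enum ordinal is interpreted against the protobuf numbering in exactly one place. A stripped-down illustration of that round trip, with the protobuf stub faked as a plain int and the enum values merely illustrative:

public class SyncStateCodec {

  // Shaped like SyncReplicationState; the value order here is illustrative.
  enum SyncState { NONE, DOWNGRADE_ACTIVE, ACTIVE, STANDBY }

  // Stand-ins for ReplicationProtos.SyncReplicationState.State.forNumber/getNumber.
  static int toProtoNumber(SyncState state) {
    return state.ordinal();
  }

  static SyncState fromProtoNumber(int number) {
    return SyncState.values()[number];
  }

  public static void main(String[] args) {
    for (SyncState s : SyncState.values()) {
      if (fromProtoNumber(toProtoNumber(s)) != s) {
        throw new AssertionError("round trip broken for " + s);
      }
    }
    System.out.println("ordinal <-> proto number round trip is stable");
  }
}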

http://git-wip-us.apache.org/repos/asf/hbase/blob/ae549084/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/SyncReplicationState.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/SyncReplicationState.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/SyncReplicationState.java
index bd144e9..a65b144 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/SyncReplicationState.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/SyncReplicationState.java
@@ -17,8 +17,15 @@
  */
 package org.apache.hadoop.hbase.replication;
 

[14/29] hbase git commit: HBASE-19935 Only allow table replication for sync replication for now

2018-05-15 Thread zhangduo
HBASE-19935 Only allow table replication for sync replication for now


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/fb3bcfc4
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/fb3bcfc4
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/fb3bcfc4

Branch: refs/heads/HBASE-19064
Commit: fb3bcfc44a055b80883b77af35bc94f97d237430
Parents: ae54908
Author: Guanghao Zhang 
Authored: Tue Feb 6 16:00:59 2018 +0800
Committer: zhangduo 
Committed: Wed May 16 09:06:30 2018 +0800

--
 .../replication/ReplicationPeerConfig.java  |  9 +++
 .../replication/ReplicationPeerManager.java | 34 -
 .../replication/TestReplicationAdmin.java   | 73 ++--
 .../wal/TestCombinedAsyncWriter.java|  6 ++
 .../wal/TestSyncReplicationWALProvider.java |  6 ++
 5 files changed, 102 insertions(+), 26 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/fb3bcfc4/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
index 97abc74..997a155 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
@@ -25,6 +25,8 @@ import java.util.List;
 import java.util.Map;
 import java.util.Set;
 import java.util.TreeMap;
+
+import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.yetus.audience.InterfaceAudience;
@@ -220,6 +222,13 @@ public class ReplicationPeerConfig {
     return this.remoteWALDir;
   }
 
+  /**
+   * Use remote wal dir to decide whether a peer is sync replication peer
+   */
+  public boolean isSyncReplication() {
+    return !StringUtils.isBlank(this.remoteWALDir);
+  }
+
   public static ReplicationPeerConfigBuilder newBuilder() {
     return new ReplicationPeerConfigBuilderImpl();
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/fb3bcfc4/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
index f07a0d8..ff778a8 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
@@ -170,7 +170,7 @@ public class ReplicationPeerManager {
           " does not match new remote wal dir '" + peerConfig.getRemoteWALDir() + "'");
     }
 
-    if (oldPeerConfig.getRemoteWALDir() != null) {
+    if (oldPeerConfig.isSyncReplication()) {
       if (!ReplicationUtils.isNamespacesAndTableCFsEqual(oldPeerConfig, peerConfig)) {
         throw new DoNotRetryIOException(
           "Changing the replicated namespace/table config on a synchronous replication " +
@@ -199,8 +199,8 @@ public class ReplicationPeerManager {
     }
     ReplicationPeerConfig copiedPeerConfig = ReplicationPeerConfig.newBuilder(peerConfig).build();
     SyncReplicationState syncReplicationState =
-        StringUtils.isBlank(peerConfig.getRemoteWALDir()) ? SyncReplicationState.NONE
-            : SyncReplicationState.DOWNGRADE_ACTIVE;
+        copiedPeerConfig.isSyncReplication() ? SyncReplicationState.DOWNGRADE_ACTIVE
+            : SyncReplicationState.NONE;
     peerStorage.addPeer(peerId, copiedPeerConfig, enabled, syncReplicationState);
     peers.put(peerId,
       new ReplicationPeerDescription(peerId, enabled, copiedPeerConfig, syncReplicationState));
@@ -324,9 +324,37 @@ public class ReplicationPeerManager {
         peerConfig.getTableCFsMap());
     }
 
+    if (peerConfig.isSyncReplication()) {
+      checkPeerConfigForSyncReplication(peerConfig);
+    }
+
     checkConfiguredWALEntryFilters(peerConfig);
   }
 
+  private void checkPeerConfigForSyncReplication(ReplicationPeerConfig peerConfig)
+      throws DoNotRetryIOException {
+    // This is used to reduce the difficulty for implementing the sync replication state transition
+    // as we need to reopen all the related regions.
+    // TODO: Add namespace, replicat_all flag back
+    if (peerConfig.replicateAllUserTables()) {
+      throw new DoNotRetryIOException(
+          "Only support replicated table c

[29/29] hbase git commit: HBASE-20432 Cleanup related resources when remove a sync replication peer

2018-05-15 Thread zhangduo
HBASE-20432 Cleanup related resources when remove a sync replication peer


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/b1423a78
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/b1423a78
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/b1423a78

Branch: refs/heads/HBASE-19064
Commit: b1423a780370f663b7af47c3227dbc43e0058a60
Parents: 4f4f0c7
Author: huzheng 
Authored: Wed Apr 18 20:38:33 2018 +0800
Committer: zhangduo 
Committed: Wed May 16 09:06:30 2018 +0800

--
 .../master/replication/RemovePeerProcedure.java | 10 +
 .../ReplaySyncReplicationWALManager.java|  8 
 .../replication/SyncReplicationTestBase.java| 45 +---
 .../replication/TestSyncReplicationActive.java  |  9 ++--
 .../replication/TestSyncReplicationStandBy.java | 31 --
 5 files changed, 89 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/b1423a78/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/RemovePeerProcedure.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/RemovePeerProcedure.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/RemovePeerProcedure.java
index 82dc07e..7335fe0 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/RemovePeerProcedure.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/RemovePeerProcedure.java
@@ -66,9 +66,19 @@ public class RemovePeerProcedure extends ModifyPeerProcedure {
     env.getReplicationPeerManager().removePeer(peerId);
   }
 
+  private void removeRemoteWALs(MasterProcedureEnv env) throws IOException {
+    ReplaySyncReplicationWALManager remoteWALManager =
+        env.getMasterServices().getReplaySyncReplicationWALManager();
+    remoteWALManager.removePeerRemoteWALs(peerId);
+    remoteWALManager.removePeerReplayWALDir(peerId);
+  }
+
   @Override
   protected void postPeerModification(MasterProcedureEnv env)
       throws IOException, ReplicationException {
+    if (peerConfig.isSyncReplication()) {
+      removeRemoteWALs(env);
+    }
     env.getReplicationPeerManager().removeAllQueuesAndHFileRefs(peerId);
     if (peerConfig.isSerial()) {
       env.getReplicationPeerManager().removeAllLastPushedSeqIds(peerId);

http://git-wip-us.apache.org/repos/asf/hbase/blob/b1423a78/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplaySyncReplicationWALManager.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplaySyncReplicationWALManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplaySyncReplicationWALManager.java
index 72f5c37..eac5aa4 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplaySyncReplicationWALManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplaySyncReplicationWALManager.java
@@ -115,6 +115,14 @@ public class ReplaySyncReplicationWALManager {
 }
   }
 
+  public void removePeerRemoteWALs(String peerId) throws IOException {
+    Path remoteWALDir = getPeerRemoteWALDir(peerId);
+    if (fs.exists(remoteWALDir) && !fs.delete(remoteWALDir, true)) {
+      throw new IOException(
+        "Failed to remove remote WALs dir " + remoteWALDir + " for peer id=" + peerId);
+    }
+  }
+
   public void initPeerWorkers(String peerId) {
     BlockingQueue<ServerName> servers = new LinkedBlockingQueue<>();
 services.getServerManager().getOnlineServers().keySet()
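
The exists-then-delete guard in removePeerRemoteWALs above makes the cleanup idempotent: a missing directory counts as success, while a failed delete still surfaces as an error so the procedure can retry. A minimal standalone sketch of the idiom (the method name and message are ours, not HBase's):

import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

static void deleteDirOrThrow(FileSystem fs, Path dir, String what) throws IOException {
  // Missing dir: removal already effective, nothing to do.
  // Present but undeletable: fail loudly so the caller can retry.
  if (fs.exists(dir) && !fs.delete(dir, true)) {
    throw new IOException("Failed to remove " + what + " dir " + dir);
  }
}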

http://git-wip-us.apache.org/repos/asf/hbase/blob/b1423a78/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/SyncReplicationTestBase.java
--
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/SyncReplicationTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/SyncReplicationTestBase.java
index 0d5fce8..de679be 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/SyncReplicationTestBase.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/SyncReplicationTestBase.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.replication;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 import java.io.IOException;
 import java.util.ArrayList;
@@ -71,6 +72,10 @@ public class SyncReplicationTestBase {
 
   protected static String PEER_ID = "1";
 
+  protected static Path remoteWALDir1;
+
+  protected static Path remoteWALDir2;
+
   p

[08/29] hbase git commit: HBASE-19078 Add a remote peer cluster wal directory config for synchronous replication

2018-05-15 Thread zhangduo
HBASE-19078 Add a remote peer cluster wal directory config for synchronous replication

Signed-off-by: zhangduo 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/06c1f2c2
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/06c1f2c2
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/06c1f2c2

Branch: refs/heads/HBASE-19064
Commit: 06c1f2c2f90f68ce083afb6be4ea47f43f0398b0
Parents: 0c5193f
Author: Guanghao Zhang 
Authored: Sat Jan 13 18:55:28 2018 +0800
Committer: zhangduo 
Committed: Wed May 16 09:06:30 2018 +0800

--
 .../replication/ReplicationPeerConfigUtil.java  |  6 ++
 .../replication/ReplicationPeerConfig.java  | 20 -
 .../ReplicationPeerConfigBuilder.java   |  7 ++
 .../src/main/protobuf/Replication.proto |  1 +
 .../replication/ReplicationPeerManager.java | 15 
 .../replication/TestReplicationAdmin.java   | 77 
 .../src/main/ruby/hbase/replication_admin.rb| 14 ++--
 hbase-shell/src/main/ruby/hbase_constants.rb|  1 +
 .../src/main/ruby/shell/commands/add_peer.rb| 21 +-
 .../src/main/ruby/shell/commands/list_peers.rb  | 19 -
 .../test/ruby/hbase/replication_admin_test.rb   | 16 
 11 files changed, 186 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/06c1f2c2/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java
--
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java
index b1c1713..474ded3 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java
@@ -319,6 +319,9 @@ public final class ReplicationPeerConfigUtil {
         excludeNamespacesList.stream().map(ByteString::toStringUtf8).collect(Collectors.toSet()));
     }
 
+if (peer.hasRemoteWALDir()) {
+  builder.setRemoteWALDir(peer.getRemoteWALDir());
+}
 return builder.build();
   }
 
@@ -376,6 +379,9 @@ public final class ReplicationPeerConfigUtil {
   }
 }
 
+if (peerConfig.getRemoteWALDir() != null) {
+  builder.setRemoteWALDir(peerConfig.getRemoteWALDir());
+}
 return builder.build();
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/06c1f2c2/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
--
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
index e0d9a4c..97abc74 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
@@ -47,6 +47,8 @@ public class ReplicationPeerConfig {
   private Set<String> excludeNamespaces = null;
   private long bandwidth = 0;
   private final boolean serial;
+  // Used by synchronous replication
+  private String remoteWALDir;
 
   private ReplicationPeerConfig(ReplicationPeerConfigBuilderImpl builder) {
 this.clusterKey = builder.clusterKey;
@@ -66,6 +68,7 @@ public class ReplicationPeerConfig {
 : null;
 this.bandwidth = builder.bandwidth;
 this.serial = builder.serial;
+this.remoteWALDir = builder.remoteWALDir;
   }
 
   private Map>
@@ -213,6 +216,10 @@ public class ReplicationPeerConfig {
 return this;
   }
 
+  public String getRemoteWALDir() {
+return this.remoteWALDir;
+  }
+
   public static ReplicationPeerConfigBuilder newBuilder() {
 return new ReplicationPeerConfigBuilderImpl();
   }
@@ -230,7 +237,8 @@ public class ReplicationPeerConfig {
   .setReplicateAllUserTables(peerConfig.replicateAllUserTables())
   .setExcludeTableCFsMap(peerConfig.getExcludeTableCFsMap())
   .setExcludeNamespaces(peerConfig.getExcludeNamespaces())
-      .setBandwidth(peerConfig.getBandwidth()).setSerial(peerConfig.isSerial());
+  .setBandwidth(peerConfig.getBandwidth()).setSerial(peerConfig.isSerial())
+  .setRemoteWALDir(peerConfig.getRemoteWALDir());
 return builder;
   }
 
@@ -259,6 +267,8 @@ public class ReplicationPeerConfig {
 
 private boolean serial = false;
 
+private String remoteWALDir = null;
+
 @Override
 public ReplicationPeerConfigBuilder setClusterKey(String clusterKey) {
   this.clusterKey = clusterKey;
@@ -327,6 +337,11 @@ public
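
One practical consequence of the builder change above: copying an existing config through ReplicationPeerConfig.newBuilder(peerConfig) now carries the remote WAL dir along, so a read-modify-write of a peer config no longer silently drops it. A hedged sketch, where peerConfig is any existing config:

ReplicationPeerConfig copy = ReplicationPeerConfig.newBuilder(peerConfig).build();
// copy.getRemoteWALDir() now equals peerConfig.getRemoteWALDir()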

[07/29] hbase git commit: HBASE-19083 Introduce a new log writer which can write to two HDFSes

2018-05-15 Thread zhangduo
HBASE-19083 Introduce a new log writer which can write to two HDFSes


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/0c5193fe
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/0c5193fe
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/0c5193fe

Branch: refs/heads/HBASE-19064
Commit: 0c5193fe4c26d38ab5bfb72d98c263df6b879dc7
Parents: ab53329
Author: zhangduo 
Authored: Thu Jan 11 21:08:02 2018 +0800
Committer: zhangduo 
Committed: Wed May 16 09:06:30 2018 +0800

--
 .../hbase/regionserver/wal/AsyncFSWAL.java  |  21 +--
 .../regionserver/wal/CombinedAsyncWriter.java   | 134 ++
 .../hbase/regionserver/wal/DualAsyncFSWAL.java  |  67 +
 .../wal/AbstractTestProtobufLog.java| 110 +++
 .../regionserver/wal/ProtobufLogTestHelper.java |  99 ++
 .../regionserver/wal/TestAsyncProtobufLog.java  |  32 +
 .../wal/TestCombinedAsyncWriter.java| 136 +++
 .../hbase/regionserver/wal/TestProtobufLog.java |  14 +-
 .../regionserver/wal/WriterOverAsyncWriter.java |  63 +
 9 files changed, 533 insertions(+), 143 deletions(-)
--
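
The idea of the new writer in miniature: each append is mirrored to a local WAL and a remote one, and a sync completes only when both underlying syncs complete. The sketch below is a toy illustration of that fan-out pattern, not the actual CombinedAsyncWriter API:

import java.io.IOException;
import java.util.concurrent.CompletableFuture;

interface ToyWriter {
  void append(byte[] entry) throws IOException;
  CompletableFuture<Void> sync();
}

final class ToyCombinedWriter implements ToyWriter {
  private final ToyWriter local;
  private final ToyWriter remote;

  ToyCombinedWriter(ToyWriter local, ToyWriter remote) {
    this.local = local;
    this.remote = remote;
  }

  @Override
  public void append(byte[] entry) throws IOException {
    local.append(entry);  // write to the WAL on the local HDFS
    remote.append(entry); // and mirror it to the remote cluster's HDFS
  }

  @Override
  public CompletableFuture<Void> sync() {
    // Durable only once both clusters have the edit on disk.
    return CompletableFuture.allOf(local.sync(), remote.sync());
  }
}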


http://git-wip-us.apache.org/repos/asf/hbase/blob/0c5193fe/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java
index e34818f..0bee9d6 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java
@@ -607,12 +607,16 @@ public class AsyncFSWAL extends AbstractFSWAL {
 }
   }
 
-  @Override
-  protected AsyncWriter createWriterInstance(Path path) throws IOException {
+  protected final AsyncWriter createAsyncWriter(FileSystem fs, Path path) throws IOException {
     return AsyncFSWALProvider.createAsyncWriter(conf, fs, path, false, eventLoopGroup,
       channelClass);
   }
 
+  @Override
+  protected AsyncWriter createWriterInstance(Path path) throws IOException {
+return createAsyncWriter(fs, path);
+  }
+
   private void waitForSafePoint() {
 consumeLock.lock();
 try {
@@ -632,13 +636,12 @@ public class AsyncFSWAL extends AbstractFSWAL {
 }
   }
 
-  private long closeWriter() {
-AsyncWriter oldWriter = this.writer;
-if (oldWriter != null) {
-  long fileLength = oldWriter.getLength();
+  protected final long closeWriter(AsyncWriter writer) {
+if (writer != null) {
+  long fileLength = writer.getLength();
   closeExecutor.execute(() -> {
 try {
-  oldWriter.close();
+  writer.close();
 } catch (IOException e) {
   LOG.warn("close old writer failed", e);
 }
@@ -654,7 +657,7 @@ public class AsyncFSWAL extends AbstractFSWAL {
   throws IOException {
 Preconditions.checkNotNull(nextWriter);
 waitForSafePoint();
-long oldFileLen = closeWriter();
+long oldFileLen = closeWriter(this.writer);
 logRollAndSetupWalProps(oldPath, newPath, oldFileLen);
 this.writer = nextWriter;
 if (nextWriter instanceof AsyncProtobufLogWriter) {
@@ -679,7 +682,7 @@ public class AsyncFSWAL extends AbstractFSWAL {
   @Override
   protected void doShutdown() throws IOException {
 waitForSafePoint();
-closeWriter();
+closeWriter(this.writer);
 closeExecutor.shutdown();
 try {
      if (!closeExecutor.awaitTermination(waitOnShutdownInSeconds, TimeUnit.SECONDS)) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/0c5193fe/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/CombinedAsyncWriter.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/CombinedAsyncWriter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/CombinedAsyncWriter.java
new file mode 100644
index 0000000..8ecfede
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/CombinedAsyncWriter.java
@@ -0,0 +1,134 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by app

[12/29] hbase git commit: HBASE-19857 Complete the procedure for adding a sync replication peer

2018-05-15 Thread zhangduo
HBASE-19857 Complete the procedure for adding a sync replication peer


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/c9ee48c3
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/c9ee48c3
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/c9ee48c3

Branch: refs/heads/HBASE-19064
Commit: c9ee48c377a14c1bd50a9604a6136dc42459f7eb
Parents: 39cef39
Author: zhangduo 
Authored: Thu Jan 25 20:09:00 2018 +0800
Committer: zhangduo 
Committed: Wed May 16 09:06:30 2018 +0800

--
 .../hbase/replication/ReplicationPeer.java  |   9 +
 .../hbase/replication/ReplicationPeerImpl.java  |  28 +--
 .../hbase/replication/ReplicationPeers.java |   3 +-
 .../regionserver/PeerActionListener.java|  10 +-
 .../SyncReplicationPeerProvider.java|  35 +++
 .../SynchronousReplicationPeerProvider.java |  35 ---
 .../hbase/wal/SyncReplicationWALProvider.java   | 234 +++
 .../wal/SynchronousReplicationWALProvider.java  | 225 --
 .../org/apache/hadoop/hbase/wal/WALFactory.java |   8 +-
 .../TestReplicationSourceManager.java   |   3 +
 .../wal/TestSyncReplicationWALProvider.java | 153 
 .../TestSynchronousReplicationWALProvider.java  | 153 
 12 files changed, 456 insertions(+), 440 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/c9ee48c3/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java
--
diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java
index 2da3cce..0196a9a 100644
--- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java
+++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java
@@ -54,6 +54,15 @@ public interface ReplicationPeer {
   PeerState getPeerState();
 
   /**
+   * Returns the sync replication state of the peer by reading local cache.
+   * <p>
+   * If the peer is not a synchronous replication peer, a {@link SyncReplicationState#NONE} will be
+   * returned.
+   * @return the sync replication state
+   */
+  SyncReplicationState getSyncReplicationState();
+
+  /**
* Test whether the peer is enabled.
* @return {@code true} if enabled, otherwise {@code false}.
*/
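
A hedged usage sketch of the new accessor (peer is any ReplicationPeer): NONE distinguishes "not a sync replication peer at all" from the real ACTIVE/DOWNGRADE_ACTIVE/STANDBY states, so callers can branch without a separate config check.

SyncReplicationState state = peer.getSyncReplicationState();
if (state == SyncReplicationState.NONE) {
  // plain asynchronous peer; no sync replication handling needed
} else {
  // state is ACTIVE, DOWNGRADE_ACTIVE or STANDBY
}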

http://git-wip-us.apache.org/repos/asf/hbase/blob/c9ee48c3/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerImpl.java
--
diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerImpl.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerImpl.java
index d656466..ff3f662 100644
--- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerImpl.java
+++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerImpl.java
@@ -36,6 +36,8 @@ public class ReplicationPeerImpl implements ReplicationPeer {
 
   private volatile PeerState peerState;
 
+  private volatile SyncReplicationState syncReplicationState;
+
   private final List<ReplicationPeerConfigListener> peerConfigListeners;
 
   /**
@@ -45,12 +47,13 @@ public class ReplicationPeerImpl implements ReplicationPeer {
* @param id string representation of this peer's identifier
* @param peerConfig configuration for the replication peer
*/
-  public ReplicationPeerImpl(Configuration conf, String id, boolean peerState,
-  ReplicationPeerConfig peerConfig) {
+  public ReplicationPeerImpl(Configuration conf, String id, ReplicationPeerConfig peerConfig,
+      boolean peerState, SyncReplicationState syncReplicationState) {
 this.conf = conf;
 this.id = id;
 this.peerState = peerState ? PeerState.ENABLED : PeerState.DISABLED;
 this.peerConfig = peerConfig;
+this.syncReplicationState = syncReplicationState;
 this.peerConfigListeners = new ArrayList<>();
   }
 
@@ -77,37 +80,26 @@ public class ReplicationPeerImpl implements ReplicationPeer {
 return peerState;
   }
 
-  /**
-   * Get the peer config object
-   * @return the ReplicationPeerConfig for this peer
-   */
+  @Override
+  public SyncReplicationState getSyncReplicationState() {
+return syncReplicationState;
+  }
+
   @Override
   public ReplicationPeerConfig getPeerConfig() {
 return peerConfig;
   }
 
-  /**
-   * Get the configuration object required to communicate with this peer
-   * @return configuration object
-   */
   @Override
   public Configuration getConfiguration() {
 return conf;
   }
 
-  /**
-   * Get replicable (table, cf-list) map of

[21/29] hbase git commit: HBASE-19079 Support setting up two clusters with A and S state

2018-05-15 Thread zhangduo
HBASE-19079 Support setting up two clusters with A and S state


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/0cf2aafc
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/0cf2aafc
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/0cf2aafc

Branch: refs/heads/HBASE-19064
Commit: 0cf2aafcddd637b08d82007f86d955c4689a76cf
Parents: d303ea9
Author: zhangduo 
Authored: Tue Apr 10 22:35:19 2018 +0800
Committer: zhangduo 
Committed: Wed May 16 09:06:30 2018 +0800

--
 .../replication/ReplicationPeerManager.java |   5 +-
 ...ransitPeerSyncReplicationStateProcedure.java |   2 +-
 .../hbase/regionserver/wal/DualAsyncFSWAL.java  |  14 ++
 .../hadoop/hbase/regionserver/wal/WALUtil.java  |  25 ++-
 .../hbase/replication/ChainWALEntryFilter.java  |  28 +--
 .../ReplaySyncReplicationWALCallable.java   |  27 ++-
 .../SyncReplicationPeerInfoProviderImpl.java|   6 +-
 .../hadoop/hbase/wal/AbstractFSWALProvider.java |  10 +-
 .../hbase/wal/SyncReplicationWALProvider.java   |  94 ++---
 .../org/apache/hadoop/hbase/wal/WALEdit.java|   8 +-
 .../org/apache/hadoop/hbase/wal/WALFactory.java |   2 +-
 .../replication/TestReplicationAdmin.java   |  33 +--
 .../regionserver/wal/TestWALDurability.java |   2 +
 .../replication/SyncReplicationTestBase.java| 185 +
 .../hbase/replication/TestSyncReplication.java  | 207 ---
 .../replication/TestSyncReplicationActive.java  |  64 ++
 .../replication/TestSyncReplicationStandBy.java |  96 +
 17 files changed, 521 insertions(+), 287 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/0cf2aafc/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
index 41dd6e3..229549e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
@@ -68,8 +68,9 @@ public class ReplicationPeerManager {
 
   private final ImmutableMap<SyncReplicationState, EnumSet<SyncReplicationState>>
     allowedTransition = Maps.immutableEnumMap(ImmutableMap.of(SyncReplicationState.ACTIVE,
-      EnumSet.of(SyncReplicationState.DOWNGRADE_ACTIVE), SyncReplicationState.STANDBY,
-      EnumSet.of(SyncReplicationState.DOWNGRADE_ACTIVE), SyncReplicationState.DOWNGRADE_ACTIVE,
+      EnumSet.of(SyncReplicationState.DOWNGRADE_ACTIVE, SyncReplicationState.STANDBY),
+      SyncReplicationState.STANDBY, EnumSet.of(SyncReplicationState.DOWNGRADE_ACTIVE),
+      SyncReplicationState.DOWNGRADE_ACTIVE,
       EnumSet.of(SyncReplicationState.STANDBY, SyncReplicationState.ACTIVE)));
 
   ReplicationPeerManager(ReplicationPeerStorage peerStorage, ReplicationQueueStorage queueStorage,
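
Spelled out, the transition table above allows ACTIVE -> DOWNGRADE_ACTIVE or STANDBY, STANDBY -> DOWNGRADE_ACTIVE only, and DOWNGRADE_ACTIVE -> STANDBY or ACTIVE. A small self-contained encoding of the same table for experimentation outside HBase (enum and method names here are ours):

import java.util.EnumMap;
import java.util.EnumSet;
import java.util.Map;

enum SyncState { ACTIVE, DOWNGRADE_ACTIVE, STANDBY }

final class Transitions {
  private static final Map<SyncState, EnumSet<SyncState>> ALLOWED = new EnumMap<>(SyncState.class);
  static {
    ALLOWED.put(SyncState.ACTIVE, EnumSet.of(SyncState.DOWNGRADE_ACTIVE, SyncState.STANDBY));
    ALLOWED.put(SyncState.STANDBY, EnumSet.of(SyncState.DOWNGRADE_ACTIVE));
    ALLOWED.put(SyncState.DOWNGRADE_ACTIVE, EnumSet.of(SyncState.STANDBY, SyncState.ACTIVE));
  }

  // True iff a peer may move from 'from' to 'to' in a single transition.
  static boolean canTransit(SyncState from, SyncState to) {
    return ALLOWED.getOrDefault(from, EnumSet.noneOf(SyncState.class)).contains(to);
  }
}

Note that STANDBY cannot jump straight to ACTIVE: it must pass through DOWNGRADE_ACTIVE, which (per the procedure change below) replays the remote WALs first.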

http://git-wip-us.apache.org/repos/asf/hbase/blob/0cf2aafc/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/TransitPeerSyncReplicationStateProcedure.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/TransitPeerSyncReplicationStateProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/TransitPeerSyncReplicationStateProcedure.java
index cc51890..5da2b0c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/TransitPeerSyncReplicationStateProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/TransitPeerSyncReplicationStateProcedure.java
@@ -171,7 +171,7 @@ public class TransitPeerSyncReplicationStateProcedure
 }
 return Flow.HAS_MORE_STATE;
   case REPLAY_REMOTE_WAL_IN_PEER:
-// TODO: replay remote wal when transiting from S to DA.
+addChildProcedure(new RecoverStandbyProcedure(peerId));
 
setNextState(PeerSyncReplicationStateTransitionState.REOPEN_ALL_REGIONS_IN_PEER);
 return Flow.HAS_MORE_STATE;
   case REOPEN_ALL_REGIONS_IN_PEER:

http://git-wip-us.apache.org/repos/asf/hbase/blob/0cf2aafc/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/DualAsyncFSWAL.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/DualAsyncFSWAL.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/DualAsyncFSWAL.java
index 0495337..a98567a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/DualAsyn

[26/29] hbase git commit: HBASE-20425 Do not write the cluster id of the current active cluster when writing remote WAL

2018-05-15 Thread zhangduo
HBASE-20425 Do not write the cluster id of the current active cluster when writing remote WAL


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/9ed1f4eb
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/9ed1f4eb
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/9ed1f4eb

Branch: refs/heads/HBASE-19064
Commit: 9ed1f4eb869bfdade1866124eabc7a64eee7b98c
Parents: 5bc7392
Author: huzheng 
Authored: Mon Apr 23 17:20:55 2018 +0800
Committer: zhangduo 
Committed: Wed May 16 09:06:30 2018 +0800

--
 .../replication/TestSyncReplicationActive.java  | 32 
 1 file changed, 32 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/9ed1f4eb/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSyncReplicationActive.java
--
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSyncReplicationActive.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSyncReplicationActive.java
index bff4572..f9020a0 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSyncReplicationActive.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSyncReplicationActive.java
@@ -17,9 +17,17 @@
  */
 package org.apache.hadoop.hbase.replication;
 
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.testclassification.ReplicationTests;
+import org.apache.hadoop.hbase.wal.WAL.Entry;
+import org.apache.hadoop.hbase.wal.WAL.Reader;
+import org.apache.hadoop.hbase.wal.WALFactory;
+import org.junit.Assert;
 import org.junit.ClassRule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
@@ -49,6 +57,9 @@ public class TestSyncReplicationActive extends SyncReplicationTestBase {
 // peer is disabled so no data have been replicated
 verifyNotReplicatedThroughRegion(UTIL2, 0, 100);
 
+// Ensure that there's no cluster id in remote log entries.
+verifyNoClusterIdInRemoteLog(UTIL2, PEER_ID);
+
 UTIL2.getAdmin().transitReplicationPeerSyncReplicationState(PEER_ID,
   SyncReplicationState.DOWNGRADE_ACTIVE);
 // confirm that peer with state DA will reject replication request.
@@ -72,4 +83,25 @@ public class TestSyncReplicationActive extends SyncReplicationTestBase {
 verifyReplicationRequestRejection(UTIL2, true);
 write(UTIL2, 200, 300);
   }
+
+  private void verifyNoClusterIdInRemoteLog(HBaseTestingUtility utility, String peerId)
+      throws Exception {
+    FileSystem fs2 = utility.getTestFileSystem();
+    Path remoteDir =
+        new Path(utility.getMiniHBaseCluster().getMaster().getMasterFileSystem().getRootDir(),
+            "remoteWALs").makeQualified(fs2.getUri(), fs2.getWorkingDirectory());
+    FileStatus[] files = fs2.listStatus(new Path(remoteDir, peerId));
+    Assert.assertTrue(files.length > 0);
+    for (FileStatus file : files) {
+      try (Reader reader =
+          WALFactory.createReader(fs2, file.getPath(), utility.getConfiguration())) {
+        Entry entry = reader.next();
+        Assert.assertTrue(entry != null);
+        while (entry != null) {
+          Assert.assertEquals(entry.getKey().getClusterIds().size(), 0);
+          entry = reader.next();
+        }
+      }
+    }
+  }
 }



[27/29] hbase git commit: HBASE-20458 Support removing a WAL from LogRoller

2018-05-15 Thread zhangduo
HBASE-20458 Support removing a WAL from LogRoller


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/4f4f0c7e
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/4f4f0c7e
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/4f4f0c7e

Branch: refs/heads/HBASE-19064
Commit: 4f4f0c7e03fb89d47934c38f286a5fbc3ce5e34c
Parents: 06fa05d
Author: Guanghao Zhang 
Authored: Mon Apr 23 16:31:54 2018 +0800
Committer: zhangduo 
Committed: Wed May 16 09:06:30 2018 +0800

--
 .../hadoop/hbase/regionserver/LogRoller.java| 29 +--
 .../hbase/regionserver/wal/AbstractFSWAL.java   |  7 +-
 .../regionserver/wal/WALClosedException.java| 47 ++
 .../hbase/regionserver/TestLogRoller.java   | 90 
 .../regionserver/wal/AbstractTestFSWAL.java |  9 ++
 5 files changed, 171 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/4f4f0c7e/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/LogRoller.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/LogRoller.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/LogRoller.java
index 55c5219..ab0083f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/LogRoller.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/LogRoller.java
@@ -20,6 +20,8 @@ package org.apache.hadoop.hbase.regionserver;
 
 import java.io.Closeable;
 import java.io.IOException;
+import java.util.Iterator;
+import java.util.Map;
 import java.util.Map.Entry;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.atomic.AtomicBoolean;
@@ -30,6 +32,7 @@ import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL;
 import org.apache.hadoop.hbase.regionserver.wal.FailedLogCloseException;
 import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener;
+import org.apache.hadoop.hbase.regionserver.wal.WALClosedException;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.HasThread;
 import org.apache.hadoop.hbase.wal.WAL;
@@ -177,17 +180,24 @@ public class LogRoller extends HasThread implements Closeable {
   rollLock.lock(); // FindBugs UL_UNRELEASED_LOCK_EXCEPTION_PATH
   try {
 this.lastrolltime = now;
-    for (Entry<WAL, Boolean> entry : walNeedsRoll.entrySet()) {
+    for (Iterator<Entry<WAL, Boolean>> iter = walNeedsRoll.entrySet().iterator(); iter
+        .hasNext();) {
+      Entry<WAL, Boolean> entry = iter.next();
   final WAL wal = entry.getKey();
      // Force the roll if the logroll.period is elapsed or if a roll was requested.
   // The returned value is an array of actual region names.
-  final byte [][] regionsToFlush = wal.rollWriter(periodic ||
-  entry.getValue().booleanValue());
-  walNeedsRoll.put(wal, Boolean.FALSE);
-  if (regionsToFlush != null) {
-for (byte[] r : regionsToFlush) {
-  scheduleFlush(r);
+      try {
+        final byte[][] regionsToFlush =
+            wal.rollWriter(periodic || entry.getValue().booleanValue());
+        walNeedsRoll.put(wal, Boolean.FALSE);
+        if (regionsToFlush != null) {
+          for (byte[] r : regionsToFlush) {
+            scheduleFlush(r);
+          }
         }
+      } catch (WALClosedException e) {
+        LOG.warn("WAL has been closed. Skipping rolling of writer and just remove it", e);
+        iter.remove();
       }
 }
   } catch (FailedLogCloseException e) {
@@ -252,4 +262,9 @@ public class LogRoller extends HasThread implements Closeable {
 running = false;
 interrupt();
   }
+
+  @VisibleForTesting
+  Map<WAL, Boolean> getWalNeedsRoll() {
+return this.walNeedsRoll;
+  }
 }
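
The switch from a for-each loop to an explicit Iterator above is what makes the in-loop removal safe: Iterator.remove() is the supported way to drop an entry while walking a map's entry set. A generic illustration of the same pattern, with roll() standing in for wal.rollWriter():

for (Iterator<Entry<WAL, Boolean>> it = walNeedsRoll.entrySet().iterator(); it.hasNext();) {
  Entry<WAL, Boolean> e = it.next();
  try {
    roll(e.getKey());  // may throw WALClosedException if the WAL was closed underneath us
  } catch (WALClosedException ex) {
    it.remove();       // forget the closed WAL instead of retrying it forever
  }
}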

http://git-wip-us.apache.org/repos/asf/hbase/blob/4f4f0c7e/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
index 4816d77..2c0c72b1 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
@@ -750,15 +750,14 @@ public abstract class AbstractFSWAL implements WAL {
   public byte[][] rollWriter(boolean force) throws FailedLogCloseException, IOException {
 rollWriterLock.lock();
 try {
+  if (this.closed) {
+throw new WALClosedExcept

[25/29] hbase git commit: HBASE-20576 Check remote WAL directory when creating peer and transiting peer to A

2018-05-15 Thread zhangduo
HBASE-20576 Check remote WAL directory when creating peer and transiting peer to A


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/f60a710e
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/f60a710e
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/f60a710e

Branch: refs/heads/HBASE-19064
Commit: f60a710ef600f20d8d2915c09b52e20ac89c320f
Parents: 6e40908
Author: zhangduo 
Authored: Tue May 15 15:07:40 2018 +0800
Committer: zhangduo 
Committed: Wed May 16 09:06:30 2018 +0800

--
 .../replication/ReplicationPeerManager.java | 19 +++--
 ...ransitPeerSyncReplicationStateProcedure.java | 73 +---
 .../replication/TestReplicationAdmin.java   | 57 ---
 3 files changed, 110 insertions(+), 39 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/f60a710e/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
index e1d8b51..8e49137 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hbase.master.replication;
 
 import java.io.IOException;
+import java.net.URI;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.EnumSet;
@@ -31,6 +32,7 @@ import java.util.regex.Pattern;
 import java.util.stream.Collectors;
 import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
@@ -45,7 +47,6 @@ import org.apache.hadoop.hbase.replication.ReplicationQueueStorage;
 import org.apache.hadoop.hbase.replication.ReplicationStorageFactory;
 import org.apache.hadoop.hbase.replication.ReplicationUtils;
 import org.apache.hadoop.hbase.replication.SyncReplicationState;
-import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.zookeeper.ZKConfig;
 import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
 import org.apache.yetus.audience.InterfaceAudience;
@@ -193,9 +194,9 @@ public class ReplicationPeerManager {
   }
 
   /**
-   * @return the old state, and whether the peer is enabled.
+   * @return the old description of the peer
    */
-  Pair<SyncReplicationState, Boolean> preTransitPeerSyncReplicationState(String peerId,
+  ReplicationPeerDescription preTransitPeerSyncReplicationState(String peerId,
      SyncReplicationState state) throws DoNotRetryIOException {
 ReplicationPeerDescription desc = checkPeerExists(peerId);
 SyncReplicationState fromState = desc.getSyncReplicationState();
@@ -204,7 +205,7 @@ public class ReplicationPeerManager {
      throw new DoNotRetryIOException("Can not transit current cluster state from " + fromState +
        " to " + state + " for peer id=" + peerId);
 }
-return Pair.newPair(fromState, desc.isEnabled());
+return desc;
   }
 
   public void addPeer(String peerId, ReplicationPeerConfig peerConfig, boolean enabled)
@@ -384,6 +385,16 @@ public class ReplicationPeerManager {
   "Only support replicated table config for sync replication peer");
   }
 }
+    Path remoteWALDir = new Path(peerConfig.getRemoteWALDir());
+    if (!remoteWALDir.isAbsolute()) {
+      throw new DoNotRetryIOException(
+        "The remote WAL directory " + peerConfig.getRemoteWALDir() + " is not absolute");
+    }
+    URI remoteWALDirUri = remoteWALDir.toUri();
+    if (remoteWALDirUri.getScheme() == null || remoteWALDirUri.getAuthority() == null) {
+      throw new DoNotRetryIOException("The remote WAL directory " + peerConfig.getRemoteWALDir() +
+        " is not qualified, you must provide scheme and authority");
+    }
   }
 
   /**
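
Concretely, the checks above accept only fully qualified remote WAL locations. A hedged example (host and port are placeholders):

Path dir = new Path("hdfs://nn1.example.com:8020/remoteWALs");
URI uri = dir.toUri();
// passes: absolute, with scheme "hdfs" and authority "nn1.example.com:8020";
// "/remoteWALs" would be rejected (no scheme/authority), "remoteWALs" is not even absolute
boolean ok = dir.isAbsolute() && uri.getScheme() != null && uri.getAuthority() != null;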

http://git-wip-us.apache.org/repos/asf/hbase/blob/f60a710e/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/TransitPeerSyncReplicationStateProcedure.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/TransitPeerSyncReplicationStateProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/TransitPeerSyncReplicationStateProcedure.java
index 0175296..ebe7a93 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/TransitPeerSyncReplicationStateProcedure.java
+++ b

[28/29] hbase git commit: HBASE-19865 Add UT for sync replication peer in DA state

2018-05-15 Thread zhangduo
HBASE-19865 Add UT for sync replication peer in DA state


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/6e40908d
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/6e40908d
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/6e40908d

Branch: refs/heads/HBASE-19064
Commit: 6e40908defd18fc26e0f72dc1faacd3bedd48f73
Parents: a0e6719
Author: zhangduo 
Authored: Tue May 8 20:33:22 2018 +0800
Committer: zhangduo 
Committed: Wed May 16 09:06:30 2018 +0800

--
 .../hbase/replication/TestReplicationBase.java  | 28 +++---
 ...estReplicationChangingPeerRegionservers.java | 20 ++
 .../TestReplicationSmallTestsSync.java  | 40 
 3 files changed, 76 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/6e40908d/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationBase.java
--
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationBase.java
index f96dbe5..cd84293 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationBase.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationBase.java
@@ -1,5 +1,4 @@
-/*
- *
+/**
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -28,6 +27,8 @@ import java.util.List;
 import java.util.NavigableMap;
 import java.util.TreeMap;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;
@@ -58,6 +59,9 @@ import org.junit.BeforeClass;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;
+import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableMap;
+
 /**
  * This class is only a base for other integration-level replication tests.
  * Do not add tests here.
@@ -99,6 +103,10 @@ public class TestReplicationBase {
 return false;
   }
 
+  protected boolean isSyncPeer() {
+return false;
+  }
+
   protected final void cleanUp() throws IOException, InterruptedException {
 // Starting and stopping replication can make us miss new logs,
 // rolling like this makes sure the most recent one gets added to the queue
@@ -245,9 +253,19 @@ public class TestReplicationBase {
   @Before
   public void setUpBase() throws Exception {
 if (!peerExist(PEER_ID2)) {
-  ReplicationPeerConfig rpc = ReplicationPeerConfig.newBuilder()
-      .setClusterKey(utility2.getClusterKey()).setSerial(isSerialPeer()).build();
-  hbaseAdmin.addReplicationPeer(PEER_ID2, rpc);
+      ReplicationPeerConfigBuilder builder = ReplicationPeerConfig.newBuilder()
+        .setClusterKey(utility2.getClusterKey()).setSerial(isSerialPeer());
+      if (isSyncPeer()) {
+        FileSystem fs2 = utility2.getTestFileSystem();
+        // The remote wal dir is not important as we do not use it in DA state, here we only need to
+        // confirm that a sync peer in DA state can still replicate data to remote cluster
+        // asynchronously.
+        builder.setReplicateAllUserTables(false)
+          .setTableCFsMap(ImmutableMap.of(tableName, ImmutableList.of()))
+          .setRemoteWALDir(new Path("/RemoteWAL")
+            .makeQualified(fs2.getUri(), fs2.getWorkingDirectory()).toUri().toString());
+      }
+      hbaseAdmin.addReplicationPeer(PEER_ID2, builder.build());
 }
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/6e40908d/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationChangingPeerRegionservers.java
--
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationChangingPeerRegionservers.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationChangingPeerRegionservers.java
index b94b443..5c96742 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationChangingPeerRegionservers.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationChangingPeerRegionservers.java
@@ -62,22 +62,28 @@ public class TestReplicationChangingPeerRegionservers extends TestReplicationBase
   private static final Logger LOG =
   LoggerFactory.getLogger(TestReplica

[09/29] hbase git commit: HBASE-19747 Introduce a special WALProvider for synchronous replication

2018-05-15 Thread zhangduo
HBASE-19747 Introduce a special WALProvider for synchronous replication


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/3b55ac4f
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/3b55ac4f
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/3b55ac4f

Branch: refs/heads/HBASE-19064
Commit: 3b55ac4fa5f27bd5a180c47291b92f2e73295e24
Parents: 06c1f2c
Author: zhangduo 
Authored: Fri Jan 19 18:38:39 2018 +0800
Committer: zhangduo 
Committed: Wed May 16 09:06:30 2018 +0800

--
 .../hbase/regionserver/wal/AbstractFSWAL.java   |   7 +
 .../hbase/regionserver/wal/AsyncFSWAL.java  |   1 -
 .../hbase/regionserver/wal/DualAsyncFSWAL.java  |   4 +-
 .../hadoop/hbase/regionserver/wal/FSHLog.java   |   4 -
 .../regionserver/PeerActionListener.java|  33 +++
 .../SynchronousReplicationPeerProvider.java |  35 +++
 .../hadoop/hbase/wal/AbstractFSWALProvider.java |   1 +
 .../hadoop/hbase/wal/AsyncFSWALProvider.java|  18 +-
 .../hbase/wal/NettyAsyncFSWALConfigHelper.java  |   8 +-
 .../hbase/wal/RegionGroupingProvider.java   |  13 +-
 .../wal/SynchronousReplicationWALProvider.java  | 225 +++
 .../org/apache/hadoop/hbase/wal/WALFactory.java |  37 ++-
 .../org/apache/hadoop/hbase/wal/WALKeyImpl.java |  16 +-
 .../regionserver/TestCompactionPolicy.java  |   1 +
 .../regionserver/TestFailedAppendAndSync.java   | 122 +-
 .../hadoop/hbase/regionserver/TestHRegion.java  |  24 +-
 .../TestHRegionWithInMemoryFlush.java   |   7 -
 .../hbase/regionserver/TestRegionIncrement.java |  20 +-
 .../hbase/regionserver/TestWALLockup.java   |   1 +
 .../regionserver/wal/AbstractTestWALReplay.java |   1 +
 .../regionserver/wal/ProtobufLogTestHelper.java |  44 +++-
 .../hbase/regionserver/wal/TestAsyncFSWAL.java  |  13 +-
 .../regionserver/wal/TestAsyncWALReplay.java|   4 +-
 .../wal/TestCombinedAsyncWriter.java|   3 +-
 .../hbase/regionserver/wal/TestFSHLog.java  |  15 +-
 .../hbase/regionserver/wal/TestWALReplay.java   |   1 +
 .../apache/hadoop/hbase/wal/IOTestProvider.java |   2 -
 .../TestSynchronousReplicationWALProvider.java  | 153 +
 28 files changed, 659 insertions(+), 154 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/3b55ac4f/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
index ce8dafa..4816d77 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
@@ -430,6 +430,13 @@ public abstract class AbstractFSWAL implements WAL {
 this.implClassName = getClass().getSimpleName();
   }
 
+  /**
+   * Used to initialize the WAL. Usually just call rollWriter to create the first log writer.
+   */
+  public void init() throws IOException {
+    rollWriter();
+  }
+
   @Override
   public void registerWALActionsListener(WALActionsListener listener) {
 this.listeners.add(listener);

http://git-wip-us.apache.org/repos/asf/hbase/blob/3b55ac4f/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java
index 0bee9d6..17133ed 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java
@@ -248,7 +248,6 @@ public class AsyncFSWAL extends AbstractFSWAL {
 batchSize = conf.getLong(WAL_BATCH_SIZE, DEFAULT_WAL_BATCH_SIZE);
     waitOnShutdownInSeconds = conf.getInt(ASYNC_WAL_WAIT_ON_SHUTDOWN_IN_SECONDS,
       DEFAULT_ASYNC_WAL_WAIT_ON_SHUTDOWN_IN_SECONDS);
-rollWriter();
   }
 
   private static boolean waitingRoll(int epochAndState) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/3b55ac4f/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/DualAsyncFSWAL.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/DualAsyncFSWAL.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/DualAsyncFSWAL.java
index 42b0dae..0495337 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/DualAsyncFSWAL.java
+++ b/hbase-ser

[10/29] hbase git commit: HBASE-19781 Add a new cluster state flag for synchronous replication

2018-05-15 Thread zhangduo
http://git-wip-us.apache.org/repos/asf/hbase/blob/39cef397/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckReplication.java
--
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckReplication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckReplication.java
index 8911982..f5eca39 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckReplication.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckReplication.java
@@ -28,6 +28,7 @@ import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
 import org.apache.hadoop.hbase.replication.ReplicationPeerStorage;
 import org.apache.hadoop.hbase.replication.ReplicationQueueStorage;
 import org.apache.hadoop.hbase.replication.ReplicationStorageFactory;
+import org.apache.hadoop.hbase.replication.SyncReplicationState;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.testclassification.ReplicationTests;
 import org.apache.hadoop.hbase.util.HBaseFsck.ErrorReporter.ERROR_CODE;
@@ -67,9 +68,9 @@ public class TestHBaseFsckReplication {
 String peerId1 = "1";
 String peerId2 = "2";
     peerStorage.addPeer(peerId1, ReplicationPeerConfig.newBuilder().setClusterKey("key").build(),
-      true);
+      true, SyncReplicationState.NONE);
     peerStorage.addPeer(peerId2, ReplicationPeerConfig.newBuilder().setClusterKey("key").build(),
-      true);
+      true, SyncReplicationState.NONE);
     for (int i = 0; i < 10; i++) {
       queueStorage.addWAL(ServerName.valueOf("localhost", 1 + i, 10 + i), peerId1,
         "file-" + i);

http://git-wip-us.apache.org/repos/asf/hbase/blob/39cef397/hbase-shell/src/main/ruby/hbase/replication_admin.rb
--
diff --git a/hbase-shell/src/main/ruby/hbase/replication_admin.rb b/hbase-shell/src/main/ruby/hbase/replication_admin.rb
index d1f1344..5f86365 100644
--- a/hbase-shell/src/main/ruby/hbase/replication_admin.rb
+++ b/hbase-shell/src/main/ruby/hbase/replication_admin.rb
@@ -20,6 +20,7 @@
 include Java
 
java_import org.apache.hadoop.hbase.client.replication.ReplicationPeerConfigUtil
+java_import org.apache.hadoop.hbase.replication.SyncReplicationState
 java_import org.apache.hadoop.hbase.replication.ReplicationPeerConfig
 java_import org.apache.hadoop.hbase.util.Bytes
 java_import org.apache.hadoop.hbase.zookeeper.ZKConfig
@@ -338,6 +339,20 @@ module Hbase
   '!' + ReplicationPeerConfigUtil.convertToString(tableCFs)
 end
 
+    # Transit current cluster to a new state in the specified synchronous
+    # replication peer
+    def transit_peer_sync_replication_state(id, state)
+      if 'ACTIVE'.eql?(state)
+        @admin.transitReplicationPeerSyncReplicationState(id, SyncReplicationState::ACTIVE)
+      elsif 'DOWNGRADE_ACTIVE'.eql?(state)
+        @admin.transitReplicationPeerSyncReplicationState(id, SyncReplicationState::DOWNGRADE_ACTIVE)
+      elsif 'STANDBY'.eql?(state)
+        @admin.transitReplicationPeerSyncReplicationState(id, SyncReplicationState::STANDBY)
+      else
+        raise(ArgumentError, 'synchronous replication state must be ACTIVE, DOWNGRADE_ACTIVE or STANDBY')
+      end
+    end
+
 
#--
 # Enables a table's replication switch
 def enable_tablerep(table_name)

http://git-wip-us.apache.org/repos/asf/hbase/blob/39cef397/hbase-shell/src/main/ruby/shell.rb
--
diff --git a/hbase-shell/src/main/ruby/shell.rb b/hbase-shell/src/main/ruby/shell.rb
index 9a79658..934fa11 100644
--- a/hbase-shell/src/main/ruby/shell.rb
+++ b/hbase-shell/src/main/ruby/shell.rb
@@ -393,6 +393,7 @@ Shell.load_command_group(
 get_peer_config
 list_peer_configs
 update_peer_config
+transit_peer_sync_replication_state
   ]
 )
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/39cef397/hbase-shell/src/main/ruby/shell/commands/list_peers.rb
--
diff --git a/hbase-shell/src/main/ruby/shell/commands/list_peers.rb b/hbase-shell/src/main/ruby/shell/commands/list_peers.rb
index f3ab749..f2ec014 100644
--- a/hbase-shell/src/main/ruby/shell/commands/list_peers.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/list_peers.rb
@@ -39,8 +39,8 @@ EOF
 peers = replication_admin.list_peers
 
     formatter.header(%w[PEER_ID CLUSTER_KEY ENDPOINT_CLASSNAME
-                        REMOTE_ROOT_DIR STATE REPLICATE_ALL
-                        NAMESPACES TABLE_CFS BANDWIDTH
+                        REMOTE_ROOT_DIR SYNC_REPLICATION_STATE STATE
+                        REPLICATE_ALL NAMESPACES TABLE_CFS BANDWIDTH

[11/29] hbase git commit: HBASE-19781 Add a new cluster state flag for synchronous replication

2018-05-15 Thread zhangduo
HBASE-19781 Add a new cluster state flag for synchronous replication


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/39cef397
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/39cef397
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/39cef397

Branch: refs/heads/HBASE-19064
Commit: 39cef397af914386232311f9adfa070310cb0ae6
Parents: 3b55ac4
Author: Guanghao Zhang 
Authored: Mon Jan 22 11:44:49 2018 +0800
Committer: zhangduo 
Committed: Wed May 16 09:06:30 2018 +0800

--
 .../org/apache/hadoop/hbase/client/Admin.java   |  39 +
 .../apache/hadoop/hbase/client/AsyncAdmin.java  |  31 
 .../hadoop/hbase/client/AsyncHBaseAdmin.java|   7 +
 .../hbase/client/ConnectionImplementation.java  |   9 ++
 .../apache/hadoop/hbase/client/HBaseAdmin.java  |  26 +++
 .../hadoop/hbase/client/RawAsyncHBaseAdmin.java |  15 ++
 .../client/ShortCircuitMasterConnection.java|   9 ++
 .../replication/ReplicationPeerConfigUtil.java  |  26 +--
 .../replication/ReplicationPeerDescription.java |  10 +-
 .../hbase/replication/SyncReplicationState.java |  48 ++
 .../hbase/shaded/protobuf/RequestConverter.java |  10 ++
 .../src/main/protobuf/Master.proto  |   4 +
 .../src/main/protobuf/MasterProcedure.proto |   4 +
 .../src/main/protobuf/Replication.proto |  20 +++
 .../replication/ReplicationPeerStorage.java |  18 ++-
 .../hbase/replication/ReplicationUtils.java |   1 +
 .../replication/ZKReplicationPeerStorage.java   |  61 +--
 .../replication/TestReplicationStateBasic.java  |  23 ++-
 .../TestZKReplicationPeerStorage.java   |  12 +-
 .../hbase/coprocessor/MasterObserver.java   |  23 +++
 .../org/apache/hadoop/hbase/master/HMaster.java |  12 ++
 .../hbase/master/MasterCoprocessorHost.java |  21 +++
 .../hadoop/hbase/master/MasterRpcServices.java  |  17 ++
 .../hadoop/hbase/master/MasterServices.java |   9 ++
 .../procedure/PeerProcedureInterface.java   |   2 +-
 .../replication/ReplicationPeerManager.java |  51 +-
 ...ransitPeerSyncReplicationStateProcedure.java | 159 +++
 .../hbase/security/access/AccessController.java |   8 +
 .../replication/TestReplicationAdmin.java   |  62 
 .../hbase/master/MockNoopMasterServices.java|   8 +-
 .../cleaner/TestReplicationHFileCleaner.java|   4 +-
 .../TestReplicationTrackerZKImpl.java   |   6 +-
 .../TestReplicationSourceManager.java   |   3 +-
 .../security/access/TestAccessController.java   |  16 ++
 .../hbase/util/TestHBaseFsckReplication.java|   5 +-
 .../src/main/ruby/hbase/replication_admin.rb|  15 ++
 hbase-shell/src/main/ruby/shell.rb  |   1 +
 .../src/main/ruby/shell/commands/list_peers.rb  |   6 +-
 .../transit_peer_sync_replication_state.rb  |  44 +
 .../test/ruby/hbase/replication_admin_test.rb   |  24 +++
 40 files changed, 816 insertions(+), 53 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/39cef397/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
--
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
index 331f2d1..39542e4 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
@@ -51,6 +51,7 @@ import org.apache.hadoop.hbase.regionserver.wal.FailedLogCloseException;
 import org.apache.hadoop.hbase.replication.ReplicationException;
 import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
 import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
+import org.apache.hadoop.hbase.replication.SyncReplicationState;
 import org.apache.hadoop.hbase.snapshot.HBaseSnapshotException;
 import org.apache.hadoop.hbase.snapshot.RestoreSnapshotException;
 import org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
@@ -2657,6 +2658,44 @@ public interface Admin extends Abortable, Closeable {
   List<ReplicationPeerDescription> listReplicationPeers(Pattern pattern) throws IOException;
 
   /**
+   * Transit current cluster to a new state in a synchronous replication peer.
+   * @param peerId a short name that identifies the peer
+   * @param state a new state of current cluster
+   * @throws IOException if a remote or network exception occurs
+   */
+  void transitReplicationPeerSyncReplicationState(String peerId, SyncReplicationState state)
+      throws IOException;
+
+  /**
+   * Transit current cluster to a new state in a synchronous replication peer. But does not block
+   * and wait for it.
+   * <p>
+   * You can use Future.get(long, TimeUnit) to wait on the operation to complete. It may throw
+   * Exec
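
A hedged usage sketch of the new blocking API (configuration and peer id are placeholders; SyncReplicationState is the enum added in this series):

try (Connection conn = ConnectionFactory.createConnection(conf);
     Admin admin = conn.getAdmin()) {
  admin.transitReplicationPeerSyncReplicationState("1", SyncReplicationState.DOWNGRADE_ACTIVE);
}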

[22/29] hbase git commit: HBASE-20163 Forbid major compaction when standby cluster replay the remote wals

2018-05-15 Thread zhangduo
HBASE-20163 Forbid major compaction when standby cluster replay the remote wals


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/a7693675
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/a7693675
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/a7693675

Branch: refs/heads/HBASE-19064
Commit: a76936756b10942925aae6c88f7d6bc5cdadbd68
Parents: 0cf2aaf
Author: Guanghao Zhang 
Authored: Thu Apr 12 14:44:25 2018 +0800
Committer: zhangduo 
Committed: Wed May 16 09:06:30 2018 +0800

--
 .../hadoop/hbase/regionserver/HRegion.java  | 18 
 .../hbase/regionserver/HRegionServer.java   |  2 +-
 .../regionserver/RegionServerServices.java  |  5 +++
 .../ForbidMajorCompactionChecker.java   | 44 
 .../hadoop/hbase/MockRegionServerServices.java  |  6 +++
 .../hadoop/hbase/master/MockRegionServer.java   |  6 +++
 6 files changed, 80 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/a7693675/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index d86565e..6aa4b27 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -144,6 +144,7 @@ import org.apache.hadoop.hbase.regionserver.ScannerContext.LimitScope;
 import org.apache.hadoop.hbase.regionserver.ScannerContext.NextState;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionContext;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionLifeCycleTracker;
+import org.apache.hadoop.hbase.regionserver.compactions.ForbidMajorCompactionChecker;
 import org.apache.hadoop.hbase.regionserver.throttle.CompactionThroughputControllerFactory;
 import org.apache.hadoop.hbase.regionserver.throttle.NoLimitThroughputController;
 import org.apache.hadoop.hbase.regionserver.throttle.StoreHotnessProtector;
@@ -1980,6 +1981,14 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
     return compact(compaction, store, throughputController, null);
   }
 
+  private boolean shouldForbidMajorCompaction() {
+    if (rsServices != null && rsServices.getReplicationSourceService() != null) {
+      return rsServices.getReplicationSourceService().getSyncReplicationPeerInfoProvider()
+          .checkState(getRegionInfo(), ForbidMajorCompactionChecker.get());
+    }
+    return false;
+  }
+
   public boolean compact(CompactionContext compaction, HStore store,
       ThroughputController throughputController, User user) throws IOException {
 assert compaction != null && compaction.hasSelection();
@@ -1989,6 +1998,15 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
   store.cancelRequestedCompaction(compaction);
   return false;
 }
+
+    if (compaction.getRequest().isAllFiles() && shouldForbidMajorCompaction()) {
+      LOG.warn("Skipping major compaction on " + this
+          + " because this cluster is transiting sync replication state"
+          + " from STANDBY to DOWNGRADE_ACTIVE");
+      store.cancelRequestedCompaction(compaction);
+      return false;
+    }
+
 MonitoredTask status = null;
 boolean requestNeedsCancellation = true;
 /*

http://git-wip-us.apache.org/repos/asf/hbase/blob/a7693675/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index af2f3b5..440a838 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -2472,7 +2472,7 @@ public class HRegionServer extends HasThread implements
* @return Return the object that implements the replication
* source executorService.
*/
-  @VisibleForTesting
+  @Override
   public ReplicationSourceService getReplicationSourceService() {
 return replicationSourceHandler;
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/a7693675/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java b/hbase-server

[04/29] hbase git commit: HBASE-19943 Only allow removing sync replication peer which is in DA state

2018-05-15 Thread zhangduo
HBASE-19943 Only allow removing sync replication peer which is in DA state


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/963b58fd
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/963b58fd
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/963b58fd

Branch: refs/heads/HBASE-19064
Commit: 963b58fdae6a50e1732cbb415944579d862c34c9
Parents: fc625d0
Author: huzheng 
Authored: Thu Mar 1 18:34:02 2018 +0800
Committer: zhangduo 
Committed: Wed May 16 09:06:30 2018 +0800

--
 .../replication/ReplicationPeerManager.java | 14 -
 .../hbase/wal/SyncReplicationWALProvider.java   |  2 +-
 .../replication/TestReplicationAdmin.java   | 63 
 .../hbase/replication/TestSyncReplication.java  |  2 +-
 4 files changed, 78 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/963b58fd/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
index 0dc922d..41dd6e3 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
@@ -120,8 +120,20 @@ public class ReplicationPeerManager {
 return desc;
   }
 
+  private void checkPeerInDAStateIfSyncReplication(String peerId) throws 
DoNotRetryIOException {
+ReplicationPeerDescription desc = peers.get(peerId);
+if (desc != null && desc.getPeerConfig().isSyncReplication()
+&& 
!SyncReplicationState.DOWNGRADE_ACTIVE.equals(desc.getSyncReplicationState())) {
+  throw new DoNotRetryIOException("Couldn't remove synchronous replication 
peer with state="
+  + desc.getSyncReplicationState()
+  + ", Transit the synchronous replication state to be 
DOWNGRADE_ACTIVE firstly.");
+}
+  }
+
   ReplicationPeerConfig preRemovePeer(String peerId) throws 
DoNotRetryIOException {
-return checkPeerExists(peerId).getPeerConfig();
+ReplicationPeerDescription pd = checkPeerExists(peerId);
+checkPeerInDAStateIfSyncReplication(peerId);
+return pd.getPeerConfig();
   }
 
   void preEnablePeer(String peerId) throws DoNotRetryIOException {

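A self-contained sketch of the precondition added above: peer removal fails fast
unless a sync replication peer has been transited to DOWNGRADE_ACTIVE first. The
enum values follow SyncReplicationState from this patch series; the unchecked
exception is a stand-in for DoNotRetryIOException.

class RemovePeerCheckSketch {
  enum SyncReplicationState { NONE, ACTIVE, DOWNGRADE_ACTIVE, STANDBY }

  static void checkRemovable(boolean isSyncReplication, SyncReplicationState state) {
    if (isSyncReplication && state != SyncReplicationState.DOWNGRADE_ACTIVE) {
      // Stand-in for the DoNotRetryIOException thrown by preRemovePeer().
      throw new IllegalStateException("Couldn't remove synchronous replication peer"
          + " with state=" + state + ", transit it to DOWNGRADE_ACTIVE first");
    }
  }

  public static void main(String[] args) {
    checkRemovable(false, SyncReplicationState.STANDBY);         // ok: asynchronous peer
    checkRemovable(true, SyncReplicationState.DOWNGRADE_ACTIVE); // ok: DA state
    try {
      checkRemovable(true, SyncReplicationState.STANDBY);        // rejected
    } catch (IllegalStateException e) {
      System.out.println(e.getMessage());
    }
  }
}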
http://git-wip-us.apache.org/repos/asf/hbase/blob/963b58fd/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/SyncReplicationWALProvider.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/SyncReplicationWALProvider.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/SyncReplicationWALProvider.java
index ac4b4cd..282aa21 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/SyncReplicationWALProvider.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/SyncReplicationWALProvider.java
@@ -142,7 +142,7 @@ public class SyncReplicationWALProvider implements 
WALProvider, PeerActionListen
   @Override
   public WAL getWAL(RegionInfo region) throws IOException {
 if (region == null) {
-  return provider.getWAL(region);
+  return provider.getWAL(null);
 }
 Optional> peerIdAndRemoteWALDir =
   peerInfoProvider.getPeerIdAndRemoteWALDir(region);

http://git-wip-us.apache.org/repos/asf/hbase/blob/963b58fd/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java
index 0ad476f..486ab51 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java
@@ -254,6 +254,62 @@ public class TestReplicationAdmin {
   }
 
   @Test
+  public void testRemovePeerWithNonDAState() throws Exception {
+TableName tableName = TableName.valueOf(name.getMethodName());
+TEST_UTIL.createTable(tableName, Bytes.toBytes("family"));
+ReplicationPeerConfigBuilder builder = ReplicationPeerConfig.newBuilder();
+
+String rootDir = "hdfs://srv1:/hbase";
+builder.setClusterKey(KEY_ONE);
+builder.setRemoteWALDir(rootDir);
+builder.setReplicateAllUserTables(false);
+Map> tableCfs = new HashMap<>();
+tableCfs.put(tableName, new ArrayList<>());
+builder.se

[05/29] hbase git commit: HBASE-20434 Also remove remote wals when peer is in DA state

2018-05-15 Thread zhangduo
HBASE-20434 Also remove remote wals when peer is in DA state


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/06fa05d1
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/06fa05d1
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/06fa05d1

Branch: refs/heads/HBASE-19064
Commit: 06fa05d15a5263638a60e38a18673fd84a42d23b
Parents: 2ae4e0b
Author: zhangduo 
Authored: Wed Apr 25 17:12:23 2018 +0800
Committer: zhangduo 
Committed: Wed May 16 09:06:30 2018 +0800

--
 .../hbase/replication/ReplicationUtils.java |   4 +
 ...ransitPeerSyncReplicationStateProcedure.java |   2 +-
 .../regionserver/ReplicationSource.java |   7 +-
 .../regionserver/ReplicationSourceManager.java  |  86 ++--
 .../hadoop/hbase/wal/AbstractFSWALProvider.java |  19 ++--
 .../hbase/wal/SyncReplicationWALProvider.java   |  30 +-
 .../TestSyncReplicationRemoveRemoteWAL.java | 101 +++
 .../TestReplicationSourceManager.java   |  68 -
 8 files changed, 251 insertions(+), 66 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/06fa05d1/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java
--
diff --git 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java
 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java
index 66e9b01..069db7a 100644
--- 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java
+++ 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java
@@ -191,6 +191,10 @@ public final class ReplicationUtils {
 return new Path(remoteWALDir, peerId);
   }
 
+  public static Path getRemoteWALDirForPeer(Path remoteWALDir, String peerId) {
+return new Path(remoteWALDir, peerId);
+  }
+
   /**
* Do the sleeping logic
* @param msg Why we sleep

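The new helper just nests the peer id under the remote WAL root. A quick runnable
illustration with Hadoop's Path; the hdfs URI and peer id below are made up:

import org.apache.hadoop.fs.Path;

class RemoteWALDirSketch {
  public static void main(String[] args) {
    // remoteWALs root (see REMOTE_WAL_DIR_NAME) plus peer id gives the per-peer dir.
    Path remoteWALDir = new Path("hdfs://backup-cluster:8020/hbase/remoteWALs");
    Path peerDir = new Path(remoteWALDir, "peer_1");
    System.out.println(peerDir); // hdfs://backup-cluster:8020/hbase/remoteWALs/peer_1
  }
}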
http://git-wip-us.apache.org/repos/asf/hbase/blob/06fa05d1/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/TransitPeerSyncReplicationStateProcedure.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/TransitPeerSyncReplicationStateProcedure.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/TransitPeerSyncReplicationStateProcedure.java
index 5da2b0c..99fd615 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/TransitPeerSyncReplicationStateProcedure.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/TransitPeerSyncReplicationStateProcedure.java
@@ -211,7 +211,7 @@ public class TransitPeerSyncReplicationStateProcedure
   case CREATE_DIR_FOR_REMOTE_WAL:
 MasterFileSystem mfs = env.getMasterFileSystem();
 Path remoteWALDir = new Path(mfs.getWALRootDir(), 
ReplicationUtils.REMOTE_WAL_DIR_NAME);
-Path remoteWALDirForPeer = new Path(remoteWALDir, peerId);
+Path remoteWALDirForPeer = 
ReplicationUtils.getRemoteWALDirForPeer(remoteWALDir, peerId);
 FileSystem walFs = mfs.getWALFileSystem();
 try {
   if (walFs.exists(remoteWALDirForPeer)) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/06fa05d1/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
index 1a27fc1..7313f13 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
@@ -549,14 +549,17 @@ public class ReplicationSource implements 
ReplicationSourceInterface {
 }
 
 /**
+ * 
  * Split a path to get the start time
+ * 
+ * 
  * For example: 10.20.20.171%3A60020.1277499063250
+ * 
  * @param p path to split
  * @return start time
  */
 private static long getTS(Path p) {
-  int tsIndex = p.getName().lastIndexOf('.') + 1;
-  return Long.parseLong(p.getName().substring(tsIndex));
+  return AbstractFSWALProvider.getWALStartTimeFromWALName(p.getName());
 }
   }
 
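The parse that getTS() used to do inline, now centralized in
AbstractFSWALProvider.getWALStartTimeFromWALName, is just the numeric suffix after
the last dot of the WAL name. A self-contained sketch using the example from the
javadoc above:

class WalStartTimeSketch {
  public static void main(String[] args) {
    String walName = "10.20.20.171%3A60020.1277499063250";
    long startTime = Long.parseLong(walName.substring(walName.lastIndexOf('.') + 1));
    System.out.println(startTime); // 1277499063250
  }
}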

http://git-wip-us.apache.org/repos/asf/hbase/blob/06fa05d1/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java

[18/29] hbase git commit: HBASE-19973 Implement a procedure to replay sync replication wal for standby cluster

2018-05-15 Thread zhangduo
HBASE-19973 Implement a procedure to replay sync replication wal for standby 
cluster


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/35b5bca8
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/35b5bca8
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/35b5bca8

Branch: refs/heads/HBASE-19064
Commit: 35b5bca84104735fdd19bc4007187797cdb15f92
Parents: 963b58f
Author: Guanghao Zhang 
Authored: Fri Mar 2 18:43:25 2018 +0800
Committer: zhangduo 
Committed: Wed May 16 09:06:30 2018 +0800

--
 .../src/main/protobuf/MasterProcedure.proto |  22 +++
 .../apache/hadoop/hbase/executor/EventType.java |   9 +-
 .../hadoop/hbase/executor/ExecutorType.java |   3 +-
 .../org/apache/hadoop/hbase/master/HMaster.java |   9 +
 .../hadoop/hbase/master/MasterServices.java |   6 +
 .../procedure/PeerProcedureInterface.java   |   3 +-
 .../hbase/master/procedure/PeerQueue.java   |   3 +-
 .../replication/RecoverStandbyProcedure.java| 114 +++
 .../ReplaySyncReplicationWALManager.java| 139 +
 .../ReplaySyncReplicationWALProcedure.java  | 193 +++
 .../hbase/regionserver/HRegionServer.java   |   9 +-
 .../ReplaySyncReplicationWALCallable.java   | 149 ++
 .../SyncReplicationPeerInfoProviderImpl.java|   3 +
 .../org/apache/hadoop/hbase/util/FSUtils.java   |   5 +
 .../hbase/master/MockNoopMasterServices.java|   8 +-
 .../master/TestRecoverStandbyProcedure.java | 186 ++
 16 files changed, 854 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/35b5bca8/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
--
diff --git a/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto 
b/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
index e8b940e..01e4dae 100644
--- a/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
@@ -459,3 +459,25 @@ message TransitPeerSyncReplicationStateStateData {
   optional SyncReplicationState fromState = 1;
   required SyncReplicationState toState = 2;
 }
+
+enum RecoverStandbyState {
+  RENAME_SYNC_REPLICATION_WALS_DIR = 1;
+  INIT_WORKERS = 2;
+  DISPATCH_TASKS = 3;
+  REMOVE_SYNC_REPLICATION_WALS_DIR = 4;
+}
+
+message RecoverStandbyStateData {
+  required string peer_id = 1;
+}
+
+message ReplaySyncReplicationWALStateData {
+  required string peer_id = 1;
+  required string wal = 2;
+  optional ServerName target_server = 3;
+}
+
+message ReplaySyncReplicationWALParameter {
+  required string peer_id = 1;
+  required string wal = 2;
+}

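A hedged sketch of how a procedure can walk the RecoverStandbyState values declared
above. Only the state order comes from the proto; the per-state actions are
paraphrased from the file list (RecoverStandbyProcedure,
ReplaySyncReplicationWALProcedure) and are not the literal implementation.

class RecoverStandbyFlowSketch {
  enum RecoverStandbyState {
    RENAME_SYNC_REPLICATION_WALS_DIR, INIT_WORKERS, DISPATCH_TASKS,
    REMOVE_SYNC_REPLICATION_WALS_DIR
  }

  public static void main(String[] args) {
    for (RecoverStandbyState state : RecoverStandbyState.values()) {
      switch (state) {
        case RENAME_SYNC_REPLICATION_WALS_DIR:
          System.out.println("rename the peer's remote wal dir so new writes cannot sneak in");
          break;
        case INIT_WORKERS:
          System.out.println("set up region server workers for the replay");
          break;
        case DISPATCH_TASKS:
          System.out.println("schedule a ReplaySyncReplicationWALProcedure per wal");
          break;
        case REMOVE_SYNC_REPLICATION_WALS_DIR:
          System.out.println("remove the replayed wal dir and finish");
          break;
      }
    }
  }
}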
http://git-wip-us.apache.org/repos/asf/hbase/blob/35b5bca8/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/EventType.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/EventType.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/EventType.java
index 922deb8..ad38d1c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/EventType.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/EventType.java
@@ -281,7 +281,14 @@ public enum EventType {
*
* RS_REFRESH_PEER
*/
-  RS_REFRESH_PEER (84, ExecutorType.RS_REFRESH_PEER);
+  RS_REFRESH_PEER(84, ExecutorType.RS_REFRESH_PEER),
+
+  /**
+   * RS replay sync replication wal.
+   *
+   * RS_REPLAY_SYNC_REPLICATION_WAL
+   */
+  RS_REPLAY_SYNC_REPLICATION_WAL(85, 
ExecutorType.RS_REPLAY_SYNC_REPLICATION_WAL);
 
   private final int code;
   private final ExecutorType executor;

http://git-wip-us.apache.org/repos/asf/hbase/blob/35b5bca8/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/ExecutorType.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/ExecutorType.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/ExecutorType.java
index 7f130d1..ea97354 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/ExecutorType.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/ExecutorType.java
@@ -47,7 +47,8 @@ public enum ExecutorType {
   RS_REGION_REPLICA_FLUSH_OPS  (28),
   RS_COMPACTED_FILES_DISCHARGER (29),
   RS_OPEN_PRIORITY_REGION(30),
-  RS_REFRESH_PEER   (31);
+  RS_REFRESH_PEER(31),
+  RS_REPLAY_SYNC_REPLICATION_WAL(32);
 
   ExecutorType(int value) {
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/35b5bca8/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
--

[02/29] hbase git commit: HBASE-20585 Need to clear peer map when clearing MasterProcedureScheduler

2018-05-15 Thread zhangduo
HBASE-20585 Need to clear peer map when clearing MasterProcedureScheduler


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/ab53329c
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/ab53329c
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/ab53329c

Branch: refs/heads/HBASE-19064
Commit: ab53329cb3a56296ad05ee68735eb78896819cd3
Parents: 26babcf
Author: zhangduo 
Authored: Tue May 15 21:24:13 2018 +0800
Committer: zhangduo 
Committed: Wed May 16 08:46:29 2018 +0800

--
 .../hadoop/hbase/master/procedure/MasterProcedureScheduler.java  | 4 
 1 file changed, 4 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/ab53329c/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
index a3bd938..69a6e8f 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
@@ -247,6 +247,10 @@ public class MasterProcedureScheduler extends 
AbstractProcedureScheduler {
 clear(tableMap, tableRunQueue, TABLE_QUEUE_KEY_COMPARATOR);
 tableMap = null;
 
+// Remove Peers
+clear(peerMap, peerRunQueue, PEER_QUEUE_KEY_COMPARATOR);
+peerMap = null;
+
 assert size() == 0 : "expected queue size to be 0, got " + size();
   }
 
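The bug this fixes is easy to reproduce in miniature: if clear() skips one of the
scheduler's queue maps, the trailing size assertion trips. A stand-in sketch with
plain HashMaps instead of the real queue structures (run with -ea to enable the
assert):

import java.util.HashMap;
import java.util.Map;

class ClearAllQueuesSketch {
  static final Map<String, Integer> serverMap = new HashMap<>();
  static final Map<String, Integer> tableMap = new HashMap<>();
  static final Map<String, Integer> peerMap = new HashMap<>();

  static int size() {
    return serverMap.size() + tableMap.size() + peerMap.size();
  }

  public static void main(String[] args) {
    peerMap.put("peer_1", 1); // a queued peer procedure left behind
    serverMap.clear();
    tableMap.clear();
    peerMap.clear(); // the step HBASE-20585 adds; without it size() stays > 0
    assert size() == 0 : "expected queue size to be 0, got " + size();
    System.out.println("all queues empty: " + (size() == 0));
  }
}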



[19/29] hbase git commit: HBASE-19990 Create remote wal directory when transiting to state S

2018-05-15 Thread zhangduo
HBASE-19990 Create remote wal directory when transiting to state S


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/fc625d0f
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/fc625d0f
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/fc625d0f

Branch: refs/heads/HBASE-19064
Commit: fc625d0f5b1e5c413459350ee2b3c790b97c55d9
Parents: a1d92f3
Author: zhangduo 
Authored: Wed Feb 14 16:01:16 2018 +0800
Committer: zhangduo 
Committed: Wed May 16 09:06:30 2018 +0800

--
 .../procedure2/ProcedureYieldException.java |  9 --
 .../hbase/replication/ReplicationUtils.java |  2 ++
 .../hadoop/hbase/master/MasterFileSystem.java   | 19 ++---
 .../master/procedure/MasterProcedureEnv.java|  5 
 ...ransitPeerSyncReplicationStateProcedure.java | 29 
 .../hbase/replication/TestSyncReplication.java  |  8 ++
 6 files changed, 55 insertions(+), 17 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/fc625d0f/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureYieldException.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureYieldException.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureYieldException.java
index 0487ac5b..dbb9981 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureYieldException.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureYieldException.java
@@ -15,16 +15,21 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-
 package org.apache.hadoop.hbase.procedure2;
 
 import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.yetus.audience.InterfaceStability;
 
-// TODO: Not used yet
+/**
+ * Indicate that a procedure wants to be rescheduled. Usually because there 
are something wrong but
+ * we do not want to fail the procedure.
+ * 
+ * TODO: need to support scheduling after a delay.
+ */
 @InterfaceAudience.Private
 @InterfaceStability.Stable
 public class ProcedureYieldException extends ProcedureException {
+
   /** default constructor */
   public ProcedureYieldException() {
 super();

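The new javadoc describes the intended use: a procedure step throws to get
rescheduled instead of failing. A sketch under that reading; the exception class
here is a local stand-in for ProcedureYieldException and the "not ready" condition
is invented:

class ProcedureYieldSketch {
  static class YieldException extends Exception { // stand-in for ProcedureYieldException
    YieldException(String msg) { super(msg); }
  }

  static boolean remoteWALDirReady = false; // illustrative condition

  static void executeStep() throws YieldException {
    if (!remoteWALDirReady) {
      // Ask the framework to run this step again later rather than fail the procedure.
      throw new YieldException("remote wal dir not ready yet, reschedule");
    }
  }

  public static void main(String[] args) {
    try {
      executeStep();
    } catch (YieldException e) {
      System.out.println("rescheduling: " + e.getMessage());
    }
  }
}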
http://git-wip-us.apache.org/repos/asf/hbase/blob/fc625d0f/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java
--
diff --git 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java
 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java
index d94cb00..e402d0f 100644
--- 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java
+++ 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java
@@ -41,6 +41,8 @@ public final class ReplicationUtils {
 
   public static final String REPLICATION_ATTR_NAME = "__rep__";
 
+  public static final String REMOTE_WAL_DIR_NAME = "remoteWALs";
+
   private ReplicationUtils() {
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/fc625d0f/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
index 864be02..7ccbd71 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
@@ -41,6 +41,7 @@ import org.apache.hadoop.hbase.log.HBaseMarkers;
 import org.apache.hadoop.hbase.mob.MobConstants;
 import org.apache.hadoop.hbase.procedure2.store.wal.WALProcedureStore;
 import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.replication.ReplicationUtils;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSTableDescriptors;
 import org.apache.hadoop.hbase.util.FSUtils;
@@ -133,7 +134,6 @@ public class MasterFileSystem {
* Idempotent.
*/
   private void createInitialFileSystemLayout() throws IOException {
-
 final String[] protectedSubDirs = new String[] {
 HConstants.BASE_NAMESPACE_DIR,
 HConstants.HFILE_ARCHIVE_DIRECTORY,
@@ -145,7 +145,8 @@ public class MasterFileSystem {
   HConstants.HREGION_LOGDIR_NAME,
   HConstants.HREGION_OLDLOGDIR_NAME,
   HConstants.CORRUPT_DIR_NAME,
-  WALProcedureStore.MASTER_PROCEDURE_LOGDIR
+  WALProcedureStore.MASTER_PROCEDURE_LOGDIR,
+   

[23/29] hbase git commit: HBASE-20370 Also remove the wal file in remote cluster when we finish replicating a file

2018-05-15 Thread zhangduo
HBASE-20370 Also remove the wal file in remote cluster when we finish 
replicating a file


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/2029a46b
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/2029a46b
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/2029a46b

Branch: refs/heads/HBASE-19064
Commit: 2029a46bba41cdcfac7c70ff048cf27103b32f47
Parents: a769367
Author: zhangduo 
Authored: Tue Apr 17 09:04:56 2018 +0800
Committer: zhangduo 
Committed: Wed May 16 09:06:30 2018 +0800

--
 .../hbase/replication/ReplicationUtils.java |  36 ++-
 .../regionserver/ReplicationSource.java |  38 +++
 .../ReplicationSourceInterface.java |  21 +++-
 .../regionserver/ReplicationSourceManager.java  | 108 ++-
 .../regionserver/ReplicationSourceShipper.java  |  27 ++---
 .../hbase/wal/SyncReplicationWALProvider.java   |  11 +-
 .../replication/ReplicationSourceDummy.java |  20 ++--
 .../TestReplicationSourceManager.java   | 101 -
 8 files changed, 246 insertions(+), 116 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/2029a46b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java
--
diff --git 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java
 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java
index cb22f57..66e9b01 100644
--- 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java
+++ 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java
@@ -22,14 +22,17 @@ import java.util.Collection;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
-
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.CompoundConfiguration;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Helper class for replication.
@@ -37,6 +40,8 @@ import org.apache.yetus.audience.InterfaceAudience;
 @InterfaceAudience.Private
 public final class ReplicationUtils {
 
+  private static final Logger LOG = 
LoggerFactory.getLogger(ReplicationUtils.class);
+
   public static final String REPLICATION_ATTR_NAME = "__rep__";
 
   public static final String REMOTE_WAL_DIR_NAME = "remoteWALs";
@@ -176,4 +181,33 @@ public final class ReplicationUtils {
   return tableCFs != null && tableCFs.containsKey(tableName);
 }
   }
+
+  public static FileSystem getRemoteWALFileSystem(Configuration conf, String 
remoteWALDir)
+  throws IOException {
+return new Path(remoteWALDir).getFileSystem(conf);
+  }
+
+  public static Path getRemoteWALDirForPeer(String remoteWALDir, String 
peerId) {
+return new Path(remoteWALDir, peerId);
+  }
+
+  /**
+   * Do the sleeping logic
+   * @param msg Why we sleep
+   * @param sleepForRetries the base sleep time.
+   * @param sleepMultiplier by how many times the default sleeping time is 
augmented
+   * @param maxRetriesMultiplier the max retry multiplier
+   * @return True if sleepMultiplier is < 
maxRetriesMultiplier
+   */
+  public static boolean sleepForRetries(String msg, long sleepForRetries, int 
sleepMultiplier,
+  int maxRetriesMultiplier) {
+try {
+  LOG.trace("{}, sleeping {} times {}", msg, sleepForRetries, 
sleepMultiplier);
+  Thread.sleep(sleepForRetries * sleepMultiplier);
+} catch (InterruptedException e) {
+  LOG.debug("Interrupted while sleeping between retries");
+  Thread.currentThread().interrupt();
+}
+return sleepMultiplier < maxRetriesMultiplier;
+  }
 }

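Usage sketch for the sleepForRetries() helper added above: callers bump a
multiplier after each failed attempt and give up once it reaches the cap. The
attempt() stub and the constants are illustrative, and the helper body is inlined
here so the snippet runs standalone:

class SleepForRetriesDemo {
  // Same contract as ReplicationUtils.sleepForRetries(): sleep base * multiplier,
  // return true while the multiplier is still below the cap.
  static boolean sleepForRetries(String msg, long base, int multiplier, int cap) {
    try {
      Thread.sleep(base * multiplier);
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();
    }
    return multiplier < cap;
  }

  static boolean attempt(int i) { return i >= 3; } // pretend the 3rd try succeeds

  public static void main(String[] args) {
    int sleepMultiplier = 1;
    int tries = 0;
    while (!attempt(++tries)) {
      if (!sleepForRetries("remote wal delete failed", 100, sleepMultiplier, 10)) {
        break; // retries exhausted
      }
      sleepMultiplier++;
    }
    System.out.println("succeeded after " + tries + " tries");
  }
}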
http://git-wip-us.apache.org/repos/asf/hbase/blob/2029a46b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
index b05a673..01ccb11 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
@@ -89,8 +89,6 @@ public class ReplicationSource implements 
ReplicationSourceInterface {
 
   protec

[16/29] hbase git commit: HBASE-19957 General framework to transit sync replication state

2018-05-15 Thread zhangduo
HBASE-19957 General framework to transit sync replication state


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/0a2f4d9f
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/0a2f4d9f
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/0a2f4d9f

Branch: refs/heads/HBASE-19064
Commit: 0a2f4d9f9c56140c09c579ec9469ef13468406bf
Parents: fb3bcfc
Author: zhangduo 
Authored: Fri Feb 9 18:33:28 2018 +0800
Committer: zhangduo 
Committed: Wed May 16 09:06:30 2018 +0800

--
 .../replication/ReplicationPeerConfig.java  |   2 -
 .../replication/ReplicationPeerDescription.java |   5 +-
 .../hbase/replication/SyncReplicationState.java |  19 +-
 .../org/apache/hadoop/hbase/HConstants.java |   3 +
 .../src/main/protobuf/MasterProcedure.proto |  20 +-
 .../hbase/replication/ReplicationPeerImpl.java  |  45 -
 .../replication/ReplicationPeerStorage.java |  25 ++-
 .../hbase/replication/ReplicationPeers.java |  27 ++-
 .../replication/ZKReplicationPeerStorage.java   |  63 +--
 .../hbase/coprocessor/MasterObserver.java   |   7 +-
 .../org/apache/hadoop/hbase/master/HMaster.java |   4 +-
 .../hbase/master/MasterCoprocessorHost.java |  12 +-
 .../replication/AbstractPeerProcedure.java  |  14 +-
 .../master/replication/ModifyPeerProcedure.java |  11 --
 .../replication/RefreshPeerProcedure.java   |  18 +-
 .../replication/ReplicationPeerManager.java |  89 +
 ...ransitPeerSyncReplicationStateProcedure.java | 181 ---
 .../hbase/regionserver/HRegionServer.java   |  35 ++--
 .../regionserver/ReplicationSourceService.java  |  11 +-
 .../regionserver/PeerActionListener.java|   4 +-
 .../regionserver/PeerProcedureHandler.java  |  16 +-
 .../regionserver/PeerProcedureHandlerImpl.java  |  52 +-
 .../regionserver/RefreshPeerCallable.java   |   7 +
 .../replication/regionserver/Replication.java   |  22 ++-
 .../regionserver/ReplicationSourceManager.java  |  41 +++--
 .../SyncReplicationPeerInfoProvider.java|  43 +
 .../SyncReplicationPeerInfoProviderImpl.java|  71 
 .../SyncReplicationPeerMappingManager.java  |  48 +
 .../SyncReplicationPeerProvider.java|  35 
 .../hbase/wal/SyncReplicationWALProvider.java   |  35 ++--
 .../org/apache/hadoop/hbase/wal/WALFactory.java |  47 ++---
 .../replication/TestReplicationAdmin.java   |   3 +-
 .../TestReplicationSourceManager.java   |   5 +-
 .../wal/TestSyncReplicationWALProvider.java |  36 ++--
 34 files changed, 743 insertions(+), 313 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/0a2f4d9f/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
index 997a155..cc7b4bc 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
@@ -15,7 +15,6 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-
 package org.apache.hadoop.hbase.replication;
 
 import java.util.Collection;
@@ -25,7 +24,6 @@ import java.util.List;
 import java.util.Map;
 import java.util.Set;
 import java.util.TreeMap;
-
 import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.util.Bytes;

http://git-wip-us.apache.org/repos/asf/hbase/blob/0a2f4d9f/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerDescription.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerDescription.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerDescription.java
index 2d077c5..b0c27bb 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerDescription.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerDescription.java
@@ -20,7 +20,10 @@ package org.apache.hadoop.hbase.replication;
 import org.apache.yetus.audience.InterfaceAudience;
 
 /**
- * The POJO equivalent of ReplicationProtos.ReplicationPeerDescription
+ * The POJO equivalent of ReplicationProtos.ReplicationPeerDescription.
+ * 
+ * To developer, here we do not store the new sync replication state since it 
is just an
+ * intermediate state and this class is public.
  */
 @InterfaceAudience.Public
 public

hbase git commit: HBASE-20585 Need to clear peer map when clearing MasterProcedureScheduler

2018-05-15 Thread zhangduo
Repository: hbase
Updated Branches:
  refs/heads/branch-2 60b8344cf -> 82e301116


HBASE-20585 Need to clear peer map when clearing MasterProcedureScheduler


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/82e30111
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/82e30111
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/82e30111

Branch: refs/heads/branch-2
Commit: 82e3011166758736e9d01f498db2b36e5d087beb
Parents: 60b8344
Author: zhangduo 
Authored: Tue May 15 21:24:13 2018 +0800
Committer: zhangduo 
Committed: Wed May 16 08:46:34 2018 +0800

--
 .../hadoop/hbase/master/procedure/MasterProcedureScheduler.java  | 4 
 1 file changed, 4 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/82e30111/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
index 1a39307..d78efc6 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
@@ -247,6 +247,10 @@ public class MasterProcedureScheduler extends 
AbstractProcedureScheduler {
 clear(tableMap, tableRunQueue, TABLE_QUEUE_KEY_COMPARATOR);
 tableMap = null;
 
+// Remove Peers
+clear(peerMap, peerRunQueue, PEER_QUEUE_KEY_COMPARATOR);
+peerMap = null;
+
 assert size() == 0 : "expected queue size to be 0, got " + size();
   }
 



hbase git commit: HBASE-20585 Need to clear peer map when clearing MasterProcedureScheduler

2018-05-15 Thread zhangduo
Repository: hbase
Updated Branches:
  refs/heads/master 26babcf01 -> ab53329cb


HBASE-20585 Need to clear peer map when clearing MasterProcedureScheduler


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/ab53329c
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/ab53329c
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/ab53329c

Branch: refs/heads/master
Commit: ab53329cb3a56296ad05ee68735eb78896819cd3
Parents: 26babcf
Author: zhangduo 
Authored: Tue May 15 21:24:13 2018 +0800
Committer: zhangduo 
Committed: Wed May 16 08:46:29 2018 +0800

--
 .../hadoop/hbase/master/procedure/MasterProcedureScheduler.java  | 4 
 1 file changed, 4 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/ab53329c/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
index a3bd938..69a6e8f 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
@@ -247,6 +247,10 @@ public class MasterProcedureScheduler extends 
AbstractProcedureScheduler {
 clear(tableMap, tableRunQueue, TABLE_QUEUE_KEY_COMPARATOR);
 tableMap = null;
 
+// Remove Peers
+clear(peerMap, peerRunQueue, PEER_QUEUE_KEY_COMPARATOR);
+peerMap = null;
+
 assert size() == 0 : "expected queue size to be 0, got " + size();
   }
 



hbase-site git commit: Remove the CFP and reorder the elements a bit

2018-05-15 Thread elserj
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site f78682dac -> 662611ad4


Remove the CFP and reorder the elements a bit


Project: http://git-wip-us.apache.org/repos/asf/hbase-site/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase-site/commit/662611ad
Tree: http://git-wip-us.apache.org/repos/asf/hbase-site/tree/662611ad
Diff: http://git-wip-us.apache.org/repos/asf/hbase-site/diff/662611ad

Branch: refs/heads/asf-site
Commit: 662611ad4555b2d0c78c234415f02c4baceda016
Parents: f78682d
Author: Josh Elser 
Authored: Tue May 15 18:07:31 2018 -0400
Committer: Josh Elser 
Committed: Tue May 15 18:07:31 2018 -0400

--
 hbasecon-2018/feed.xml   |   2 +-
 hbasecon-2018/index.html | 107 +-
 2 files changed, 45 insertions(+), 64 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/662611ad/hbasecon-2018/feed.xml
--
diff --git a/hbasecon-2018/feed.xml b/hbasecon-2018/feed.xml
index 6f93fff..d804e56 100644
--- a/hbasecon-2018/feed.xml
+++ b/hbasecon-2018/feed.xml
@@ -4,7 +4,7 @@
   https://jekyllrb.com/"; version="3.7.3">Jekyll
   https://hbase.apache.org/hbasecon-2018//hbasecon-2018/feed.xml"; 
rel="self" type="application/atom+xml" />
   https://hbase.apache.org/hbasecon-2018//hbasecon-2018/"; 
rel="alternate" type="text/html" />
-  2018-05-15T16:31:59-04:00
+  2018-05-15T18:06:11-04:00
   https://hbase.apache.org/hbasecon-2018//hbasecon-2018/
 
   

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/662611ad/hbasecon-2018/index.html
--
diff --git a/hbasecon-2018/index.html b/hbasecon-2018/index.html
index 9091409..7240fa5 100644
--- a/hbasecon-2018/index.html
+++ b/hbasecon-2018/index.html
@@ -53,16 +53,14 @@
 
 About
 
+
 
-  CFP
+Agenda
 
 
   Registration
 
 
-Agenda
-
-
 Sponsors
 
 
@@ -131,55 +129,7 @@
 
 
 
-
-
-   
-   
-   
-   Call for Proposals
-   
-   
-   
-   
- The event's call for proposals is available at https://easychair.org/conferences/?conf=hbasecon2018";>EasyChair for 
HBaseCon2018.
- The CFP will be open until 2018/04/20. Please submit your talks 
as soon as possible!
- All submissions will be reviewed by a group of individuals 
from the Apache HBase PMC.
-   
-   
-   
-
-
-
-
-   
-   
-   
-   
-   
-   
-   Registration
-   
-   
-   
-   
- Registration for HBaseCon 2018 is now open! Please use the 
following page to register: https://hbasecon2018.hortonworksevents.com/";>Register Here.
-   Registration for HBaseCon includes registration to PhoenixCon 
which is running at concurrently at the same venue.
-   
-   
-   
-   
- For any issues around registration, please use the following 
contact information, Monday-Friday, 8:00am - 5:00pm PST:
-   
- Telephone: (800) 380-3544 toll free
- Telephone: (415) 446-7709 international
- Email: mailto:hbasecon2...@hortonworksevents.com";>hbasecon2...@hortonworksevents.com
-   
- 
-   
-   
-   
-
-
+
 
 
 
@@ -318,13 +268,41 @@

 
 
-
 
-
+

-   
-   
+   
+   
+   
+   
+   
+   Registration
+   

+   
+   
+ Registration for HBaseCon 2018 is now open! Please use the 
following page to register: https://hbasecon2018.hortonworksevents.com/";>Register Here.
+   Registration for HBaseCon includes registration to PhoenixCon 
which is running at concurrently at the same venue.
+   
+   
+   
+   
+ For any issues around registration, please use the following 
contact information, Monday-Friday, 8:00am - 5:00pm PST:
+   
+ Telephone: (800) 380-3544 toll free
+ Telephone: (415) 446-7709 international
+ Email: mailto:hbasecon2...@hortonworksevents.com";>hbasecon2...@hortonworksevents.com
+   
+ 
+   
+   
+   
+
+
+
+
+
+   


Sponsors
@@ -358,8 +336,11 @@
 
 
 
-
+

+   
+ 

hbase-site git commit: Add agenda and new sponsor to HBaseCon site

2018-05-15 Thread elserj
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 3724b8a18 -> f78682dac


Add agenda and new sponsor to HBaseCon site


Project: http://git-wip-us.apache.org/repos/asf/hbase-site/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase-site/commit/f78682da
Tree: http://git-wip-us.apache.org/repos/asf/hbase-site/tree/f78682da
Diff: http://git-wip-us.apache.org/repos/asf/hbase-site/diff/f78682da

Branch: refs/heads/asf-site
Commit: f78682dac50ab97a6939ae2ddb15226f0689ef31
Parents: 3724b8a
Author: Josh Elser 
Authored: Tue May 15 16:33:03 2018 -0400
Committer: Josh Elser 
Committed: Tue May 15 16:33:03 2018 -0400

--
 hbasecon-2018/feed.xml  |   2 +-
 hbasecon-2018/img/speakers/JohnLeach.jpg| Bin 0 -> 33868 bytes
 hbasecon-2018/img/speakers/LeiChen.png  | Bin 0 -> 88008 bytes
 hbasecon-2018/img/speakers/Missing.png  | Bin 0 -> 37762 bytes
 hbasecon-2018/img/speakers/VincentPoon.jpeg | Bin 0 -> 14137 bytes
 hbasecon-2018/img/speakers/ZhengHu.jpg  | Bin 0 -> 179993 bytes
 hbasecon-2018/img/sponsors/bloomberg.png| Bin 0 -> 27643 bytes
 hbasecon-2018/index.html| 169 +--
 hbasecon-2018/style.css |  33 -
 9 files changed, 188 insertions(+), 16 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f78682da/hbasecon-2018/feed.xml
--
diff --git a/hbasecon-2018/feed.xml b/hbasecon-2018/feed.xml
index f9ffa4d..6f93fff 100644
--- a/hbasecon-2018/feed.xml
+++ b/hbasecon-2018/feed.xml
@@ -4,7 +4,7 @@
   https://jekyllrb.com/"; version="3.7.3">Jekyll
   https://hbase.apache.org/hbasecon-2018//hbasecon-2018/feed.xml"; 
rel="self" type="application/atom+xml" />
   https://hbase.apache.org/hbasecon-2018//hbasecon-2018/"; 
rel="alternate" type="text/html" />
-  2018-05-07T21:33:40-07:00
+  2018-05-15T16:31:59-04:00
   https://hbase.apache.org/hbasecon-2018//hbasecon-2018/
 
   

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f78682da/hbasecon-2018/img/speakers/JohnLeach.jpg
--
diff --git a/hbasecon-2018/img/speakers/JohnLeach.jpg 
b/hbasecon-2018/img/speakers/JohnLeach.jpg
new file mode 100644
index 000..2494807
Binary files /dev/null and b/hbasecon-2018/img/speakers/JohnLeach.jpg differ

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f78682da/hbasecon-2018/img/speakers/LeiChen.png
--
diff --git a/hbasecon-2018/img/speakers/LeiChen.png 
b/hbasecon-2018/img/speakers/LeiChen.png
new file mode 100644
index 000..2b6c5c7
Binary files /dev/null and b/hbasecon-2018/img/speakers/LeiChen.png differ

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f78682da/hbasecon-2018/img/speakers/Missing.png
--
diff --git a/hbasecon-2018/img/speakers/Missing.png 
b/hbasecon-2018/img/speakers/Missing.png
new file mode 100644
index 000..ea29a7d
Binary files /dev/null and b/hbasecon-2018/img/speakers/Missing.png differ

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f78682da/hbasecon-2018/img/speakers/VincentPoon.jpeg
--
diff --git a/hbasecon-2018/img/speakers/VincentPoon.jpeg 
b/hbasecon-2018/img/speakers/VincentPoon.jpeg
new file mode 100644
index 000..4636ec0
Binary files /dev/null and b/hbasecon-2018/img/speakers/VincentPoon.jpeg differ

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f78682da/hbasecon-2018/img/speakers/ZhengHu.jpg
--
diff --git a/hbasecon-2018/img/speakers/ZhengHu.jpg 
b/hbasecon-2018/img/speakers/ZhengHu.jpg
new file mode 100644
index 000..9a760c7
Binary files /dev/null and b/hbasecon-2018/img/speakers/ZhengHu.jpg differ

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f78682da/hbasecon-2018/img/sponsors/bloomberg.png
--
diff --git a/hbasecon-2018/img/sponsors/bloomberg.png 
b/hbasecon-2018/img/sponsors/bloomberg.png
new file mode 100644
index 000..e07df0c
Binary files /dev/null and b/hbasecon-2018/img/sponsors/bloomberg.png differ

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f78682da/hbasecon-2018/index.html
--
diff --git a/hbasecon-2018/index.html b/hbasecon-2018/index.html
index 02dc186..9091409 100644
--- a/hbasecon-2018/index.html
+++ b/hbasecon-2018/index.html
@@ -59,11 +59,9 @@
 
   Registration
 
-
 
 Sponsors
   

[37/50] hbase-site git commit: Published site at 26babcf013de696b899d76a3c39434b794440d8d.

2018-05-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/da4482ac/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.FSReader.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.FSReader.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.FSReader.html
index 97ceefd..b7b4236 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.FSReader.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.FSReader.html
@@ -103,8 +103,8 @@
 095 * Caches cache whole blocks with 
trailing checksums if any. We then tag on some metadata, the
 096 * content of BLOCK_METADATA_SPACE which 
will be flag on if we are doing 'hbase'
 097 * checksums and then the offset into the 
file which is needed when we re-make a cache key
-098 * when we return the block to the cache 
as 'done'. See {@link Cacheable#serialize(ByteBuffer)} and
-099 * {@link Cacheable#getDeserializer()}.
+098 * when we return the block to the cache 
as 'done'.
+099 * See {@link 
Cacheable#serialize(ByteBuffer, boolean)} and {@link 
Cacheable#getDeserializer()}.
 100 *
 101 * 

[Remainder of this src-html diff garbled in archive extraction. Recoverable content: a second hunk (@@ -333,1579 +333,1579 @@) re-renders the HFileBlock source around the caching constructor HFileBlock(BlockType, ...) and the header-parsing constructor HFileBlock(ByteBuff, ...), source lines 325-381.]


[38/50] hbase-site git commit: Published site at 26babcf013de696b899d76a3c39434b794440d8d.

2018-05-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/da4482ac/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.BlockWritable.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.BlockWritable.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.BlockWritable.html
index 97ceefd..b7b4236 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.BlockWritable.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.BlockWritable.html
@@ -103,8 +103,8 @@
 095 * Caches cache whole blocks with 
trailing checksums if any. We then tag on some metadata, the
 096 * content of BLOCK_METADATA_SPACE which 
will be flag on if we are doing 'hbase'
 097 * checksums and then the offset into the 
file which is needed when we re-make a cache key
-098 * when we return the block to the cache 
as 'done'. See {@link Cacheable#serialize(ByteBuffer)} and
-099 * {@link Cacheable#getDeserializer()}.
+098 * when we return the block to the cache 
as 'done'.
+099 * See {@link 
Cacheable#serialize(ByteBuffer, boolean)} and {@link 
Cacheable#getDeserializer()}.
 100 *
 101 * 

[Same garbled src-html re-rendering as in the HFileBlock.FSReader.html diff above; the change to this page is identical.]


[50/50] hbase-site git commit: Published site at 26babcf013de696b899d76a3c39434b794440d8d.

2018-05-15 Thread git-site-role
Published site at 26babcf013de696b899d76a3c39434b794440d8d.


Project: http://git-wip-us.apache.org/repos/asf/hbase-site/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase-site/commit/da4482ac
Tree: http://git-wip-us.apache.org/repos/asf/hbase-site/tree/da4482ac
Diff: http://git-wip-us.apache.org/repos/asf/hbase-site/diff/da4482ac

Branch: refs/heads/asf-site
Commit: da4482ac24b3874e3aaadacdbcd3ff145bc892ee
Parents: 58454ff
Author: jenkins 
Authored: Tue May 15 14:49:24 2018 +
Committer: jenkins 
Committed: Tue May 15 14:49:24 2018 +

--
 acid-semantics.html |4 +-
 apache_hbase_reference_guide.pdf|4 +-
 book.html   |2 +-
 bulk-loads.html |4 +-
 checkstyle-aggregate.html   |  268 +-
 coc.html|4 +-
 dependencies.html   |4 +-
 dependency-convergence.html |4 +-
 dependency-info.html|4 +-
 dependency-management.html  |4 +-
 devapidocs/constant-values.html |6 +-
 devapidocs/index-all.html   |   25 +-
 .../hadoop/hbase/backup/package-tree.html   |2 +-
 .../RpcRetryingCallerWithReadReplicas.html  |4 +-
 .../hadoop/hbase/client/package-tree.html   |   22 +-
 .../hadoop/hbase/filter/package-tree.html   |8 +-
 ...BlockCacheUtil.CachedBlockCountsPerFile.html |   24 +-
 .../BlockCacheUtil.CachedBlocksByFile.html  |   42 +-
 .../hadoop/hbase/io/hfile/BlockCacheUtil.html   |   84 +-
 .../hbase/io/hfile/Cacheable.MemoryType.html|6 +-
 .../apache/hadoop/hbase/io/hfile/Cacheable.html |   15 +-
 .../io/hfile/HFileBlock.BlockIterator.html  |6 +-
 .../io/hfile/HFileBlock.BlockWritable.html  |6 +-
 .../hbase/io/hfile/HFileBlock.FSReader.html |   18 +-
 .../hbase/io/hfile/HFileBlock.FSReaderImpl.html |   58 +-
 .../io/hfile/HFileBlock.PrefetchedHeader.html   |   12 +-
 .../hbase/io/hfile/HFileBlock.Writer.State.html |   12 +-
 .../hbase/io/hfile/HFileBlock.Writer.html   |   80 +-
 .../hadoop/hbase/io/hfile/HFileBlock.html   |  140 +-
 .../io/hfile/LruBlockCache.BlockBucket.html |   28 +-
 .../io/hfile/LruBlockCache.EvictionThread.html  |   18 +-
 .../hfile/LruBlockCache.StatisticsThread.html   |8 +-
 .../hadoop/hbase/io/hfile/LruBlockCache.html|   80 +-
 .../hfile/bucket/BucketCache.BucketEntry.html   |   48 +-
 .../bucket/BucketCache.BucketEntryGroup.html|   18 +-
 .../hfile/bucket/BucketCache.RAMQueueEntry.html |   20 +-
 .../BucketCache.SharedMemoryBucketEntry.html|   20 +-
 .../bucket/BucketCache.StatisticsThread.html|8 +-
 .../hfile/bucket/BucketCache.WriterThread.html  |   14 +-
 .../hbase/io/hfile/bucket/BucketCache.html  |   82 +-
 .../hbase/io/hfile/class-use/BlockCacheKey.html |9 +
 .../hbase/io/hfile/class-use/Cacheable.html |   16 +-
 .../hadoop/hbase/io/hfile/package-tree.html |8 +-
 .../apache/hadoop/hbase/ipc/package-tree.html   |2 +-
 .../hadoop/hbase/mapreduce/package-tree.html|2 +-
 .../hbase/master/balancer/package-tree.html |2 +-
 .../hadoop/hbase/master/package-tree.html   |4 +-
 .../hbase/master/procedure/package-tree.html|2 +-
 .../org/apache/hadoop/hbase/package-tree.html   |   16 +-
 .../hadoop/hbase/procedure2/package-tree.html   |4 +-
 .../hadoop/hbase/quotas/package-tree.html   |6 +-
 .../regionserver/NoLimitScannerContext.html |2 +-
 .../regionserver/ScannerContext.Builder.html|   24 +-
 .../ScannerContext.LimitFields.html |   84 +-
 .../regionserver/ScannerContext.LimitScope.html |   12 +-
 .../regionserver/ScannerContext.NextState.html  |   28 +-
 .../ScannerContext.ProgressFields.html  |   45 +-
 .../hbase/regionserver/ScannerContext.html  |  114 +-
 .../hadoop/hbase/regionserver/StoreScanner.html |   44 +-
 .../class-use/ScannerContext.LimitScope.html|   16 -
 .../hadoop/hbase/regionserver/package-tree.html |   16 +-
 .../regionserver/querymatcher/package-tree.html |2 +-
 .../hbase/regionserver/wal/package-tree.html|2 +-
 .../replication/regionserver/package-tree.html  |2 +-
 .../hadoop/hbase/rest/model/package-tree.html   |2 +-
 .../hbase/security/access/package-tree.html |2 +-
 .../hadoop/hbase/security/package-tree.html |2 +-
 .../apache/hadoop/hbase/util/package-tree.html  |   10 +-
 .../org/apache/hadoop/hbase/Version.html|6 +-
 ...eadReplicas.ReplicaRegionServerCallable.html |  119 +-
 .../RpcRetryingCallerWithReadReplicas.html  |  119 +-
 ...BlockCacheUtil.CachedBlockCountsPerFile.html |  562 +--
 .../BlockCacheUtil.CachedBlocksByFile.html  |  562 +--
 .../hadoop/hbase/io/hfile/BlockCacheUtil.html   |  562 +--
 .../

hbase-site git commit: INFRA-10751 Empty commit

2018-05-15 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site da4482ac2 -> 3724b8a18


INFRA-10751 Empty commit


Project: http://git-wip-us.apache.org/repos/asf/hbase-site/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase-site/commit/3724b8a1
Tree: http://git-wip-us.apache.org/repos/asf/hbase-site/tree/3724b8a1
Diff: http://git-wip-us.apache.org/repos/asf/hbase-site/diff/3724b8a1

Branch: refs/heads/asf-site
Commit: 3724b8a18148782038d31ebbc29786c17f43c146
Parents: da4482a
Author: jenkins 
Authored: Tue May 15 14:49:47 2018 +
Committer: jenkins 
Committed: Tue May 15 14:49:47 2018 +

--

--




[47/50] hbase-site git commit: Published site at 26babcf013de696b899d76a3c39434b794440d8d.

2018-05-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/da4482ac/devapidocs/org/apache/hadoop/hbase/io/hfile/HFileBlock.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/io/hfile/HFileBlock.html 
b/devapidocs/org/apache/hadoop/hbase/io/hfile/HFileBlock.html
index f0895d6..2f0eda0 100644
--- a/devapidocs/org/apache/hadoop/hbase/io/hfile/HFileBlock.html
+++ b/devapidocs/org/apache/hadoop/hbase/io/hfile/HFileBlock.html
@@ -155,8 +155,8 @@ implements Cacheable.serialize(ByteBuffer)
 and
- Cacheable.getDeserializer().
+ when we return the block to the cache as 'done'.
+ See Cacheable.serialize(ByteBuffer,
 boolean) and Cacheable.getDeserializer().
 
  TODO: Should we cache the checksums? Down in 
Writer#getBlockForCaching(CacheConfig) where
  we make a block to cache-on-write, there is an attempt at turning off 
checksums. This is not the
@@ -374,7 +374,7 @@ implements Constructor and Description
 
 
-(package private)
+ 
 HFileBlock(BlockType blockType,
   int onDiskSizeWithoutHeader,
   int uncompressedSizeWithoutHeader,
@@ -429,7 +429,8 @@ implements 
 private https://docs.oracle.com/javase/8/docs/api/java/nio/ByteBuffer.html?is-external=true";
 title="class or interface in java.nio">ByteBuffer
-addMetaData(https://docs.oracle.com/javase/8/docs/api/java/nio/ByteBuffer.html?is-external=true";
 title="class or interface in java.nio">ByteBuffer destination)
+addMetaData(https://docs.oracle.com/javase/8/docs/api/java/nio/ByteBuffer.html?is-external=true";
 title="class or interface in java.nio">ByteBuffer destination,
+   boolean includeNextBlockMetadata)
 Adds metadata at current position (position is moved 
forward).
 
 
@@ -657,7 +658,8 @@ implements 
 void
-serialize(https://docs.oracle.com/javase/8/docs/api/java/nio/ByteBuffer.html?is-external=true";
 title="class or interface in java.nio">ByteBuffer destination)
+serialize(https://docs.oracle.com/javase/8/docs/api/java/nio/ByteBuffer.html?is-external=true";
 title="class or interface in java.nio">ByteBuffer destination,
+ boolean includeNextBlockMetadata)
 Serializes its data into destination.
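The extra boolean decides whether next-block metadata is appended when the block is written into the cache's buffer. A minimal sketch of how such a flag can thread through serialization, under stated assumptions: the CacheableDemo class, its field names, and the sizes are illustrative stand-ins, not HBase's actual HFileBlock implementation.

import java.nio.ByteBuffer;

// Illustrative sketch only: a cacheable payload that can optionally append
// "next block" metadata when serialized, mirroring the two-argument
// serialize(ByteBuffer, boolean) shape introduced by this change.
public class CacheableDemo {
    private final byte[] payload;
    private final int nextBlockOnDiskSize; // metadata about the following block

    public CacheableDemo(byte[] payload, int nextBlockOnDiskSize) {
        this.payload = payload;
        this.nextBlockOnDiskSize = nextBlockOnDiskSize;
    }

    public int getSerializedLength(boolean includeNextBlockMetadata) {
        return payload.length + (includeNextBlockMetadata ? Integer.BYTES : 0);
    }

    // Writes the payload, and the extra metadata only when asked to.
    public void serialize(ByteBuffer destination, boolean includeNextBlockMetadata) {
        destination.put(payload);
        if (includeNextBlockMetadata) {
            destination.putInt(nextBlockOnDiskSize);
        }
    }

    public static void main(String[] args) {
        CacheableDemo block = new CacheableDemo(new byte[]{1, 2, 3}, 42);
        ByteBuffer buf = ByteBuffer.allocate(block.getSerializedLength(true));
        block.serialize(buf, true);
        System.out.println("bytes written: " + buf.position()); // 7
    }
}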
 
 
@@ -980,7 +982,7 @@ implements See Also:
-serialize(ByteBuffer)
+#serialize(ByteBuffer)
 
 
 
@@ -1029,20 +1031,19 @@ implements 
 
 HFileBlock
-HFileBlock(BlockType blockType,
-   int onDiskSizeWithoutHeader,
-   int uncompressedSizeWithoutHeader,
-   long prevBlockOffset,
-   https://docs.oracle.com/javase/8/docs/api/java/nio/ByteBuffer.html?is-external=true";
 title="class or interface in java.nio">ByteBuffer b,
-   boolean fillHeader,
-   long offset,
-   int nextBlockOnDiskSize,
-   int onDiskDataSizeWithHeader,
-   HFileContext fileContext)
+public HFileBlock(BlockType blockType,
+  int onDiskSizeWithoutHeader,
+  int uncompressedSizeWithoutHeader,
+  long prevBlockOffset,
+  https://docs.oracle.com/javase/8/docs/api/java/nio/ByteBuffer.html?is-external=true";
 title="class or interface in java.nio">ByteBuffer b,
+  boolean fillHeader,
+  long offset,
+  int nextBlockOnDiskSize,
+  int onDiskDataSizeWithHeader,
+  HFileContext fileContext)
 Creates a new HFile block 
from the given fields. This constructor
  is used only while writing blocks and caching,
  and is sitting in a byte buffer and we want to stuff the block into cache.
- See HFileBlock.Writer.getBlockForCaching(CacheConfig).
 
  TODO: The caller presumes no checksumming
  required of this block instance since going into cache; checksum already 
verified on
@@ -1067,7 +1068,7 @@ implements 
 
 HFileBlock
-HFileBlock(ByteBuff buf,
+HFileBlock(ByteBuff buf,
boolean usesHBaseChecksum,
Cacheable.MemoryType memType,
long offset,
@@ -1100,7 +1101,7 @@ implements 
 
 init
-private void init(BlockType blockType,
+private void init(BlockType blockType,
   int onDiskSizeWithoutHeader,
   int uncompressedSizeWithoutHeader,
   long prevBlockOffset,
@@ -1117,7 +1118,7 @@ implements 
 
 getOnDiskSizeWithHeader
-private static int getOnDiskSizeWithHeader(https://docs.oracle.com/javase/8/docs/api/java/nio/ByteBuffer.html?is-external=true";
 title="class or interface in java.nio">ByteBuffer headerBuf,
+private static int getOnDiskSizeWithHeader(https://docs.oracle.com/javase/8/docs/api/java/nio/ByteBuffer.html?is-external=true";
 title="class or interface in java.nio">ByteBuffer headerBuf,
boolean verifyChecksum)
 Parse total on disk size including header and 
checksum.
 
@@ -1135,7 +1136,7 @@ implements 
 
 getNextBlockOnDiskSize
-int getNextBlockOnDiskSize()
+int getNextBlockOnDiskSize()
 
 Returns:
 the on-disk size of the next block (including the header size and any 
c

[43/50] hbase-site git commit: Published site at 26babcf013de696b899d76a3c39434b794440d8d.

2018-05-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/da4482ac/devapidocs/org/apache/hadoop/hbase/regionserver/StoreScanner.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/StoreScanner.html 
b/devapidocs/org/apache/hadoop/hbase/regionserver/StoreScanner.html
index 101b23a..8bdd840 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/StoreScanner.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/StoreScanner.html
@@ -1476,7 +1476,7 @@ implements 
 
 needToReturn
-private ScannerContext.NextState needToReturn(https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List outResult)
+private ScannerContext.NextState needToReturn(https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List outResult)
 If the top cell won't be flushed into disk, the new top 
cell may be
  changed after #reopenAfterFlush. Because the older top cell only exist
  in the memstore scanner but the memstore scanner is replaced by hfile
@@ -1498,7 +1498,7 @@ implements 
 
 seekOrSkipToNextRow
-private void seekOrSkipToNextRow(Cell cell)
+private void seekOrSkipToNextRow(Cell cell)
   throws https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true";
 title="class or interface in java.io">IOException
 
 Throws:
@@ -1512,7 +1512,7 @@ implements 
 
 seekOrSkipToNextColumn
-private void seekOrSkipToNextColumn(Cell cell)
+private void seekOrSkipToNextColumn(Cell cell)
  throws https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true";
 title="class or interface in java.io">IOException
 
 Throws:
@@ -1526,7 +1526,7 @@ implements 
 
 trySkipToNextRow
-protected boolean trySkipToNextRow(Cell cell)
+protected boolean trySkipToNextRow(Cell cell)
 throws https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true";
 title="class or interface in java.io">IOException
 See if we should actually SEEK or rather just SKIP to the 
next Cell (see HBASE-13109).
  ScanQueryMatcher may issue SEEK hints, such as seek to next column, next row,
@@ -1589,7 +1589,7 @@ implements 
 
 trySkipToNextColumn
-protected boolean trySkipToNextColumn(Cell cell)
+protected boolean trySkipToNextColumn(Cell cell)
throws https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true";
 title="class or interface in java.io">IOException
 See trySkipToNextRow(Cell)
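The idea behind trySkipToNextRow/trySkipToNextColumn (HBASE-13109) is that a SEEK is expensive because it may re-position several underlying scanners, so when the target is only a few cells away it is cheaper to iterate. A rough sketch of that decision, with a plain list standing in for the real heap of scanners; the Cell model and the MAX_SKIPS bound are illustrative assumptions, not HBase's values.

import java.util.List;

// Sketch of the SKIP-vs-SEEK trade-off: a SEEK repositions the scanner in one
// step but is costly; a SKIP advances one cell cheaply. If the next row is
// close, a few SKIPs beat one SEEK.
public class SkipVsSeekDemo {
    record Cell(String row, String qualifier) {}

    static final int MAX_SKIPS = 3; // illustrative threshold

    // Returns the index of the first cell of the next row. Uses SKIPs when the
    // row boundary is within MAX_SKIPS cells, otherwise falls back to a
    // seek-style jump (linear here for the demo; a real seek is an index lookup).
    static int nextRowIndex(List<Cell> cells, int pos) {
        String row = cells.get(pos).row();
        for (int i = pos + 1; i <= pos + MAX_SKIPS && i < cells.size(); i++) {
            if (!cells.get(i).row().equals(row)) {
                return i; // reached by cheap SKIPs
            }
        }
        // Too far away: give up on skipping and "seek" to the boundary.
        int i = pos + 1;
        while (i < cells.size() && cells.get(i).row().equals(row)) {
            i++;
        }
        return i;
    }

    public static void main(String[] args) {
        List<Cell> cells = List.of(
            new Cell("r1", "a"), new Cell("r1", "b"),
            new Cell("r2", "a"), new Cell("r2", "b"));
        System.out.println(nextRowIndex(cells, 0)); // 2
    }
}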
 
@@ -1608,7 +1608,7 @@ implements 
 
 getReadPoint
-public long getReadPoint()
+public long getReadPoint()
 
 Specified by:
 getReadPoint in
 interface ChangedReadersObserver
@@ -1623,7 +1623,7 @@ implements 
 
 clearAndClose
-private static void clearAndClose(https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List scanners)
+private static void clearAndClose(https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List scanners)
 
 
 
@@ -1632,7 +1632,7 @@ implements 
 
 updateReaders
-public void updateReaders(https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List sfs,
+public void updateReaders(https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List sfs,
   https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List memStoreScanners)
throws https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true";
 title="class or interface in java.io">IOException
 Description copied from 
interface: ChangedReadersObserver
@@ -1654,7 +1654,7 @@ implements 
 
 reopenAfterFlush
-protected final boolean reopenAfterFlush()
+protected final boolean reopenAfterFlush()
   throws https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true";
 title="class or interface in java.io">IOException
 
 Returns:
@@ -1670,7 +1670,7 @@ implements 
 
 resetQueryMatcher
-private void resetQueryMatcher(Cell lastTopKey)
+private void resetQueryMatcher(Cell lastTopKey)
 
 
 
@@ -1679,7 +1679,7 @@ implements 
 
 checkScanOrder
-protected void checkScanOrder(Cell prevKV,
+protected void checkScanOrder(Cell prevKV,
   Cell kv,
   CellComparator comparator)
throws https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true";
 title="class or interface in java.io">IOExc

[33/50] hbase-site git commit: Published site at 26babcf013de696b899d76a3c39434b794440d8d.

2018-05-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/da4482ac/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.Writer.State.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.Writer.State.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.Writer.State.html
index 97ceefd..b7b4236 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.Writer.State.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.Writer.State.html
@@ -103,8 +103,8 @@
 095 * Caches cache whole blocks with 
trailing checksums if any. We then tag on some metadata, the
 096 * content of BLOCK_METADATA_SPACE which 
will be flag on if we are doing 'hbase'
 097 * checksums and then the offset into the 
file which is needed when we re-make a cache key
-098 * when we return the block to the cache 
as 'done'. See {@link Cacheable#serialize(ByteBuffer)} and
-099 * {@link Cacheable#getDeserializer()}.
+098 * when we return the block to the cache 
as 'done'.
+099 * See {@link 
Cacheable#serialize(ByteBuffer, boolean)} and {@link 
Cacheable#getDeserializer()}.
 100 *
 101 * TODO: Should we cache the checksums? Down in Writer#getBlockForCaching(CacheConfig) where
 102 * we make a block to cache-on-write, there is an attempt at turning off checksums. This is not the
@@ -333,1579 +333,1579 @@
 325 * Creates a new {@link HFile} block from the given fields. This constructor
 326 * is used only while writing blocks and caching,
 327 * and is sitting in a byte buffer and we want to stuff the block into cache.
-328 * See {@link Writer#getBlockForCaching(CacheConfig)}.
-329 *
-330 * TODO: The caller presumes no checksumming
-331 * required of this block instance since going into cache; checksum already verified on
-332 * underlying block data pulled in from filesystem. Is that correct? What if cache is SSD?
-333 *
-334 * @param blockType the type of this block, see {@link BlockType}
-335 * @param onDiskSizeWithoutHeader see {@link #onDiskSizeWithoutHeader}
-336 * @param uncompressedSizeWithoutHeader see {@link #uncompressedSizeWithoutHeader}
-337 * @param prevBlockOffset see {@link #prevBlockOffset}
-338 * @param b block header ({@link HConstants#HFILEBLOCK_HEADER_SIZE} bytes)
-339 * @param fillHeader when true, write the first 4 header fields into passed buffer.
-340 * @param offset the file offset the block was read from
-341 * @param onDiskDataSizeWithHeader see {@link #onDiskDataSizeWithHeader}
-342 * @param fileContext HFile meta data
-343 */
-344  HFileBlock(BlockType blockType, int onDiskSizeWithoutHeader, int uncompressedSizeWithoutHeader,
-345      long prevBlockOffset, ByteBuffer b, boolean fillHeader, long offset,
-346      final int nextBlockOnDiskSize, int onDiskDataSizeWithHeader, HFileContext fileContext) {
-347    init(blockType, onDiskSizeWithoutHeader, uncompressedSizeWithoutHeader,
-348        prevBlockOffset, offset, onDiskDataSizeWithHeader, nextBlockOnDiskSize, fileContext);
-349    this.buf = new SingleByteBuff(b);
-350    if (fillHeader) {
-351      overwriteHeader();
-352    }
-353    this.buf.rewind();
-354  }
-355
-356  /**
-357   * Creates a block from an existing buffer starting with a header. Rewinds
-358   * and takes ownership of the buffer. By definition of rewind, ignores the
-359   * buffer position, but if you slice the buffer beforehand, it will rewind
-360   * to that point.
-361   * @param buf Has header, content, and trailing checksums if present.
-362   */
-363  HFileBlock(ByteBuff buf, boolean usesHBaseChecksum, MemoryType memType, final long offset,
-364      final int nextBlockOnDiskSize, HFileContext fileContext) throws IOException {
-365    buf.rewind();
-366    final BlockType blockType = BlockType.read(buf);
-367    final int onDiskSizeWithoutHeader = buf.getInt(Header.ON_DISK_SIZE_WITHOUT_HEADER_INDEX);
-368    final int uncompressedSizeWithoutHeader =
-369        buf.getInt(Header.UNCOMPRESSED_SIZE_WITHOUT_HEADER_INDEX);
-370    final long prevBlockOffset = buf.getLong(Header.PREV_BLOCK_OFFSET_INDEX);
-371    // This constructor is called when we deserialize a block from cache and when we read a block in
-372    // from the fs. fileCache is null when deserialized from cache so need to make up one.
-373    HFileContextBuilder fileContextBuilder = fileContext != null?
-374        new HFileContextBuilder(fileContext): new HFileContextBuilder();
-375    fileContextBuilder.withHBaseCheckSum(usesHBaseChecksum);
-376    int onDiskDataSizeWithHeader;
-377    if (usesHBaseChecksum) {
-378      byte checksumType = buf.get(Header.CHECKSUM_TYPE_INDEX);
-379      int bytesPerChecksum = buf.getInt(Header.BYTES_PER_CHECKSUM_INDEX);
-380      onDiskDataSizeWithHeader = buf.getInt(Header.ON_DISK_DATA_SIZE_WITH_HEADER_INDEX);
-381      // Use the checksum type and bytes per checksum from header, not from filecontext.
-382


[36/50] hbase-site git commit: Published site at 26babcf013de696b899d76a3c39434b794440d8d.

2018-05-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/da4482ac/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.FSReaderImpl.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.FSReaderImpl.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.FSReaderImpl.html
index 97ceefd..b7b4236 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.FSReaderImpl.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.FSReaderImpl.html
@@ -103,8 +103,8 @@
 095 * Caches cache whole blocks with 
trailing checksums if any. We then tag on some metadata, the
 096 * content of BLOCK_METADATA_SPACE which 
will be flag on if we are doing 'hbase'
 097 * checksums and then the offset into the 
file which is needed when we re-make a cache key
-098 * when we return the block to the cache 
as 'done'. See {@link Cacheable#serialize(ByteBuffer)} and
-099 * {@link Cacheable#getDeserializer()}.
+098 * when we return the block to the cache 
as 'done'.
+099 * See {@link 
Cacheable#serialize(ByteBuffer, boolean)} and {@link 
Cacheable#getDeserializer()}.
 100 *
 101 * 



[40/50] hbase-site git commit: Published site at 26babcf013de696b899d76a3c39434b794440d8d.

2018-05-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/da4482ac/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/BlockCacheUtil.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/BlockCacheUtil.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/BlockCacheUtil.html
index f8ce32e..f118c08 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/BlockCacheUtil.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/BlockCacheUtil.html
@@ -41,273 +41,305 @@
 033import 
org.apache.hadoop.conf.Configuration;
 034import 
org.apache.hadoop.hbase.metrics.impl.FastLongHistogram;
 035import 
org.apache.hadoop.hbase.util.Bytes;
-036
-037/**
-038 * Utility for aggregating counts in 
CachedBlocks and toString/toJSON CachedBlocks and BlockCaches.
-039 * No attempt has been made at making 
this thread safe.
-040 */
-041@InterfaceAudience.Private
-042public class BlockCacheUtil {
-043
-044
-045  public static final long 
NANOS_PER_SECOND = 1000000000;
-046
-047  /**
-048   * Needed generating JSON.
-049   */
-050  private static final ObjectMapper 
MAPPER = new ObjectMapper();
-051  static {
-052
MAPPER.configure(SerializationFeature.FAIL_ON_EMPTY_BEANS, false);
-053
MAPPER.configure(SerializationFeature.FLUSH_AFTER_WRITE_VALUE, true);
-054
MAPPER.configure(SerializationFeature.INDENT_OUTPUT, true);
-055  }
-056
-057  /**
-058   * @param cb
-059   * @return The block content as 
String.
-060   */
-061  public static String toString(final 
CachedBlock cb, final long now) {
-062return "filename=" + cb.getFilename() 
+ ", " + toStringMinusFileName(cb, now);
-063  }
-064
-065  /**
-066   * Little data structure to hold counts 
for a file.
-067   * Used doing a toJSON.
-068   */
-069  static class CachedBlockCountsPerFile 
{
-070private int count = 0;
-071private long size = 0;
-072private int countData = 0;
-073private long sizeData = 0;
-074private final String filename;
-075
-076CachedBlockCountsPerFile(final String 
filename) {
-077  this.filename = filename;
-078}
-079
-080public int getCount() {
-081  return count;
-082}
-083
-084public long getSize() {
-085  return size;
-086}
-087
-088public int getCountData() {
-089  return countData;
-090}
-091
-092public long getSizeData() {
-093  return sizeData;
-094}
-095
-096public String getFilename() {
-097  return filename;
-098}
-099  }
-100
-101  /**
-102   * @param filename
-103   * @param blocks
-104   * @return A JSON String of 
filename and counts of 
blocks
-105   * @throws JsonGenerationException
-106   * @throws JsonMappingException
-107   * @throws IOException
-108   */
-109  public static String toJSON(final 
String filename, final NavigableSet blocks)
-110  throws JsonGenerationException, 
JsonMappingException, IOException {
-111CachedBlockCountsPerFile counts = new 
CachedBlockCountsPerFile(filename);
-112for (CachedBlock cb: blocks) {
-113  counts.count++;
-114  counts.size += cb.getSize();
-115  BlockType bt = cb.getBlockType();
-116  if (bt != null && 
bt.isData()) {
-117counts.countData++;
-118counts.sizeData += 
cb.getSize();
-119  }
-120}
-121return 
MAPPER.writeValueAsString(counts);
-122  }
-123
-124  /**
-125   * @param cbsbf
-126   * @return JSON string of 
cbsbf aggregated
-127   * @throws JsonGenerationException
-128   * @throws JsonMappingException
-129   * @throws IOException
-130   */
-131  public static String toJSON(final 
CachedBlocksByFile cbsbf)
-132  throws JsonGenerationException, 
JsonMappingException, IOException {
-133return 
MAPPER.writeValueAsString(cbsbf);
-134  }
-135
-136  /**
-137   * @param bc
-138   * @return JSON string of 
bc content.
-139   * @throws JsonGenerationException
-140   * @throws JsonMappingException
-141   * @throws IOException
-142   */
-143  public static String toJSON(final 
BlockCache bc)
-144  throws JsonGenerationException, 
JsonMappingException, IOException {
-145return 
MAPPER.writeValueAsString(bc);
-146  }
-147
-148  /**
-149   * @param cb
-150   * @return The block content of 
bc as a String minus the filename.
-151   */
-152  public static String 
toStringMinusFileName(final CachedBlock cb, final long now) {
-153return "offset=" + cb.getOffset() +
-154  ", size=" + cb.getSize() +
-155  ", age=" + (now - 
cb.getCachedTime()) +
-156  ", type=" + cb.getBlockType() +
-157  ", priority=" + 
cb.getBlockPriority();
-158  }
-159
-160  /**
-161   * Get a {@link CachedBlocksByFile} 
instance and load it up by iterating content in
-162   * {@link BlockCache}.
-163   * @param conf Used to read 
configurations
-164   * @param bc Block Cache to iterate.
-165   * @return Loaded up instance of 
CachedBlocksByFile
-166   */
-167  public stati
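The toJSON(filename, blocks) path shown above is a simple fold: walk the cached blocks for one file, bump total and data-block counters, then let Jackson serialize the counts bean through its getters. A standalone sketch of the same aggregation, assuming jackson-databind on the classpath; the Block record and Counts bean are simplified stand-ins for CachedBlock and CachedBlockCountsPerFile.

import java.util.List;
import com.fasterxml.jackson.databind.ObjectMapper;

// Sketch: aggregate per-file block counts and emit them as JSON, in the
// spirit of BlockCacheUtil.toJSON(filename, blocks).
public class CountsDemo {
    record Block(long size, boolean isData) {} // stand-in for CachedBlock

    public static class Counts {
        private final String filename;
        private int count;
        private long size;
        private int countData;
        private long sizeData;

        Counts(String filename) { this.filename = filename; }

        // Jackson serializes through these getters.
        public String getFilename() { return filename; }
        public int getCount() { return count; }
        public long getSize() { return size; }
        public int getCountData() { return countData; }
        public long getSizeData() { return sizeData; }
    }

    static String toJson(String filename, List<Block> blocks) throws Exception {
        Counts counts = new Counts(filename);
        for (Block b : blocks) {
            counts.count++;
            counts.size += b.size();
            if (b.isData()) {       // only data blocks feed the data counters
                counts.countData++;
                counts.sizeData += b.size();
            }
        }
        return new ObjectMapper().writeValueAsString(counts);
    }

    public static void main(String[] args) throws Exception {
        System.out.println(toJson("f1", List.of(new Block(64, true), new Block(32, false))));
    }
}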

[49/50] hbase-site git commit: Published site at 26babcf013de696b899d76a3c39434b794440d8d.

2018-05-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/da4482ac/devapidocs/org/apache/hadoop/hbase/filter/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/filter/package-tree.html 
b/devapidocs/org/apache/hadoop/hbase/filter/package-tree.html
index 3eaeafd..4aaab52 100644
--- a/devapidocs/org/apache/hadoop/hbase/filter/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/filter/package-tree.html
@@ -183,14 +183,14 @@
 
 java.lang.https://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true";
 title="class or interface in java.lang">Enum (implements java.lang.https://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true";
 title="class or interface in java.lang">Comparable, java.io.https://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true";
 title="class or interface in java.io">Serializable)
 
-org.apache.hadoop.hbase.filter.FuzzyRowFilter.SatisfiesCode
 org.apache.hadoop.hbase.filter.FuzzyRowFilter.Order
+org.apache.hadoop.hbase.filter.FilterList.Operator
+org.apache.hadoop.hbase.filter.RegexStringComparator.EngineType
 org.apache.hadoop.hbase.filter.CompareFilter.CompareOp
-org.apache.hadoop.hbase.filter.FilterWrapper.FilterRowRetCode
+org.apache.hadoop.hbase.filter.FuzzyRowFilter.SatisfiesCode
 org.apache.hadoop.hbase.filter.BitComparator.BitwiseOp
-org.apache.hadoop.hbase.filter.FilterList.Operator
 org.apache.hadoop.hbase.filter.Filter.ReturnCode
-org.apache.hadoop.hbase.filter.RegexStringComparator.EngineType
+org.apache.hadoop.hbase.filter.FilterWrapper.FilterRowRetCode
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/da4482ac/devapidocs/org/apache/hadoop/hbase/io/hfile/BlockCacheUtil.CachedBlockCountsPerFile.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/io/hfile/BlockCacheUtil.CachedBlockCountsPerFile.html
 
b/devapidocs/org/apache/hadoop/hbase/io/hfile/BlockCacheUtil.CachedBlockCountsPerFile.html
index 3ccf518..d643098 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/io/hfile/BlockCacheUtil.CachedBlockCountsPerFile.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/io/hfile/BlockCacheUtil.CachedBlockCountsPerFile.html
@@ -113,7 +113,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-static class BlockCacheUtil.CachedBlockCountsPerFile
+static class BlockCacheUtil.CachedBlockCountsPerFile
 extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">Object
 Little data structure to hold counts for a file.
  Used doing a toJSON.
@@ -235,7 +235,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 count
-private int count
+private int count
 
 
 
@@ -244,7 +244,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 size
-private long size
+private long size
 
 
 
@@ -253,7 +253,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 countData
-private int countData
+private int countData
 
 
 
@@ -262,7 +262,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 sizeData
-private long sizeData
+private long sizeData
 
 
 
@@ -271,7 +271,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 filename
-private final https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String filename
+private final https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String filename
 
 
 
@@ -288,7 +288,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 CachedBlockCountsPerFile
-CachedBlockCountsPerFile(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String filename)
+CachedBlockCountsPerFile(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String filename)
 
 
 
@@ -305,7 +305,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 getCount
-public int getCount()
+public int getCount()
 
 
 
@@ -314,7 +314,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 getSize
-public long getSize()
+public long getSize()
 
 
 
@@ -323,7 +323,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 getCountData
-public int getCountData()
+public int getCountData()
 
 
 
@@ -332,7 +332,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 getSizeData
-public long getSizeData()
+public long getSizeData()
 
 
 
@@ -341,7 +341,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 getFilename
-public https://docs.oracle.com/javas

[25/50] hbase-site git commit: Published site at 26babcf013de696b899d76a3c39434b794440d8d.

2018-05-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/da4482ac/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.BucketEntry.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.BucketEntry.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.BucketEntry.html
index e8070ca..8cb24b3 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.BucketEntry.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.BucketEntry.html
@@ -433,1254 +433,1261 @@
 425  return;
 426}
 427
-428if (backingMap.containsKey(cacheKey)) 
{
+428if (backingMap.containsKey(cacheKey) 
|| ramCache.containsKey(cacheKey)) {
 429  Cacheable existingBlock = 
getBlock(cacheKey, false, false, false);
-430  try {
-431if 
(BlockCacheUtil.compareCacheBlock(cachedItem, existingBlock) != 0) {
-432  throw new 
RuntimeException("Cached block contents differ, which should not have 
happened."
-433  + "cacheKey:" + 
cacheKey);
-434}
-435String msg = "Caching an already 
cached block: " + cacheKey;
-436msg += ". This is harmless and 
can happen in rare cases (see HBASE-8547)";
-437LOG.warn(msg);
-438  } finally {
-439// return the block since we need 
to decrement the count
-440returnBlock(cacheKey, 
existingBlock);
-441  }
-442  return;
-443}
-444
-445/*
-446 * Stuff the entry into the RAM cache 
so it can get drained to the persistent store
-447 */
-448RAMQueueEntry re =
-449new RAMQueueEntry(cacheKey, 
cachedItem, accessCount.incrementAndGet(), inMemory);
-450if (ramCache.putIfAbsent(cacheKey, 
re) != null) {
-451  return;
-452}
-453int queueNum = (cacheKey.hashCode() 
& 0x7FFFFFFF) % writerQueues.size();
-454BlockingQueue bq 
= writerQueues.get(queueNum);
-455boolean successfulAddition = false;
-456if (wait) {
-457  try {
-458successfulAddition = bq.offer(re, 
DEFAULT_CACHE_WAIT_TIME, TimeUnit.MILLISECONDS);
-459  } catch (InterruptedException e) 
{
-460
Thread.currentThread().interrupt();
-461  }
-462} else {
-463  successfulAddition = 
bq.offer(re);
-464}
-465if (!successfulAddition) {
-466  ramCache.remove(cacheKey);
-467  cacheStats.failInsert();
-468} else {
-469  this.blockNumber.increment();
-470  
this.heapSize.add(cachedItem.heapSize());
-471  blocksByHFile.add(cacheKey);
-472}
-473  }
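The write path above never blocks indefinitely: the entry goes into the RAM map first, then is handed to one of several writer queues chosen by hashing the key (masking the sign bit keeps the index non-negative), and a failed offer rolls the RAM entry back. A compact sketch of that enqueue step; the queue count, timeout, and String keys are illustrative assumptions, not BucketCache's actual types.

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.TimeUnit;

// Sketch: shard cache writes across N writer queues by key hash, with an
// optional bounded wait, rolling back the RAM entry if the queue is full.
public class WriterQueueDemo {
    static final long CACHE_WAIT_MS = 50; // illustrative, not HBase's default

    final ConcurrentHashMap<String, byte[]> ramCache = new ConcurrentHashMap<>();
    final List<BlockingQueue<String>> writerQueues = new ArrayList<>();

    WriterQueueDemo(int numQueues, int capacity) {
        for (int i = 0; i < numQueues; i++) {
            writerQueues.add(new ArrayBlockingQueue<>(capacity));
        }
    }

    boolean cache(String key, byte[] block, boolean wait) throws InterruptedException {
        if (ramCache.putIfAbsent(key, block) != null) {
            return false; // someone else is already caching this key
        }
        // Mask the sign bit so the modulo always yields a valid index.
        int queueNum = (key.hashCode() & 0x7FFFFFFF) % writerQueues.size();
        BlockingQueue<String> q = writerQueues.get(queueNum);
        boolean added = wait ? q.offer(key, CACHE_WAIT_MS, TimeUnit.MILLISECONDS) : q.offer(key);
        if (!added) {
            ramCache.remove(key); // roll back so a later attempt can retry
        }
        return added;
    }

    public static void main(String[] args) throws InterruptedException {
        WriterQueueDemo demo = new WriterQueueDemo(4, 2);
        System.out.println(demo.cache("block-1", new byte[]{1}, true)); // true
    }
}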
-474
-475  /**
-476   * Get the buffer of the block with the 
specified key.
-477   * @param key block's cache key
-478   * @param caching true if the caller 
caches blocks on cache misses
-479   * @param repeat Whether this is a 
repeat lookup for the same block
-480   * @param updateCacheMetrics Whether we 
should update cache metrics or not
-481   * @return buffer of specified cache 
key, or null if not in cache
-482   */
-483  @Override
-484  public Cacheable getBlock(BlockCacheKey 
key, boolean caching, boolean repeat,
-485  boolean updateCacheMetrics) {
-486if (!cacheEnabled) {
-487  return null;
-488}
-489RAMQueueEntry re = 
ramCache.get(key);
-490if (re != null) {
-491  if (updateCacheMetrics) {
-492cacheStats.hit(caching, 
key.isPrimary(), key.getBlockType());
-493  }
-494  
re.access(accessCount.incrementAndGet());
-495  return re.getData();
-496}
-497BucketEntry bucketEntry = 
backingMap.get(key);
-498if (bucketEntry != null) {
-499  long start = System.nanoTime();
-500  ReentrantReadWriteLock lock = 
offsetLock.getLock(bucketEntry.offset());
-501  try {
-502lock.readLock().lock();
-503// We can not read here even if 
backingMap does contain the given key because its offset
-504// maybe changed. If we lock 
BlockCacheKey instead of offset, then we can only check
-505// existence here.
-506if 
(bucketEntry.equals(backingMap.get(key))) {
-507  // TODO : change this area - 
should be removed after server cells and
-508  // 12295 are available
-509  int len = 
bucketEntry.getLength();
-510  if (LOG.isTraceEnabled()) {
-511LOG.trace("Read offset=" + 
bucketEntry.offset() + ", len=" + len);
-512  }
-513  Cacheable cachedBlock = 
ioEngine.read(bucketEntry.offset(), len,
-514  
bucketEntry.deserializerReference(this.deserialiserMap));
-515  long timeTaken = 
System.nanoTime() - start;
-516  if (updateCacheMetrics) {
-517cacheStats.hit(caching, 
key.isPrimary(), key.getBlockType());
-518
cacheStats.ioHit(timeTaken);
+430
+431  try {
+432int comparison = 
BlockCacheUtil.validateBlockAddition(existingBlock, cachedItem, cacheKey);
+433if (comparison != 0) 
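The new path consults both backingMap and ramCache before accepting a block, and delegates the duplicate check to a validation helper instead of throwing outright. A sketch of that compare-before-cache guard; the byte-array block type and the byte-wise comparison rule are simplifications of what validateBlockAddition actually does, not its real signature.

import java.util.Arrays;
import java.util.concurrent.ConcurrentHashMap;

// Sketch: before caching, check whether the key is already present in either
// the persistent map or the in-RAM staging map, and compare contents rather
// than blindly failing on a re-cache of identical data.
public class DuplicateBlockDemo {
    final ConcurrentHashMap<String, byte[]> backingMap = new ConcurrentHashMap<>();
    final ConcurrentHashMap<String, byte[]> ramCache = new ConcurrentHashMap<>();

    int validateBlockAddition(byte[] existing, byte[] candidate, String key) {
        int cmp = Arrays.compare(existing, candidate);
        if (cmp != 0) {
            System.out.println("contents differ for " + key + "; keeping existing");
        }
        return cmp;
    }

    boolean cacheBlock(String key, byte[] block) {
        byte[] existing = backingMap.containsKey(key) ? backingMap.get(key) : ramCache.get(key);
        if (existing != null) {
            // Re-caching the same bytes is harmless; differing bytes are a bug.
            return validateBlockAddition(existing, block, key) == 0;
        }
        ramCache.put(key, block); // stage for the writer threads
        return true;
    }

    public static void main(String[] args) {
        DuplicateBlockDemo demo = new DuplicateBlockDemo();
        demo.cacheBlock("k", new byte[]{1, 2});
        System.out.println(demo.cacheBlock("k", new byte[]{1, 2})); // true: identical re-cache
    }
}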

[48/50] hbase-site git commit: Published site at 26babcf013de696b899d76a3c39434b794440d8d.

2018-05-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/da4482ac/devapidocs/org/apache/hadoop/hbase/io/hfile/HFileBlock.FSReaderImpl.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/io/hfile/HFileBlock.FSReaderImpl.html 
b/devapidocs/org/apache/hadoop/hbase/io/hfile/HFileBlock.FSReaderImpl.html
index eca9c0e..ae13b31 100644
--- a/devapidocs/org/apache/hadoop/hbase/io/hfile/HFileBlock.FSReaderImpl.html
+++ b/devapidocs/org/apache/hadoop/hbase/io/hfile/HFileBlock.FSReaderImpl.html
@@ -117,7 +117,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-static class HFileBlock.FSReaderImpl
+static class HFileBlock.FSReaderImpl
 extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">Object
 implements HFileBlock.FSReader
 Reads version 2 HFile blocks from the filesystem.
@@ -376,7 +376,7 @@ implements 
 
 streamWrapper
-private FSDataInputStreamWrapper streamWrapper
+private FSDataInputStreamWrapper streamWrapper
 The file system stream of the underlying HFile that
  does or doesn't do checksum validations in the filesystem
 
@@ -387,7 +387,7 @@ implements 
 
 encodedBlockDecodingCtx
-private HFileBlockDecodingContext encodedBlockDecodingCtx
+private HFileBlockDecodingContext encodedBlockDecodingCtx
 
 
 
@@ -396,7 +396,7 @@ implements 
 
 defaultDecodingCtx
-private final HFileBlockDefaultDecodingContext defaultDecodingCtx
+private final HFileBlockDefaultDecodingContext defaultDecodingCtx
 Default context used when BlockType != BlockType.ENCODED_DATA.
 
 
@@ -406,7 +406,7 @@ implements 
 
 prefetchedHeader
-private https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/AtomicReference.html?is-external=true";
 title="class or interface in 
java.util.concurrent.atomic">AtomicReference prefetchedHeader
+private https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/AtomicReference.html?is-external=true";
 title="class or interface in 
java.util.concurrent.atomic">AtomicReference prefetchedHeader
 Cache of the NEXT header after this. Check it is indeed 
next blocks header
  before using it. TODO: Review. This overread into next block to fetch
  next blocks header seems unnecessary given we usually get the block size
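prefetchedHeader is a one-slot cache: after reading block N the reader over-reads block N+1's header and stashes it with its offset, so the next read can skip one short pread, but it must verify the stashed offset really is the requested block before trusting it. A minimal sketch of that pattern, assuming an illustrative Header record and a fixed 64-byte spacing that stand in for the real header bookkeeping.

import java.util.concurrent.atomic.AtomicReference;

// Sketch: cache the next block's prefetched header keyed by file offset, and
// only use it when the requested offset matches.
public class PrefetchedHeaderDemo {
    record Header(long offset, byte[] bytes) {}

    private final AtomicReference<Header> prefetchedHeader = new AtomicReference<>();

    byte[] readHeader(long offset) {
        Header cached = prefetchedHeader.get();
        if (cached != null && cached.offset() == offset) {
            return cached.bytes(); // hit: this really is the block we want
        }
        byte[] fresh = readFromDisk(offset); // miss: go to the filesystem
        // Pretend we over-read and saw the following header too; remember it.
        prefetchedHeader.set(new Header(offset + 64, readFromDisk(offset + 64)));
        return fresh;
    }

    private byte[] readFromDisk(long offset) {
        return new byte[]{(byte) offset}; // stand-in for a real pread
    }

    public static void main(String[] args) {
        PrefetchedHeaderDemo demo = new PrefetchedHeaderDemo();
        demo.readHeader(0);   // miss; prefetches the header at 64
        demo.readHeader(64);  // hit from the prefetched slot
        System.out.println("done");
    }
}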
@@ -419,7 +419,7 @@ implements 
 
 fileSize
-private long fileSize
+private long fileSize
 The size of the file we are reading from, or -1 if 
unknown.
 
 
@@ -429,7 +429,7 @@ implements 
 
 hdrSize
-protected final int hdrSize
+protected final int hdrSize
 The size of the header
 
 
@@ -439,7 +439,7 @@ implements 
 
 hfs
-private HFileSystem hfs
+private HFileSystem hfs
 The filesystem used to access data
 
 
@@ -449,7 +449,7 @@ implements 
 
 fileContext
-private HFileContext fileContext
+private HFileContext fileContext
 
 
 
@@ -458,7 +458,7 @@ implements 
 
 pathName
-private https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String pathName
+private https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String pathName
 
 
 
@@ -467,7 +467,7 @@ implements 
 
 streamLock
-private final https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/locks/Lock.html?is-external=true";
 title="class or interface in java.util.concurrent.locks">Lock streamLock
+private final https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/locks/Lock.html?is-external=true";
 title="class or interface in java.util.concurrent.locks">Lock streamLock
 
 
 
@@ -484,7 +484,7 @@ implements 
 
 FSReaderImpl
-FSReaderImpl(FSDataInputStreamWrapper stream,
+FSReaderImpl(FSDataInputStreamWrapper stream,
  long fileSize,
  HFileSystem hfs,
  org.apache.hadoop.fs.Path path,
@@ -502,7 +502,7 @@ implements 
 
 FSReaderImpl
-FSReaderImpl(org.apache.hadoop.fs.FSDataInputStream istream,
+FSReaderImpl(org.apache.hadoop.fs.FSDataInputStream istream,
  long fileSize,
  HFileContext fileContext)
   throws https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true";
 title="class or interface in java.io">IOException
@@ -528,7 +528,7 @@ implements 
 
 blockRange
-public HFileBlock.BlockIterator blockRange(long startOffset,
+public HFileBlock.BlockIterator blockRange(long startOffset,
long endOffset)
 Description copied from 
interface: HFileBlock.FSReader
 Creates a block iterator over the given portion of the HFile.
@@ -553,7 +553,7 @@ implements 
 
 readAtOffset
-protected int readAtOffset(org.apache.hadoop.fs.FSDataInputStream istream,
+protected int readAtOffset(org.apache.hadoop.fs.FSDataInputStream istream,
byte[] dest,
int destOffset,
int siz

[39/50] hbase-site git commit: Published site at 26babcf013de696b899d76a3c39434b794440d8d.

2018-05-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/da4482ac/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.BlockIterator.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.BlockIterator.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.BlockIterator.html
index 97ceefd..b7b4236 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.BlockIterator.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.BlockIterator.html
@@ -103,8 +103,8 @@
 095 * Caches cache whole blocks with 
trailing checksums if any. We then tag on some metadata, the
 096 * content of BLOCK_METADATA_SPACE which 
will be flag on if we are doing 'hbase'
 097 * checksums and then the offset into the 
file which is needed when we re-make a cache key
-098 * when we return the block to the cache 
as 'done'. See {@link Cacheable#serialize(ByteBuffer)} and
-099 * {@link Cacheable#getDeserializer()}.
+098 * when we return the block to the cache 
as 'done'.
+099 * See {@link 
Cacheable#serialize(ByteBuffer, boolean)} and {@link 
Cacheable#getDeserializer()}.
 100 *
 101 * 



[42/50] hbase-site git commit: Published site at 26babcf013de696b899d76a3c39434b794440d8d.

2018-05-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/da4482ac/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/BlockCacheUtil.CachedBlockCountsPerFile.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/BlockCacheUtil.CachedBlockCountsPerFile.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/BlockCacheUtil.CachedBlockCountsPerFile.html
index f8ce32e..f118c08 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/BlockCacheUtil.CachedBlockCountsPerFile.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/BlockCacheUtil.CachedBlockCountsPerFile.html

[41/50] hbase-site git commit: Published site at 26babcf013de696b899d76a3c39434b794440d8d.

2018-05-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/da4482ac/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/BlockCacheUtil.CachedBlocksByFile.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/BlockCacheUtil.CachedBlocksByFile.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/BlockCacheUtil.CachedBlocksByFile.html
index f8ce32e..f118c08 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/BlockCacheUtil.CachedBlocksByFile.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/BlockCacheUtil.CachedBlocksByFile.html

[31/50] hbase-site git commit: Published site at 26babcf013de696b899d76a3c39434b794440d8d.

2018-05-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/da4482ac/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.html
index 97ceefd..b7b4236 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.html
@@ -103,8 +103,8 @@
 095 * Caches cache whole blocks with 
trailing checksums if any. We then tag on some metadata, the
 096 * content of BLOCK_METADATA_SPACE which 
will be flag on if we are doing 'hbase'
 097 * checksums and then the offset into the 
file which is needed when we re-make a cache key
-098 * when we return the block to the cache 
as 'done'. See {@link Cacheable#serialize(ByteBuffer)} and
-099 * {@link Cacheable#getDeserializer()}.
+098 * when we return the block to the cache 
as 'done'.
+099 * See {@link 
Cacheable#serialize(ByteBuffer, boolean)} and {@link 
Cacheable#getDeserializer()}.
 100 *
 101 * 



[45/50] hbase-site git commit: Published site at 26babcf013de696b899d76a3c39434b794440d8d.

2018-05-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/da4482ac/devapidocs/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.html 
b/devapidocs/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.html
index 9b25d3b..11df7d7 100644
--- a/devapidocs/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.html
+++ b/devapidocs/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.html
@@ -1523,7 +1523,7 @@ implements 
 
 getBlock
-public Cacheable getBlock(BlockCacheKey key,
+public Cacheable getBlock(BlockCacheKey key,
   boolean caching,
   boolean repeat,
   boolean updateCacheMetrics)
@@ -1547,7 +1547,7 @@ implements 
 
 blockEvicted
-void blockEvicted(BlockCacheKey cacheKey,
+void blockEvicted(BlockCacheKey cacheKey,
   BucketCache.BucketEntry bucketEntry,
   boolean decrementBlockNumber)
 
@@ -1558,7 +1558,7 @@ implements 
 
 evictBlock
-public boolean evictBlock(BlockCacheKey cacheKey)
+public boolean evictBlock(BlockCacheKey cacheKey)
 Description copied from 
interface: BlockCache
 Evict block from cache.
 
@@ -1577,7 +1577,7 @@ implements 
 
 forceEvict
-private boolean forceEvict(BlockCacheKey cacheKey)
+private boolean forceEvict(BlockCacheKey cacheKey)
 
 
 
@@ -1586,7 +1586,7 @@ implements 
 
 checkRamCache
-private BucketCache.RAMQueueEntry checkRamCache(BlockCacheKey cacheKey)
+private BucketCache.RAMQueueEntry checkRamCache(BlockCacheKey cacheKey)
 
 
 
@@ -1595,7 +1595,7 @@ implements 
 
 evictBlock
-public boolean evictBlock(BlockCacheKey cacheKey,
+public boolean evictBlock(BlockCacheKey cacheKey,
   boolean deletedBlock)
 
 
@@ -1605,7 +1605,7 @@ implements 
 
 logStats
-public void logStats()
+public void logStats()
 
 
 
@@ -1614,7 +1614,7 @@ implements 
 
 getRealCacheSize
-public long getRealCacheSize()
+public long getRealCacheSize()
 
 
 
@@ -1623,7 +1623,7 @@ implements 
 
 acceptableSize
-private long acceptableSize()
+private long acceptableSize()
 
 
 
@@ -1632,7 +1632,7 @@ implements 
 
 getPartitionSize
-long getPartitionSize(float partitionFactor)
+long getPartitionSize(float partitionFactor)
 
 
 
@@ -1641,7 +1641,7 @@ implements 
 
 bucketSizesAboveThresholdCount
-private int bucketSizesAboveThresholdCount(float minFactor)
+private int bucketSizesAboveThresholdCount(float minFactor)
 Return the count of bucketSizeInfos that still need free space
 
 
@@ -1651,7 +1651,7 @@ implements 
 
 freeEntireBuckets
-private void freeEntireBuckets(int completelyFreeBucketsNeeded)
+private void freeEntireBuckets(int completelyFreeBucketsNeeded)
 This method will find the buckets that are minimally 
occupied
  and are not reference counted and will free them completely
  without any constraint on the access times of the elements,
@@ -1669,7 +1669,7 @@ implements 
 
 freeSpace
-private void freeSpace(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String why)
+private void freeSpace(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String why)
 Free the space if the used size reaches acceptableSize() or 
one size block
  couldn't be allocated. When freeing the space, we use the LRU algorithm and
  ensure there must be some blocks evicted
@@ -1685,7 +1685,7 @@ implements 
 
 getRAMQueueEntries
-static https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List getRAMQueueEntries(https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/BlockingQueue.html?is-external=true";
 title="class or interface in java.util.concurrent">BlockingQueue q,
+static https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List getRAMQueueEntries(https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/BlockingQueue.html?is-external=true";
 title="class or interface in java.util.concurrent">BlockingQueue q,
   https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List receptacle)
throws https://docs.oracle.com/javase/8/docs/api/java/lang/InterruptedException.html?is-external=true";
 title="class or interface in java.lang">InterruptedException
 Blocks until elements available in q then tries to grab as many as possible
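A minimal sketch of the take-then-drain pattern this javadoc describes, using only java.util.concurrent; takeThenDrain is a hypothetical stand-in for getRAMQueueEntries, not the BucketCache implementation itself:

import java.util.List;
import java.util.concurrent.BlockingQueue;

class DrainSketch {
  // Block for the first element, then grab whatever else is already queued.
  static <T> List<T> takeThenDrain(BlockingQueue<T> q, List<T> receptacle)
      throws InterruptedException {
    receptacle.clear();
    receptacle.add(q.take()); // blocks until at least one element arrives
    q.drainTo(receptacle);    // non-blocking; empties whatever else is queued
    return receptacle;
  }
}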
@@ -1708,7 +1708,7 @@ implements 
 
 persistToFile
-private void persistToFile()
+private void persistToFile()
 

[32/50] hbase-site git commit: Published site at 26babcf013de696b899d76a3c39434b794440d8d.

2018-05-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/da4482ac/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.Writer.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.Writer.html b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.Writer.html
index 97ceefd..b7b4236 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.Writer.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.Writer.html
@@ -103,8 +103,8 @@
 095 * Caches cache whole blocks with trailing checksums if any. We then tag on some metadata, the
 096 * content of BLOCK_METADATA_SPACE which will be flag on if we are doing 'hbase'
 097 * checksums and then the offset into the file which is needed when we re-make a cache key
-098 * when we return the block to the cache as 'done'. See {@link Cacheable#serialize(ByteBuffer)} and
-099 * {@link Cacheable#getDeserializer()}.
+098 * when we return the block to the cache as 'done'.
+099 * See {@link Cacheable#serialize(ByteBuffer, boolean)} and {@link Cacheable#getDeserializer()}.
 100 *
 101 * TODO: Should we cache the checksums? Down in Writer#getBlockForCaching(CacheConfig) where
 102 * we make a block to cache-on-write, there is an attempt at turning off checksums. This is not the
@@ -333,1579 +333,1579 @@
 325 * Creates a new {@link HFile} block from the given fields. This constructor
 326 * is used only while writing blocks and caching,
 327 * and is sitting in a byte buffer and we want to stuff the block into cache.
-328 * See {@link Writer#getBlockForCaching(CacheConfig)}.
-329 *
-330 * TODO: The caller presumes no checksumming
-331 * required of this block instance since going into cache; checksum already verified on
-332 * underlying block data pulled in from filesystem. Is that correct? What if cache is SSD?
-333 *
-334 * @param blockType the type of this block, see {@link BlockType}
-335 * @param onDiskSizeWithoutHeader see {@link #onDiskSizeWithoutHeader}
-336 * @param uncompressedSizeWithoutHeader see {@link #uncompressedSizeWithoutHeader}
-337 * @param prevBlockOffset see {@link #prevBlockOffset}
-338 * @param b block header ({@link HConstants#HFILEBLOCK_HEADER_SIZE} bytes)
-339 * @param fillHeader when true, write the first 4 header fields into passed buffer.
-340 * @param offset the file offset the block was read from
-341 * @param onDiskDataSizeWithHeader see {@link #onDiskDataSizeWithHeader}
-342 * @param fileContext HFile meta data
-343 */
-344 HFileBlock(BlockType blockType, int onDiskSizeWithoutHeader, int uncompressedSizeWithoutHeader,
-345     long prevBlockOffset, ByteBuffer b, boolean fillHeader, long offset,
-346     final int nextBlockOnDiskSize, int onDiskDataSizeWithHeader, HFileContext fileContext) {
-347   init(blockType, onDiskSizeWithoutHeader, uncompressedSizeWithoutHeader,
-348       prevBlockOffset, offset, onDiskDataSizeWithHeader, nextBlockOnDiskSize, fileContext);
-349   this.buf = new SingleByteBuff(b);
-350   if (fillHeader) {
-351     overwriteHeader();
-352   }
-353   this.buf.rewind();
-354 }
-355
-356 /**
-357  * Creates a block from an existing buffer starting with a header. Rewinds
-358  * and takes ownership of the buffer. By definition of rewind, ignores the
-359  * buffer position, but if you slice the buffer beforehand, it will rewind
-360  * to that point.
-361  * @param buf Has header, content, and trailing checksums if present.
-362  */
-363 HFileBlock(ByteBuff buf, boolean usesHBaseChecksum, MemoryType memType, final long offset,
-364     final int nextBlockOnDiskSize, HFileContext fileContext) throws IOException {
-365   buf.rewind();
-366   final BlockType blockType = BlockType.read(buf);
-367   final int onDiskSizeWithoutHeader = buf.getInt(Header.ON_DISK_SIZE_WITHOUT_HEADER_INDEX);
-368   final int uncompressedSizeWithoutHeader =
-369       buf.getInt(Header.UNCOMPRESSED_SIZE_WITHOUT_HEADER_INDEX);
-370   final long prevBlockOffset = buf.getLong(Header.PREV_BLOCK_OFFSET_INDEX);
-371   // This constructor is called when we deserialize a block from cache and when we read a block in
-372   // from the fs. fileCache is null when deserialized from cache so need to make up one.
-373   HFileContextBuilder fileContextBuilder = fileContext != null ?
-374       new HFileContextBuilder(fileContext) : new HFileContextBuilder();
-375   fileContextBuilder.withHBaseCheckSum(usesHBaseChecksum);
-376   int onDiskDataSizeWithHeader;
-377   if (usesHBaseChecksum) {
-378     byte checksumType = buf.get(Header.CHECKSUM_TYPE_INDEX);
-379     int bytesPerChecksum = buf.getInt(Header.BYTES_PER_CHECKSUM_INDEX);
-380     onDiskDataSizeWithHeader = buf.getInt(Header.ON_DISK_DATA_SIZE_WITH_HEADER_INDEX);
-381     // Use the checksum type and bytes per checksum from header, not from filecontext.
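The hunk above documents the move from Cacheable#serialize(ByteBuffer) to a two-argument serialize(ByteBuffer, boolean). A simplified illustration of what such a contract can look like; SimpleCacheable and FixedBlock are local stand-ins, not the HBase API:

import java.nio.ByteBuffer;

interface SimpleCacheable {
  int getSerializedLength();
  void serialize(ByteBuffer destination, boolean includeNextBlockMetadata);
}

class FixedBlock implements SimpleCacheable {
  private final byte[] payload;          // header + data + trailing checksums
  private final int nextBlockOnDiskSize; // trailing metadata, written on demand

  FixedBlock(byte[] payload, int nextBlockOnDiskSize) {
    this.payload = payload;
    this.nextBlockOnDiskSize = nextBlockOnDiskSize;
  }

  @Override
  public int getSerializedLength() {
    return payload.length + Integer.BYTES; // worst case, metadata included
  }

  @Override
  public void serialize(ByteBuffer destination, boolean includeNextBlockMetadata) {
    destination.put(payload);
    if (includeNextBlockMetadata) {
      destination.putInt(nextBlockOnDiskSize);
    }
  }
}

One plausible reason for the flag, consistent with the validateBlockAddition changes elsewhere in this push, is that omitting the trailing metadata lets a block deserialized from cache compare equal to the same block freshly read from the filesystem.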


[46/50] hbase-site git commit: Published site at 26babcf013de696b899d76a3c39434b794440d8d.

2018-05-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/da4482ac/devapidocs/org/apache/hadoop/hbase/io/hfile/LruBlockCache.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/io/hfile/LruBlockCache.html b/devapidocs/org/apache/hadoop/hbase/io/hfile/LruBlockCache.html
index 03041e5..8d21f22 100644
--- a/devapidocs/org/apache/hadoop/hbase/io/hfile/LruBlockCache.html
+++ b/devapidocs/org/apache/hadoop/hbase/io/hfile/LruBlockCache.html
@@ -1265,7 +1265,7 @@ implements 
 
 CACHE_FIXED_OVERHEAD
-public static final long CACHE_FIXED_OVERHEAD
+public static final long CACHE_FIXED_OVERHEAD
 
 
 
@@ -1422,7 +1422,7 @@ implements 
 
 assertCounterSanity
-private static void assertCounterSanity(long mapSize,
+private static void assertCounterSanity(long mapSize,
 long counterVal)
 Sanity-checking for parity between actual block cache 
content and metrics.
  Intended only for use with TRACE level logging and -ea JVM.
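The body of this helper appears in the src-html diffs later in this push; restated as a compact, simplified sketch (logging elided):

class CounterSanity {
  // Tolerate small drift between map.size() and the striped element counter;
  // flag only deltas above 5%, and give up if the counter overflowed.
  static boolean sane(long mapSize, long counterVal) {
    if (counterVal < 0 || mapSize == 0) {
      return true; // overflow or empty map: nothing reliable to assert
    }
    double pctDiff = Math.abs(((double) counterVal / (double) mapSize) - 1.0);
    return pctDiff <= 0.05;
  }
}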
@@ -1434,7 +1434,7 @@ implements 
 
 cacheBlock
-public void cacheBlock(BlockCacheKey cacheKey,
+public void cacheBlock(BlockCacheKey cacheKey,
Cacheable buf)
 Cache the block with the specified name and buffer.
  
@@ -1453,7 +1453,7 @@ implements 
 
 updateSizeMetrics
-private long updateSizeMetrics(LruCachedBlock cb,
+private long updateSizeMetrics(LruCachedBlock cb,
boolean evict)
 Helper function that updates the local size counter and 
also updates any
  per-cf or per-blocktype metrics it can discern from given
@@ -1466,7 +1466,7 @@ implements 
 
 getBlock
-public Cacheable getBlock(BlockCacheKey cacheKey,
+public Cacheable getBlock(BlockCacheKey cacheKey,
   boolean caching,
   boolean repeat,
   boolean updateCacheMetrics)
@@ -1492,7 +1492,7 @@ implements 
 
 containsBlock
-public boolean containsBlock(BlockCacheKey cacheKey)
+public boolean containsBlock(BlockCacheKey cacheKey)
 Whether the cache contains block with specified 
cacheKey
 
 Returns:
@@ -1506,7 +1506,7 @@ implements 
 
 evictBlock
-public boolean evictBlock(BlockCacheKey cacheKey)
+public boolean evictBlock(BlockCacheKey cacheKey)
 Description copied from 
interface: BlockCache
 Evict block from cache.
 
@@ -1525,7 +1525,7 @@ implements 
 
 evictBlocksByHfileName
-public int evictBlocksByHfileName(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String hfileName)
+public int evictBlocksByHfileName(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String hfileName)
 Evicts all blocks for a specific HFile. This is an
  expensive operation implemented as a linear-time search through all blocks
  in the cache. Ideally this should be a search in a log-access-time map.
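As a rough illustration of the linear-time search described here (Key is a hypothetical stand-in for BlockCacheKey, not the HBase type):

import java.util.Iterator;
import java.util.Map;

class HfileEviction {
  static final class Key {
    final String hfileName;
    final long offset;
    Key(String hfileName, long offset) {
      this.hfileName = hfileName;
      this.offset = offset;
    }
  }

  // Walk every key and remove those belonging to the given HFile.
  static <V> int evictBlocksByHfileName(Map<Key, V> cache, String hfileName) {
    int evicted = 0;
    for (Iterator<Key> it = cache.keySet().iterator(); it.hasNext();) {
      if (it.next().hfileName.equals(hfileName)) {
        it.remove();
        evicted++;
      }
    }
    return evicted;
  }
}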
@@ -1546,7 +1546,7 @@ implements 
 
 evictBlock
-protected long evictBlock(LruCachedBlock block,
+protected long evictBlock(LruCachedBlock block,
   boolean evictedByEvictionProcess)
 Evict the block, and it will be cached by the victim 
handler if exists &&
  block may be read again later
@@ -1565,7 +1565,7 @@ implements 
 
 runEviction
-private void runEviction()
+private void runEviction()
 Multi-threaded call to run the eviction process.
 
 
@@ -1575,7 +1575,7 @@ implements 
 
 isEvictionInProgress
-boolean isEvictionInProgress()
+boolean isEvictionInProgress()
 
 
 
@@ -1584,7 +1584,7 @@ implements 
 
 getOverhead
-long getOverhead()
+long getOverhead()
 
 
 
@@ -1593,7 +1593,7 @@ implements 
 
 evict
-void evict()
+void evict()
 Eviction method.
 
 
@@ -1603,7 +1603,7 @@ implements 
 
 toString
-public https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String toString()
+public https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String toString()
 
 Overrides:
 https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#toString--";
 title="class or interface in java.lang">toString in 
class https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">Object
@@ -1616,7 +1616,7 @@ implements 
 
 getMaxSize
-public long getMaxSize()
+public long getMaxSize()
 Get the maximum size of this cache.
 
 Specified by:
@@ -1632,7 +1632,7 @@ implements 
 
 getCurrentSize
-public long getCurrentSize()
+public long getCurrentSize()
 Description copied from 
interface: BlockCache
 Returns the occupied size of the block cache, in 
bytes.
 
@@ -1649,7 +1649,7 @@ implements 
 
 getCurrentDataSize
-public long getCurrentDataSize()
+public long getCurrentDataSize()
 Description copied from 
interface: BlockCache
 Returns the occupied size of data blocks, in bytes.

[30/50] hbase-site git commit: Published site at 26babcf013de696b899d76a3c39434b794440d8d.

2018-05-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/da4482ac/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/LruBlockCache.BlockBucket.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/LruBlockCache.BlockBucket.html b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/LruBlockCache.BlockBucket.html
index f5747c6..b94ef5e 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/LruBlockCache.BlockBucket.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/LruBlockCache.BlockBucket.html
@@ -388,811 +388,816 @@
 380
 381LruCachedBlock cb = 
map.get(cacheKey);
 382if (cb != null) {
-383  // compare the contents, if they 
are not equal, we are in big trouble
-384  if 
(BlockCacheUtil.compareCacheBlock(buf, cb.getBuffer()) != 0) {
-385throw new 
RuntimeException("Cached block contents differ, which should not have 
happened."
-386  + "cacheKey:" + cacheKey);
-387  }
-388  String msg = "Cached an already 
cached block: " + cacheKey + " cb:" + cb.getCacheKey();
-389  msg += ". This is harmless and can 
happen in rare cases (see HBASE-8547)";
-390  LOG.warn(msg);
-391  return;
-392}
-393long currentSize = size.get();
-394long currentAcceptableSize = 
acceptableSize();
-395long hardLimitSize = (long) 
(hardCapacityLimitFactor * currentAcceptableSize);
-396if (currentSize >= hardLimitSize) 
{
-397  stats.failInsert();
-398  if (LOG.isTraceEnabled()) {
-399LOG.trace("LruBlockCache current 
size " + StringUtils.byteDesc(currentSize)
-400  + " has exceeded acceptable 
size " + StringUtils.byteDesc(currentAcceptableSize) + "."
-401  + " The hard limit size is " + 
StringUtils.byteDesc(hardLimitSize)
-402  + ", failed to put cacheKey:" + 
cacheKey + " into LruBlockCache.");
-403  }
-404  if (!evictionInProgress) {
-405runEviction();
-406  }
-407  return;
-408}
-409cb = new LruCachedBlock(cacheKey, 
buf, count.incrementAndGet(), inMemory);
-410long newSize = updateSizeMetrics(cb, 
false);
-411map.put(cacheKey, cb);
-412long val = 
elements.incrementAndGet();
-413if (buf.getBlockType().isData()) {
-414   dataBlockElements.increment();
-415}
-416if (LOG.isTraceEnabled()) {
-417  long size = map.size();
-418  assertCounterSanity(size, val);
-419}
-420if (newSize > 
currentAcceptableSize && !evictionInProgress) {
-421  runEviction();
-422}
-423  }
-424
-425  /**
-426   * Sanity-checking for parity between 
actual block cache content and metrics.
-427   * Intended only for use with TRACE 
level logging and -ea JVM.
-428   */
-429  private static void 
assertCounterSanity(long mapSize, long counterVal) {
-430if (counterVal < 0) {
-431  LOG.trace("counterVal overflow. 
Assertions unreliable. counterVal=" + counterVal +
-432", mapSize=" + mapSize);
-433  return;
-434}
-435if (mapSize < Integer.MAX_VALUE) 
{
-436      double pct_diff = Math.abs((((double) counterVal) / ((double) mapSize)) - 1.);
-437  if (pct_diff > 0.05) {
-438LOG.trace("delta between reported 
and actual size > 5%. counterVal=" + counterVal +
-439  ", mapSize=" + mapSize);
-440  }
-441}
-442  }
-443
-444  /**
-445   * Cache the block with the specified 
name and buffer.
-446   * 

-447  *
-448  * @param cacheKey block's cache key
-449  * @param buf block buffer
-450  */
-451  @Override
-452  public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf) {
-453    cacheBlock(cacheKey, buf, false);
-454  }
-455
-456  /**
-457   * Helper function that updates the local size counter and also updates any
-458   * per-cf or per-blocktype metrics it can discern from given
-459   * {@link LruCachedBlock}
-460   */
-461  private long updateSizeMetrics(LruCachedBlock cb, boolean evict) {
-462    long heapsize = cb.heapSize();
-463    BlockType bt = cb.getBuffer().getBlockType();
-464    if (evict) {
-465      heapsize *= -1;
-466    }
-467    if (bt != null && bt.isData()) {
-468      dataBlockSize.add(heapsize);
-469    }
-470    return size.addAndGet(heapsize);
-471  }
-472
-473  /**
-474   * Get the buffer of the block with the specified name.
-475   *
-476   * @param cacheKey block's cache key
-477   * @param caching true if the caller caches blocks on cache misses
-478   * @param repeat Whether this is a repeat lookup for the same block
-479   *   (used to avoid double counting cache misses when doing double-check
-480   *   locking)
-481   * @param updateCacheMetrics Whether to update cache metrics or not
-482   *
-483   * @return buffer of specified cache key, or null if not in cache
-484   */
-485  @Override
-486  public Cacheable getBlock(BlockCacheKey cacheKey, boolean caching, boolean
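The removed updateSizeMetrics above boils down to signed-delta accounting; a compact restatement under simplified names:

import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.LongAdder;

class SizeAccounting {
  private final AtomicLong totalSize = new AtomicLong();
  private final LongAdder dataBlockSize = new LongAdder();

  // One signed delta serves both insert (+heapSize) and evict (-heapSize).
  long update(long heapSize, boolean isDataBlock, boolean evict) {
    long delta = evict ? -heapSize : heapSize;
    if (isDataBlock) {
      dataBlockSize.add(delta);
    }
    return totalSize.addAndGet(delta);
  }
}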


[23/50] hbase-site git commit: Published site at 26babcf013de696b899d76a3c39434b794440d8d.

2018-05-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/da4482ac/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.RAMQueueEntry.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.RAMQueueEntry.html b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.RAMQueueEntry.html
index e8070ca..8cb24b3 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.RAMQueueEntry.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.RAMQueueEntry.html
@@ -433,1254 +433,1261 @@
 425  return;
 426}
 427
-428    if (backingMap.containsKey(cacheKey)) {
+428    if (backingMap.containsKey(cacheKey) || ramCache.containsKey(cacheKey)) {
 429  Cacheable existingBlock = 
getBlock(cacheKey, false, false, false);
-430  try {
-431if 
(BlockCacheUtil.compareCacheBlock(cachedItem, existingBlock) != 0) {
-432  throw new 
RuntimeException("Cached block contents differ, which should not have 
happened."
-433  + "cacheKey:" + 
cacheKey);
-434}
-435String msg = "Caching an already 
cached block: " + cacheKey;
-436msg += ". This is harmless and 
can happen in rare cases (see HBASE-8547)";
-437LOG.warn(msg);
-438  } finally {
-439// return the block since we need 
to decrement the count
-440returnBlock(cacheKey, 
existingBlock);
-441  }
-442  return;
-443}
-444
-445/*
-446 * Stuff the entry into the RAM cache 
so it can get drained to the persistent store
-447 */
-448RAMQueueEntry re =
-449new RAMQueueEntry(cacheKey, 
cachedItem, accessCount.incrementAndGet(), inMemory);
-450if (ramCache.putIfAbsent(cacheKey, 
re) != null) {
-451  return;
-452}
-453int queueNum = (cacheKey.hashCode() 
& 0x7FFF) % writerQueues.size();
-454BlockingQueue bq 
= writerQueues.get(queueNum);
-455boolean successfulAddition = false;
-456if (wait) {
-457  try {
-458successfulAddition = bq.offer(re, 
DEFAULT_CACHE_WAIT_TIME, TimeUnit.MILLISECONDS);
-459  } catch (InterruptedException e) 
{
-460
Thread.currentThread().interrupt();
-461  }
-462} else {
-463  successfulAddition = 
bq.offer(re);
-464}
-465if (!successfulAddition) {
-466  ramCache.remove(cacheKey);
-467  cacheStats.failInsert();
-468} else {
-469  this.blockNumber.increment();
-470  
this.heapSize.add(cachedItem.heapSize());
-471  blocksByHFile.add(cacheKey);
-472}
-473  }
-474
-475  /**
-476   * Get the buffer of the block with the 
specified key.
-477   * @param key block's cache key
-478   * @param caching true if the caller 
caches blocks on cache misses
-479   * @param repeat Whether this is a 
repeat lookup for the same block
-480   * @param updateCacheMetrics Whether we 
should update cache metrics or not
-481   * @return buffer of specified cache 
key, or null if not in cache
-482   */
-483  @Override
-484  public Cacheable getBlock(BlockCacheKey 
key, boolean caching, boolean repeat,
-485  boolean updateCacheMetrics) {
-486if (!cacheEnabled) {
-487  return null;
-488}
-489RAMQueueEntry re = 
ramCache.get(key);
-490if (re != null) {
-491  if (updateCacheMetrics) {
-492cacheStats.hit(caching, 
key.isPrimary(), key.getBlockType());
-493  }
-494  
re.access(accessCount.incrementAndGet());
-495  return re.getData();
-496}
-497BucketEntry bucketEntry = 
backingMap.get(key);
-498if (bucketEntry != null) {
-499  long start = System.nanoTime();
-500  ReentrantReadWriteLock lock = 
offsetLock.getLock(bucketEntry.offset());
-501  try {
-502lock.readLock().lock();
-503// We can not read here even if 
backingMap does contain the given key because its offset
-504// maybe changed. If we lock 
BlockCacheKey instead of offset, then we can only check
-505// existence here.
-506if 
(bucketEntry.equals(backingMap.get(key))) {
-507  // TODO : change this area - 
should be removed after server cells and
-508  // 12295 are available
-509  int len = 
bucketEntry.getLength();
-510  if (LOG.isTraceEnabled()) {
-511LOG.trace("Read offset=" + 
bucketEntry.offset() + ", len=" + len);
-512  }
-513  Cacheable cachedBlock = 
ioEngine.read(bucketEntry.offset(), len,
-514  
bucketEntry.deserializerReference(this.deserialiserMap));
-515  long timeTaken = 
System.nanoTime() - start;
-516  if (updateCacheMetrics) {
-517cacheStats.hit(caching, 
key.isPrimary(), key.getBlockType());
-518
cacheStats.ioHit(timeTaken);
+430
+431  try {
+432    int comparison = BlockCacheUtil.validateBlockAddition(existingBlock, cachedItem, cacheKey);
+433    if (compari
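The old code threw a RuntimeException whenever a re-cached block's bytes differed; the replacement routes the decision through BlockCacheUtil.validateBlockAddition and acts on its comparison result. A hedged sketch of that shape (compareBlocks is illustrative, not BlockCacheUtil's actual implementation):

class BlockValidationSketch {
  // 0 means the duplicate insert is harmless (see HBASE-8547); a non-zero
  // result lets the caller decide whether to keep or replace the cached copy.
  static int compareBlocks(byte[] existing, byte[] incoming) {
    if (existing.length != incoming.length) {
      return existing.length - incoming.length;
    }
    for (int i = 0; i < existing.length; i++) {
      if (existing[i] != incoming[i]) {
        return existing[i] - incoming[i];
      }
    }
    return 0;
  }
}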

[21/50] hbase-site git commit: Published site at 26babcf013de696b899d76a3c39434b794440d8d.

2018-05-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/da4482ac/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.StatisticsThread.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.StatisticsThread.html b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.StatisticsThread.html
index e8070ca..8cb24b3 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.StatisticsThread.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.StatisticsThread.html
@@ -433,1254 +433,1261 @@
 425  return;
 426}
 427
-428    if (backingMap.containsKey(cacheKey)) {
+428    if (backingMap.containsKey(cacheKey) || ramCache.containsKey(cacheKey)) {
 429  Cacheable existingBlock = 
getBlock(cacheKey, false, false, false);
-430  try {
-431if 
(BlockCacheUtil.compareCacheBlock(cachedItem, existingBlock) != 0) {
-432  throw new 
RuntimeException("Cached block contents differ, which should not have 
happened."
-433  + "cacheKey:" + 
cacheKey);
-434}
-435String msg = "Caching an already 
cached block: " + cacheKey;
-436msg += ". This is harmless and 
can happen in rare cases (see HBASE-8547)";
-437LOG.warn(msg);
-438  } finally {
-439// return the block since we need 
to decrement the count
-440returnBlock(cacheKey, 
existingBlock);
-441  }
-442  return;
-443}
-444
-445/*
-446 * Stuff the entry into the RAM cache 
so it can get drained to the persistent store
-447 */
-448RAMQueueEntry re =
-449new RAMQueueEntry(cacheKey, 
cachedItem, accessCount.incrementAndGet(), inMemory);
-450if (ramCache.putIfAbsent(cacheKey, 
re) != null) {
-451  return;
-452}
-453int queueNum = (cacheKey.hashCode() 
& 0x7FFF) % writerQueues.size();
-454BlockingQueue bq 
= writerQueues.get(queueNum);
-455boolean successfulAddition = false;
-456if (wait) {
-457  try {
-458successfulAddition = bq.offer(re, 
DEFAULT_CACHE_WAIT_TIME, TimeUnit.MILLISECONDS);
-459  } catch (InterruptedException e) 
{
-460
Thread.currentThread().interrupt();
-461  }
-462} else {
-463  successfulAddition = 
bq.offer(re);
-464}
-465if (!successfulAddition) {
-466  ramCache.remove(cacheKey);
-467  cacheStats.failInsert();
-468} else {
-469  this.blockNumber.increment();
-470  
this.heapSize.add(cachedItem.heapSize());
-471  blocksByHFile.add(cacheKey);
-472}
-473  }
-474
-475  /**
-476   * Get the buffer of the block with the 
specified key.
-477   * @param key block's cache key
-478   * @param caching true if the caller 
caches blocks on cache misses
-479   * @param repeat Whether this is a 
repeat lookup for the same block
-480   * @param updateCacheMetrics Whether we 
should update cache metrics or not
-481   * @return buffer of specified cache 
key, or null if not in cache
-482   */
-483  @Override
-484  public Cacheable getBlock(BlockCacheKey 
key, boolean caching, boolean repeat,
-485  boolean updateCacheMetrics) {
-486if (!cacheEnabled) {
-487  return null;
-488}
-489RAMQueueEntry re = 
ramCache.get(key);
-490if (re != null) {
-491  if (updateCacheMetrics) {
-492cacheStats.hit(caching, 
key.isPrimary(), key.getBlockType());
-493  }
-494  
re.access(accessCount.incrementAndGet());
-495  return re.getData();
-496}
-497BucketEntry bucketEntry = 
backingMap.get(key);
-498if (bucketEntry != null) {
-499  long start = System.nanoTime();
-500  ReentrantReadWriteLock lock = 
offsetLock.getLock(bucketEntry.offset());
-501  try {
-502lock.readLock().lock();
-503// We can not read here even if 
backingMap does contain the given key because its offset
-504// maybe changed. If we lock 
BlockCacheKey instead of offset, then we can only check
-505// existence here.
-506if 
(bucketEntry.equals(backingMap.get(key))) {
-507  // TODO : change this area - 
should be removed after server cells and
-508  // 12295 are available
-509  int len = 
bucketEntry.getLength();
-510  if (LOG.isTraceEnabled()) {
-511LOG.trace("Read offset=" + 
bucketEntry.offset() + ", len=" + len);
-512  }
-513  Cacheable cachedBlock = 
ioEngine.read(bucketEntry.offset(), len,
-514  
bucketEntry.deserializerReference(this.deserialiserMap));
-515  long timeTaken = 
System.nanoTime() - start;
-516  if (updateCacheMetrics) {
-517cacheStats.hit(caching, 
key.isPrimary(), key.getBlockType());
-518
cacheStats.ioHit(timeTaken);
+430
+431  try {
+432    int comparison = BlockCacheUtil.validateBlockAddition(existingBlock, cachedItem, cacheKey);
+433

[10/50] hbase-site git commit: Published site at 26babcf013de696b899d76a3c39434b794440d8d.

2018-05-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/da4482ac/testdevapidocs/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.CachedItem.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.CachedItem.html b/testdevapidocs/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.CachedItem.html
index 5cc06e0..3061419 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.CachedItem.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.CachedItem.html
@@ -117,7 +117,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-private static class TestLruBlockCache.CachedItem
+private static class TestLruBlockCache.CachedItem
 extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">Object
 implements org.apache.hadoop.hbase.io.hfile.Cacheable
 
@@ -229,7 +229,8 @@ implements org.apache.hadoop.hbase.io.hfile.Cacheable
 
 
 void
-serialize(https://docs.oracle.com/javase/8/docs/api/java/nio/ByteBuffer.html?is-external=true";
 title="class or interface in 
java.nio">ByteBuffer destination) 
+serialize(https://docs.oracle.com/javase/8/docs/api/java/nio/ByteBuffer.html?is-external=true";
 title="class or interface in java.nio">ByteBuffer destination,
+ boolean includeNextBlockMetadata) 
 
 
 
@@ -259,7 +260,7 @@ implements org.apache.hadoop.hbase.io.hfile.Cacheable
 
 
 cacheKey
-org.apache.hadoop.hbase.io.hfile.BlockCacheKey cacheKey
+org.apache.hadoop.hbase.io.hfile.BlockCacheKey cacheKey
 
 
 
@@ -268,7 +269,7 @@ implements org.apache.hadoop.hbase.io.hfile.Cacheable
 
 
 size
-int size
+int size
 
 
 
@@ -285,7 +286,7 @@ implements org.apache.hadoop.hbase.io.hfile.Cacheable
 
 
 CachedItem
-CachedItem(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String blockName,
+CachedItem(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String blockName,
int size,
int offset)
 
@@ -296,7 +297,7 @@ implements org.apache.hadoop.hbase.io.hfile.Cacheable
 
 
 CachedItem
-CachedItem(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String blockName,
+CachedItem(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String blockName,
int size)
 
 
@@ -314,7 +315,7 @@ implements org.apache.hadoop.hbase.io.hfile.Cacheable
 
 
 heapSize
-public long heapSize()
+public long heapSize()
 The size of this item reported to the block cache 
layer
 
 Specified by:
@@ -328,7 +329,7 @@ implements org.apache.hadoop.hbase.io.hfile.Cacheable
 
 
 cacheBlockHeapSize
-public long cacheBlockHeapSize()
+public long cacheBlockHeapSize()
 Size of the cache block holding this item. Used for 
verification.
 
 
@@ -338,7 +339,7 @@ implements org.apache.hadoop.hbase.io.hfile.Cacheable
 
 
 getSerializedLength
-public int getSerializedLength()
+public int getSerializedLength()
 
 Specified by:
 getSerializedLength in 
interface org.apache.hadoop.hbase.io.hfile.Cacheable
@@ -351,20 +352,21 @@ implements 
org.apache.hadoop.hbase.io.hfile.Cacheable
 
 
 getDeserializer
-public org.apache.hadoop.hbase.io.hfile.CacheableDeserializer getDeserializer()
+public org.apache.hadoop.hbase.io.hfile.CacheableDeserializer getDeserializer()
 
 Specified by:
 getDeserializer in 
interface org.apache.hadoop.hbase.io.hfile.Cacheable
 
 
 
-
+
 
 
 
 
 serialize
-public void serialize(https://docs.oracle.com/javase/8/docs/api/java/nio/ByteBuffer.html?is-external=true";
 title="class or interface in java.nio">ByteBuffer destination)
+public void serialize(https://docs.oracle.com/javase/8/docs/api/java/nio/ByteBuffer.html?is-external=true";
 title="class or interface in java.nio">ByteBuffer destination,
+  boolean includeNextBlockMetadata)
 
 Specified by:
 serialize in 
interface org.apache.hadoop.hbase.io.hfile.Cacheable
@@ -377,7 +379,7 @@ implements org.apache.hadoop.hbase.io.hfile.Cacheable
 
 
 getBlockType
-public org.apache.hadoop.hbase.io.hfile.BlockType getBlockType()
+public org.apache.hadoop.hbase.io.hfile.BlockType getBlockType()
 
 Specified by:
 getBlockType in 
interface org.apache.hadoop.hbase.io.hfile.Cacheable
@@ -390,7 +392,7 @@ implements org.apache.hadoop.hbase.io.hfile.Cacheable
 
 
 getMemoryType
-public org.apache.hadoop.hbase.io.hfile.Cacheable.MemoryType getMemoryType()
+public org.apache.hadoop.hbase.io.hfile.Cacheable.MemoryType getMemoryType()
 
 Specified by:
 getMemoryType in 
interface org.apache.hadoop.hbase.io.hfile.Cacheable

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/da4482ac
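The test double above now implements the two-argument serialize. A self-contained sketch of how a test might exercise both flag values; this is illustrative, not the TestLruBlockCache code:

import java.nio.ByteBuffer;

class SerializeFlagDemo {
  static void serialize(byte[] payload, int nextBlockOnDiskSize,
      ByteBuffer dest, boolean includeNextBlockMetadata) {
    dest.put(payload);
    if (includeNextBlockMetadata) {
      dest.putInt(nextBlockOnDiskSize);
    }
  }

  public static void main(String[] args) {
    byte[] payload = {1, 2, 3, 4};
    ByteBuffer withMeta = ByteBuffer.allocate(payload.length + Integer.BYTES);
    ByteBuffer withoutMeta = ByteBuffer.allocate(payload.length);
    serialize(payload, 42, withMeta, true);
    serialize(payload, 42, withoutMeta, false);
    // The metadata accounts for exactly Integer.BYTES of the serialized form.
    System.out.println(withMeta.position() - withoutMeta.position()); // prints 4
  }
}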

[18/50] hbase-site git commit: Published site at 26babcf013de696b899d76a3c39434b794440d8d.

2018-05-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/da4482ac/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/ScannerContext.Builder.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/ScannerContext.Builder.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/ScannerContext.Builder.html
index cabb570..90f3b1e 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/ScannerContext.Builder.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/ScannerContext.Builder.html
@@ -107,660 +107,661 @@
 099
 100  private Cell lastPeekedCell = null;
 101
-102  /**
-103   * Tracks the relevant server side 
metrics during scans. null when metrics should not be tracked
-104   */
-105  final ServerSideScanMetrics metrics;
-106
-107  ScannerContext(boolean keepProgress, 
LimitFields limitsToCopy, boolean trackMetrics) {
-108this.limits = new LimitFields();
-109if (limitsToCopy != null) {
-110  this.limits.copy(limitsToCopy);
-111}
+102  // Set this to true will have the same 
behavior with reaching the time limit.
+103  // This is used when you want to make 
the current RSRpcService.scan returns immediately. For
+104  // example, when we want to switch from 
pread to stream, we can only do it after the rpc call is
+105  // returned.
+106  private boolean returnImmediately;
+107
+108  /**
+109   * Tracks the relevant server side 
metrics during scans. null when metrics should not be tracked
+110   */
+111  final ServerSideScanMetrics metrics;
 112
-113// Progress fields are initialized to 
0
-114progress = new ProgressFields(0, 0, 
0);
-115
-116this.keepProgress = keepProgress;
-117this.scannerState = DEFAULT_STATE;
-118this.metrics = trackMetrics ? new 
ServerSideScanMetrics() : null;
-119  }
-120
-121  boolean isTrackingMetrics() {
-122return this.metrics != null;
-123  }
-124
-125  /**
-126   * Get the metrics instance. Should 
only be called after a call to {@link #isTrackingMetrics()}
-127   * has been made to confirm that 
metrics are indeed being tracked.
-128   * @return {@link 
ServerSideScanMetrics} instance that is tracking metrics for this scan
-129   */
-130  ServerSideScanMetrics getMetrics() {
-131assert isTrackingMetrics();
-132return this.metrics;
-133  }
-134
-135  /**
-136   * @return true if the progress tracked 
so far in this instance will be considered during an
-137   * invocation of {@link 
InternalScanner#next(java.util.List)} or
-138   * {@link 
RegionScanner#next(java.util.List)}. false when the progress tracked so far
-139   * should not be considered and 
should instead be wiped away via {@link #clearProgress()}
-140   */
-141  boolean getKeepProgress() {
-142return keepProgress;
-143  }
-144
-145  void setKeepProgress(boolean 
keepProgress) {
-146this.keepProgress = keepProgress;
-147  }
-148
-149  /**
-150   * Progress towards the batch limit has 
been made. Increment internal tracking of batch progress
-151   */
-152  void incrementBatchProgress(int batch) 
{
-153int currentBatch = 
progress.getBatch();
-154progress.setBatch(currentBatch + 
batch);
-155  }
-156
-157  /**
-158   * Progress towards the size limit has 
been made. Increment internal tracking of size progress
-159   */
-160  void incrementSizeProgress(long 
dataSize, long heapSize) {
-161long curDataSize = 
progress.getDataSize();
-162progress.setDataSize(curDataSize + 
dataSize);
-163long curHeapSize = 
progress.getHeapSize();
-164progress.setHeapSize(curHeapSize + 
heapSize);
-165  }
-166
-167  int getBatchProgress() {
-168return progress.getBatch();
-169  }
-170
-171  long getDataSizeProgress() {
-172return progress.getDataSize();
-173  }
-174
-175  long getHeapSizeProgress() {
-176return progress.getHeapSize();
-177  }
-178
-179  void setProgress(int batchProgress, 
long sizeProgress, long heapSizeProgress) {
-180setBatchProgress(batchProgress);
-181setSizeProgress(sizeProgress, 
heapSizeProgress);
-182  }
-183
-184  void setSizeProgress(long 
dataSizeProgress, long heapSizeProgress) {
-185
progress.setDataSize(dataSizeProgress);
-186
progress.setHeapSize(heapSizeProgress);
-187  }
-188
-189  void setBatchProgress(int 
batchProgress) {
-190progress.setBatch(batchProgress);
-191  }
-192
-193  /**
-194   * Clear away any progress that has 
been made so far. All progress fields are reset to initial
-195   * values
-196   */
-197  void clearProgress() {
-198progress.setFields(0, 0, 0);
-199  }
-200
-201  /**
-202   * Note that this is not a typical 
setter. This setter returns the {@link NextState} that was
-203   * passed in so that methods can be 
invoked against the new state. Furthermore, this pattern
-204   * allows the {@link 
NoLimitScannerContext} to cleanly override this setter and simply return the
-205   * new state, thus preserving th
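The returnImmediately comment added above explains that the flag is treated like a reached time limit so the current scan RPC can wind down (for example, to switch from pread to stream). A sketch of how such a flag composes with a limit check; this is illustrative, not ScannerContext's actual code:

class ScanLimitSketch {
  private long timeLimitNanos;
  private boolean returnImmediately;

  // Reaching the deadline and being asked to return early share one exit path.
  boolean checkTimeLimit(long nowNanos) {
    return returnImmediately || nowNanos >= timeLimitNanos;
  }

  // Called when the server wants the in-flight scan RPC to return promptly.
  void triggerReturnImmediately() {
    this.returnImmediately = true;
  }
}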

[26/50] hbase-site git commit: Published site at 26babcf013de696b899d76a3c39434b794440d8d.

2018-05-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/da4482ac/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/MemcachedBlockCache.HFileBlockTranscoder.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/MemcachedBlockCache.HFileBlockTranscoder.html b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/MemcachedBlockCache.HFileBlockTranscoder.html
index 80df615..e32fc70 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/MemcachedBlockCache.HFileBlockTranscoder.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/MemcachedBlockCache.HFileBlockTranscoder.html
@@ -271,7 +271,7 @@
 263    @Override
 264    public CachedData encode(HFileBlock block) {
 265      ByteBuffer bb = ByteBuffer.allocate(block.getSerializedLength());
-266      block.serialize(bb);
+266      block.serialize(bb, true);
 267      return new CachedData(0, bb.array(), CachedData.MAX_SIZE);
 268    }
 269

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/da4482ac/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/MemcachedBlockCache.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/MemcachedBlockCache.html b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/MemcachedBlockCache.html
index 80df615..e32fc70 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/MemcachedBlockCache.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/MemcachedBlockCache.html
@@ -271,7 +271,7 @@
 263    @Override
 264    public CachedData encode(HFileBlock block) {
 265      ByteBuffer bb = ByteBuffer.allocate(block.getSerializedLength());
-266      block.serialize(bb);
+266      block.serialize(bb, true);
 267      return new CachedData(0, bb.array(), CachedData.MAX_SIZE);
 268    }
 269
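The encode() half of the transcoder is shown in the hunk above; a decode() counterpart would reverse it roughly as sketched here. Real HFile blocks also carry checksum and offset metadata, so treat this as the shape of the round trip, not the actual API:

import java.nio.ByteBuffer;

class TranscoderShape {
  // Analogous to: bb = ByteBuffer.allocate(len); block.serialize(bb, true).
  static byte[] encode(byte[] serializedBlock) {
    ByteBuffer bb = ByteBuffer.allocate(serializedBlock.length);
    bb.put(serializedBlock);
    return bb.array();
  }

  // Hand the cached bytes back to the block deserializer on the way out.
  static ByteBuffer decode(byte[] cachedData) {
    return ByteBuffer.wrap(cachedData);
  }
}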



[17/50] hbase-site git commit: Published site at 26babcf013de696b899d76a3c39434b794440d8d.

2018-05-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/da4482ac/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/ScannerContext.LimitFields.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/ScannerContext.LimitFields.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/ScannerContext.LimitFields.html
index cabb570..90f3b1e 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/ScannerContext.LimitFields.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/ScannerContext.LimitFields.html
@@ -107,660 +107,661 @@
 099
 100  private Cell lastPeekedCell = null;
 101
-102  /**
-103   * Tracks the relevant server side 
metrics during scans. null when metrics should not be tracked
-104   */
-105  final ServerSideScanMetrics metrics;
-106
-107  ScannerContext(boolean keepProgress, 
LimitFields limitsToCopy, boolean trackMetrics) {
-108this.limits = new LimitFields();
-109if (limitsToCopy != null) {
-110  this.limits.copy(limitsToCopy);
-111}
+102  // Set this to true will have the same 
behavior with reaching the time limit.
+103  // This is used when you want to make 
the current RSRpcService.scan returns immediately. For
+104  // example, when we want to switch from 
pread to stream, we can only do it after the rpc call is
+105  // returned.
+106  private boolean returnImmediately;
+107
+108  /**
+109   * Tracks the relevant server side 
metrics during scans. null when metrics should not be tracked
+110   */
+111  final ServerSideScanMetrics metrics;
 112
-113// Progress fields are initialized to 
0
-114progress = new ProgressFields(0, 0, 
0);
-115
-116this.keepProgress = keepProgress;
-117this.scannerState = DEFAULT_STATE;
-118this.metrics = trackMetrics ? new 
ServerSideScanMetrics() : null;
-119  }
-120
-121  boolean isTrackingMetrics() {
-122return this.metrics != null;
-123  }
-124
-125  /**
-126   * Get the metrics instance. Should 
only be called after a call to {@link #isTrackingMetrics()}
-127   * has been made to confirm that 
metrics are indeed being tracked.
-128   * @return {@link 
ServerSideScanMetrics} instance that is tracking metrics for this scan
-129   */
-130  ServerSideScanMetrics getMetrics() {
-131assert isTrackingMetrics();
-132return this.metrics;
-133  }
-134
-135  /**
-136   * @return true if the progress tracked 
so far in this instance will be considered during an
-137   * invocation of {@link 
InternalScanner#next(java.util.List)} or
-138   * {@link 
RegionScanner#next(java.util.List)}. false when the progress tracked so far
-139   * should not be considered and 
should instead be wiped away via {@link #clearProgress()}
-140   */
-141  boolean getKeepProgress() {
-142return keepProgress;
-143  }
-144
-145  void setKeepProgress(boolean 
keepProgress) {
-146this.keepProgress = keepProgress;
-147  }
-148
-149  /**
-150   * Progress towards the batch limit has 
been made. Increment internal tracking of batch progress
-151   */
-152  void incrementBatchProgress(int batch) 
{
-153int currentBatch = 
progress.getBatch();
-154progress.setBatch(currentBatch + 
batch);
-155  }
-156
-157  /**
-158   * Progress towards the size limit has 
been made. Increment internal tracking of size progress
-159   */
-160  void incrementSizeProgress(long 
dataSize, long heapSize) {
-161long curDataSize = 
progress.getDataSize();
-162progress.setDataSize(curDataSize + 
dataSize);
-163long curHeapSize = 
progress.getHeapSize();
-164progress.setHeapSize(curHeapSize + 
heapSize);
-165  }
-166
-167  int getBatchProgress() {
-168return progress.getBatch();
-169  }
-170
-171  long getDataSizeProgress() {
-172return progress.getDataSize();
-173  }
-174
-175  long getHeapSizeProgress() {
-176return progress.getHeapSize();
-177  }
-178
-179  void setProgress(int batchProgress, 
long sizeProgress, long heapSizeProgress) {
-180setBatchProgress(batchProgress);
-181setSizeProgress(sizeProgress, 
heapSizeProgress);
-182  }
-183
-184  void setSizeProgress(long 
dataSizeProgress, long heapSizeProgress) {
-185
progress.setDataSize(dataSizeProgress);
-186
progress.setHeapSize(heapSizeProgress);
-187  }
-188
-189  void setBatchProgress(int 
batchProgress) {
-190progress.setBatch(batchProgress);
-191  }
-192
-193  /**
-194   * Clear away any progress that has 
been made so far. All progress fields are reset to initial
-195   * values
-196   */
-197  void clearProgress() {
-198progress.setFields(0, 0, 0);
-199  }
-200
-201  /**
-202   * Note that this is not a typical 
setter. This setter returns the {@link NextState} that was
-203   * passed in so that methods can be 
invoked against the new state. Furthermore, this pattern
-204   * allows the {@link 
NoLimitScannerContext} to cleanly override this setter and simply return the
-205   * new state
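This file repeats the ScannerContext progress-tracking methods (incrementBatchProgress, incrementSizeProgress, clearProgress). Their interplay with keepProgress reduces to the small sketch below, with simplified names and no claim to match the real class:

class ProgressSketch {
  private int batch;
  private long dataSize;
  private long heapSize;
  private final boolean keepProgress;

  ProgressSketch(boolean keepProgress) {
    this.keepProgress = keepProgress;
  }

  void increment(int batchDelta, long dataDelta, long heapDelta) {
    batch += batchDelta;
    dataSize += dataDelta;
    heapSize += heapDelta;
  }

  // Progress carries across next() calls only when keepProgress is set;
  // otherwise it is wiped, mirroring clearProgress().
  void onNextCallStart() {
    if (!keepProgress) {
      batch = 0;
      dataSize = 0;
      heapSize = 0;
    }
  }
}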

[15/50] hbase-site git commit: Published site at 26babcf013de696b899d76a3c39434b794440d8d.

2018-05-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/da4482ac/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/ScannerContext.NextState.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/ScannerContext.NextState.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/ScannerContext.NextState.html
index cabb570..90f3b1e 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/ScannerContext.NextState.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/ScannerContext.NextState.html
@@ -107,660 +107,661 @@
 099
 100  private Cell lastPeekedCell = null;
 101
-102  /**
-103   * Tracks the relevant server side 
metrics during scans. null when metrics should not be tracked
-104   */
-105  final ServerSideScanMetrics metrics;
-106
-107  ScannerContext(boolean keepProgress, 
LimitFields limitsToCopy, boolean trackMetrics) {
-108this.limits = new LimitFields();
-109if (limitsToCopy != null) {
-110  this.limits.copy(limitsToCopy);
-111}
+102  // Set this to true will have the same 
behavior with reaching the time limit.
+103  // This is used when you want to make 
the current RSRpcService.scan returns immediately. For
+104  // example, when we want to switch from 
pread to stream, we can only do it after the rpc call is
+105  // returned.
+106  private boolean returnImmediately;
+107
+108  /**
+109   * Tracks the relevant server side 
metrics during scans. null when metrics should not be tracked
+110   */
+111  final ServerSideScanMetrics metrics;
 112
-113// Progress fields are initialized to 
0
-114progress = new ProgressFields(0, 0, 
0);
-115
-116this.keepProgress = keepProgress;
-117this.scannerState = DEFAULT_STATE;
-118this.metrics = trackMetrics ? new 
ServerSideScanMetrics() : null;
-119  }
-120
-121  boolean isTrackingMetrics() {
-122return this.metrics != null;
-123  }
-124
-125  /**
-126   * Get the metrics instance. Should 
only be called after a call to {@link #isTrackingMetrics()}
-127   * has been made to confirm that 
metrics are indeed being tracked.
-128   * @return {@link 
ServerSideScanMetrics} instance that is tracking metrics for this scan
-129   */
-130  ServerSideScanMetrics getMetrics() {
-131assert isTrackingMetrics();
-132return this.metrics;
-133  }
-134
-135  /**
-136   * @return true if the progress tracked 
so far in this instance will be considered during an
-137   * invocation of {@link 
InternalScanner#next(java.util.List)} or
-138   * {@link 
RegionScanner#next(java.util.List)}. false when the progress tracked so far
-139   * should not be considered and 
should instead be wiped away via {@link #clearProgress()}
-140   */
-141  boolean getKeepProgress() {
-142return keepProgress;
-143  }
-144
-145  void setKeepProgress(boolean 
keepProgress) {
-146this.keepProgress = keepProgress;
-147  }
-148
-149  /**
-150   * Progress towards the batch limit has 
been made. Increment internal tracking of batch progress
-151   */
-152  void incrementBatchProgress(int batch) 
{
-153int currentBatch = 
progress.getBatch();
-154progress.setBatch(currentBatch + 
batch);
-155  }
-156
-157  /**
-158   * Progress towards the size limit has 
been made. Increment internal tracking of size progress
-159   */
-160  void incrementSizeProgress(long 
dataSize, long heapSize) {
-161long curDataSize = 
progress.getDataSize();
-162progress.setDataSize(curDataSize + 
dataSize);
-163long curHeapSize = 
progress.getHeapSize();
-164progress.setHeapSize(curHeapSize + 
heapSize);
-165  }
-166
-167  int getBatchProgress() {
-168return progress.getBatch();
-169  }
-170
-171  long getDataSizeProgress() {
-172return progress.getDataSize();
-173  }
-174
-175  long getHeapSizeProgress() {
-176return progress.getHeapSize();
-177  }
-178
-179  void setProgress(int batchProgress, 
long sizeProgress, long heapSizeProgress) {
-180setBatchProgress(batchProgress);
-181setSizeProgress(sizeProgress, 
heapSizeProgress);
-182  }
-183
-184  void setSizeProgress(long 
dataSizeProgress, long heapSizeProgress) {
-185
progress.setDataSize(dataSizeProgress);
-186
progress.setHeapSize(heapSizeProgress);
-187  }
-188
-189  void setBatchProgress(int 
batchProgress) {
-190progress.setBatch(batchProgress);
-191  }
-192
-193  /**
-194   * Clear away any progress that has 
been made so far. All progress fields are reset to initial
-195   * values
-196   */
-197  void clearProgress() {
-198progress.setFields(0, 0, 0);
-199  }
-200
-201  /**
-202   * Note that this is not a typical 
setter. This setter returns the {@link NextState} that was
-203   * passed in so that methods can be 
invoked against the new state. Furthermore, this pattern
-204   * allows the {@link 
NoLimitScannerContext} to cleanly override this setter and simply return the
-205   * new state, thus pre

[22/50] hbase-site git commit: Published site at 26babcf013de696b899d76a3c39434b794440d8d.

2018-05-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/da4482ac/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.SharedMemoryBucketEntry.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.SharedMemoryBucketEntry.html b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.SharedMemoryBucketEntry.html
index e8070ca..8cb24b3 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.SharedMemoryBucketEntry.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.SharedMemoryBucketEntry.html
@@ -433,1254 +433,1261 @@
 425  return;
 426}
 427
-428    if (backingMap.containsKey(cacheKey)) {
+428    if (backingMap.containsKey(cacheKey) || ramCache.containsKey(cacheKey)) {
 429  Cacheable existingBlock = 
getBlock(cacheKey, false, false, false);
-430  try {
-431if 
(BlockCacheUtil.compareCacheBlock(cachedItem, existingBlock) != 0) {
-432  throw new 
RuntimeException("Cached block contents differ, which should not have 
happened."
-433  + "cacheKey:" + 
cacheKey);
-434}
-435String msg = "Caching an already 
cached block: " + cacheKey;
-436msg += ". This is harmless and 
can happen in rare cases (see HBASE-8547)";
-437LOG.warn(msg);
-438  } finally {
-439// return the block since we need 
to decrement the count
-440returnBlock(cacheKey, 
existingBlock);
-441  }
-442  return;
-443}
-444
-445/*
-446 * Stuff the entry into the RAM cache 
so it can get drained to the persistent store
-447 */
-448RAMQueueEntry re =
-449new RAMQueueEntry(cacheKey, 
cachedItem, accessCount.incrementAndGet(), inMemory);
-450if (ramCache.putIfAbsent(cacheKey, 
re) != null) {
-451  return;
-452}
-453int queueNum = (cacheKey.hashCode() 
& 0x7FFF) % writerQueues.size();
-454BlockingQueue bq 
= writerQueues.get(queueNum);
-455boolean successfulAddition = false;
-456if (wait) {
-457  try {
-458successfulAddition = bq.offer(re, 
DEFAULT_CACHE_WAIT_TIME, TimeUnit.MILLISECONDS);
-459  } catch (InterruptedException e) 
{
-460
Thread.currentThread().interrupt();
-461  }
-462} else {
-463  successfulAddition = 
bq.offer(re);
-464}
-465if (!successfulAddition) {
-466  ramCache.remove(cacheKey);
-467  cacheStats.failInsert();
-468} else {
-469  this.blockNumber.increment();
-470  
this.heapSize.add(cachedItem.heapSize());
-471  blocksByHFile.add(cacheKey);
-472}
-473  }
-474
-475  /**
-476   * Get the buffer of the block with the 
specified key.
-477   * @param key block's cache key
-478   * @param caching true if the caller 
caches blocks on cache misses
-479   * @param repeat Whether this is a 
repeat lookup for the same block
-480   * @param updateCacheMetrics Whether we 
should update cache metrics or not
-481   * @return buffer of specified cache 
key, or null if not in cache
-482   */
-483  @Override
-484  public Cacheable getBlock(BlockCacheKey 
key, boolean caching, boolean repeat,
-485  boolean updateCacheMetrics) {
-486if (!cacheEnabled) {
-487  return null;
-488}
-489RAMQueueEntry re = 
ramCache.get(key);
-490if (re != null) {
-491  if (updateCacheMetrics) {
-492cacheStats.hit(caching, 
key.isPrimary(), key.getBlockType());
-493  }
-494  
re.access(accessCount.incrementAndGet());
-495  return re.getData();
-496}
-497BucketEntry bucketEntry = 
backingMap.get(key);
-498if (bucketEntry != null) {
-499  long start = System.nanoTime();
-500  ReentrantReadWriteLock lock = 
offsetLock.getLock(bucketEntry.offset());
-501  try {
-502lock.readLock().lock();
-503// We can not read here even if 
backingMap does contain the given key because its offset
-504// maybe changed. If we lock 
BlockCacheKey instead of offset, then we can only check
-505// existence here.
-506if 
(bucketEntry.equals(backingMap.get(key))) {
-507  // TODO : change this area - 
should be removed after server cells and
-508  // 12295 are available
-509  int len = 
bucketEntry.getLength();
-510  if (LOG.isTraceEnabled()) {
-511LOG.trace("Read offset=" + 
bucketEntry.offset() + ", len=" + len);
-512  }
-513  Cacheable cachedBlock = 
ioEngine.read(bucketEntry.offset(), len,
-514  
bucketEntry.deserializerReference(this.deserialiserMap));
-515  long timeTaken = 
System.nanoTime() - start;
-516  if (updateCacheMetrics) {
-517cacheStats.hit(caching, 
key.isPrimary(), key.getBlockType());
-518
cacheStats.ioHit(timeTaken);
+430
+431  try {
+432    int comparison = BlockCacheUtil.validateBlockAddition(existingBlo

[34/50] hbase-site git commit: Published site at 26babcf013de696b899d76a3c39434b794440d8d.

2018-05-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/da4482ac/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.PrefetchedHeader.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.PrefetchedHeader.html b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.PrefetchedHeader.html
index 97ceefd..b7b4236 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.PrefetchedHeader.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.PrefetchedHeader.html
@@ -103,8 +103,8 @@
 095 * Caches cache whole blocks with trailing checksums if any. We then tag on some metadata, the
 096 * content of BLOCK_METADATA_SPACE which will be flag on if we are doing 'hbase'
 097 * checksums and then the offset into the file which is needed when we re-make a cache key
-098 * when we return the block to the cache as 'done'. See {@link Cacheable#serialize(ByteBuffer)} and
-099 * {@link Cacheable#getDeserializer()}.
+098 * when we return the block to the cache as 'done'.
+099 * See {@link Cacheable#serialize(ByteBuffer, boolean)} and {@link Cacheable#getDeserializer()}.
 100 *
 101 * TODO: Should we cache the checksums? Down in Writer#getBlockForCaching(CacheConfig) where
 102 * we make a block to cache-on-write, there is an attempt at turning off checksums. This is not the
@@ -333,1579 +333,1579 @@
 325 * Creates a new {@link HFile} block from the given fields. This constructor
 326 * is used only while writing blocks and caching,
 327 * and is sitting in a byte buffer and we want to stuff the block into cache.
-328 * See {@link Writer#getBlockForCaching(CacheConfig)}.
-329 *
-330 * TODO: The caller presumes no checksumming
-331 * required of this block instance since going into cache; checksum already verified on
-332 * underlying block data pulled in from filesystem. Is that correct? What if cache is SSD?
-333 *
-334 * @param blockType the type of this block, see {@link BlockType}
-335 * @param onDiskSizeWithoutHeader see {@link #onDiskSizeWithoutHeader}
-336 * @param uncompressedSizeWithoutHeader see {@link #uncompressedSizeWithoutHeader}
-337 * @param prevBlockOffset see {@link #prevBlockOffset}
-338 * @param b block header ({@link HConstants#HFILEBLOCK_HEADER_SIZE} bytes)
-339 * @param fillHeader when true, write the first 4 header fields into passed buffer.
-340 * @param offset the file offset the block was read from
-341 * @param onDiskDataSizeWithHeader see {@link #onDiskDataSizeWithHeader}
-342 * @param fileContext HFile meta data
-343 */
-344 HFileBlock(BlockType blockType, int onDiskSizeWithoutHeader, int uncompressedSizeWithoutHeader,
-345     long prevBlockOffset, ByteBuffer b, boolean fillHeader, long offset,
-346     final int nextBlockOnDiskSize, int onDiskDataSizeWithHeader, HFileContext fileContext) {
-347   init(blockType, onDiskSizeWithoutHeader, uncompressedSizeWithoutHeader,
-348       prevBlockOffset, offset, onDiskDataSizeWithHeader, nextBlockOnDiskSize, fileContext);
-349   this.buf = new SingleByteBuff(b);
-350   if (fillHeader) {
-351     overwriteHeader();
-352   }
-353   this.buf.rewind();
-354 }
-355
-356 /**
-357  * Creates a block from an existing buffer starting with a header. Rewinds
-358  * and takes ownership of the buffer. By definition of rewind, ignores the
-359  * buffer position, but if you slice the buffer beforehand, it will rewind
-360  * to that point.
-361  * @param buf Has header, content, and trailing checksums if present.
-362  */
-363 HFileBlock(ByteBuff buf, boolean usesHBaseChecksum, MemoryType memType, final long offset,
-364     final int nextBlockOnDiskSize, HFileContext fileContext) throws IOException {
-365   buf.rewind();
-366   final BlockType blockType = BlockType.read(buf);
-367   final int onDiskSizeWithoutHeader = buf.getInt(Header.ON_DISK_SIZE_WITHOUT_HEADER_INDEX);
-368   final int uncompressedSizeWithoutHeader =
-369       buf.getInt(Header.UNCOMPRESSED_SIZE_WITHOUT_HEADER_INDEX);
-370   final long prevBlockOffset = buf.getLong(Header.PREV_BLOCK_OFFSET_INDEX);
-371   // This constructor is called when we deserialize a block from cache and when we read a block in
-372   // from the fs. fileCache is null when deserialized from cache so need to make up one.
-373   HFileContextBuilder fileContextBuilder = fileContext != null ?
-374       new HFileContextBuilder(fileContext) : new HFileContextBuilder();
-375   fileContextBuilder.withHBaseCheckSum(usesHBaseChecksum);
-376   int onDiskDataSizeWithHeader;
-377   if (usesHBaseChecksum) {
-378     byte checksumType = buf.get(Header.CHECKSUM_TYPE_INDEX);
-379     int bytesPerChecksum = buf.getInt(Header.BYTES_PER_CHECKSUM_INDEX);
-380     onDiskDataSizeWithHeader = buf.getInt(Header.ON_DISK_DATA_SIZE_WITH_HEADER_INDEX);
-381     // Use the checksum type and bytes per checksum from header, not from filecontext.


[27/50] hbase-site git commit: Published site at 26babcf013de696b899d76a3c39434b794440d8d.

2018-05-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/da4482ac/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/LruBlockCache.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/LruBlockCache.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/LruBlockCache.html
index f5747c6..b94ef5e 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/LruBlockCache.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/LruBlockCache.html
@@ -388,811 +388,816 @@
 380
 381LruCachedBlock cb = 
map.get(cacheKey);
 382if (cb != null) {
-383  // compare the contents, if they 
are not equal, we are in big trouble
-384  if 
(BlockCacheUtil.compareCacheBlock(buf, cb.getBuffer()) != 0) {
-385throw new 
RuntimeException("Cached block contents differ, which should not have 
happened."
-386  + "cacheKey:" + cacheKey);
-387  }
-388  String msg = "Cached an already 
cached block: " + cacheKey + " cb:" + cb.getCacheKey();
-389  msg += ". This is harmless and can 
happen in rare cases (see HBASE-8547)";
-390  LOG.warn(msg);
-391  return;
-392}
-393long currentSize = size.get();
-394long currentAcceptableSize = 
acceptableSize();
-395long hardLimitSize = (long) 
(hardCapacityLimitFactor * currentAcceptableSize);
-396if (currentSize >= hardLimitSize) 
{
-397  stats.failInsert();
-398  if (LOG.isTraceEnabled()) {
-399LOG.trace("LruBlockCache current 
size " + StringUtils.byteDesc(currentSize)
-400  + " has exceeded acceptable 
size " + StringUtils.byteDesc(currentAcceptableSize) + "."
-401  + " The hard limit size is " + 
StringUtils.byteDesc(hardLimitSize)
-402  + ", failed to put cacheKey:" + 
cacheKey + " into LruBlockCache.");
-403  }
-404  if (!evictionInProgress) {
-405runEviction();
-406  }
-407  return;
-408}
-409cb = new LruCachedBlock(cacheKey, 
buf, count.incrementAndGet(), inMemory);
-410long newSize = updateSizeMetrics(cb, 
false);
-411map.put(cacheKey, cb);
-412long val = 
elements.incrementAndGet();
-413if (buf.getBlockType().isData()) {
-414   dataBlockElements.increment();
-415}
-416if (LOG.isTraceEnabled()) {
-417  long size = map.size();
-418  assertCounterSanity(size, val);
-419}
-420if (newSize > 
currentAcceptableSize && !evictionInProgress) {
-421  runEviction();
-422}
-423  }
-424
-425  /**
-426   * Sanity-checking for parity between 
actual block cache content and metrics.
-427   * Intended only for use with TRACE 
level logging and -ea JVM.
-428   */
-429  private static void 
assertCounterSanity(long mapSize, long counterVal) {
-430if (counterVal < 0) {
-431  LOG.trace("counterVal overflow. 
Assertions unreliable. counterVal=" + counterVal +
-432", mapSize=" + mapSize);
-433  return;
-434}
-435if (mapSize < Integer.MAX_VALUE) 
{
-436      double pct_diff = Math.abs((((double) counterVal) / ((double) mapSize)) - 1.);
-437  if (pct_diff > 0.05) {
-438LOG.trace("delta between reported 
and actual size > 5%. counterVal=" + counterVal +
-439  ", mapSize=" + mapSize);
-440  }
-441}
-442  }
-443
-444  /**
-445   * Cache the block with the specified name and buffer.
-446   *
-447   *
-448   * @param cacheKey block's cache key
-449   * @param buf block buffer
-450   */
-451  @Override
-452  public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf) {
-453    cacheBlock(cacheKey, buf, false);
-454  }
-455
-456  /**
-457   * Helper function that updates the local size counter and also updates any
-458   * per-cf or per-blocktype metrics it can discern from given
-459   * {@link LruCachedBlock}
-460   */
-461  private long updateSizeMetrics(LruCachedBlock cb, boolean evict) {
-462    long heapsize = cb.heapSize();
-463    BlockType bt = cb.getBuffer().getBlockType();
-464    if (evict) {
-465      heapsize *= -1;
-466    }
-467    if (bt != null && bt.isData()) {
-468      dataBlockSize.add(heapsize);
-469    }
-470    return size.addAndGet(heapsize);
-471  }
-472
-473  /**
-474   * Get the buffer of the block with the specified name.
-475   *
-476   * @param cacheKey block's cache key
-477   * @param caching true if the caller caches blocks on cache misses
-478   * @param repeat Whether this is a repeat lookup for the same block
-479   *          (used to avoid double counting cache misses when doing double-check
-480   *          locking)
-481   * @param updateCacheMetrics Whether to update cache metrics or not
-482   *
-483   * @return buffer of specified cache key, or null if not in cache
-484   */
-485  @Override
-486  public Cacheable getBlock(BlockCacheKey cacheKey, boolean caching, boolean repeat,
-487      boolean updateCacheMetrics) {
-488    LruCach
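
Note on the hunk above: before inserting, cacheBlock() refuses new blocks once the cache size passes a hard multiple of its acceptable size, and triggers an eviction pass instead. A self-contained sketch of that guard's arithmetic; the sizes and the 1.2 factor are illustrative assumptions, not values quoted from the diff:

  public class HardLimitSketch {
    public static void main(String[] args) {
      long acceptableSize = 1L << 30;            // assume acceptable size is 1 GiB
      float hardCapacityLimitFactor = 1.2f;      // assumed example factor
      long hardLimitSize = (long) (hardCapacityLimitFactor * acceptableSize);
      long currentSize = 1_400_000_000L;         // the cache has overshot
      // Mirrors the guard in the diff: past the hard limit the insert fails
      // and an eviction pass is kicked off if one is not already running.
      if (currentSize >= hardLimitSize) {
        System.out.println("insert refused; eviction triggered");
      }
    }
  }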


[35/50] hbase-site git commit: Published site at 26babcf013de696b899d76a3c39434b794440d8d.

2018-05-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/da4482ac/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.Header.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.Header.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.Header.html
index 97ceefd..b7b4236 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.Header.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.Header.html
@@ -103,8 +103,8 @@
 095 * Caches cache whole blocks with 
trailing checksums if any. We then tag on some metadata, the
 096 * content of BLOCK_METADATA_SPACE which 
will be flag on if we are doing 'hbase'
 097 * checksums and then the offset into the 
file which is needed when we re-make a cache key
-098 * when we return the block to the cache 
as 'done'. See {@link Cacheable#serialize(ByteBuffer)} and
-099 * {@link Cacheable#getDeserializer()}.
+098 * when we return the block to the cache 
as 'done'.
+099 * See {@link 
Cacheable#serialize(ByteBuffer, boolean)} and {@link 
Cacheable#getDeserializer()}.
 100 *
 101 * TODO: Should we cache the checksums? Down in Writer#getBlockForCaching(CacheConfig) where
 102 * we make a block to cache-on-write, there is an attempt at turning off checksums. This is not the
@@ -333,1579 +333,1579 @@
 325 * Creates a new {@link HFile} block from the given fields. This constructor
 326 * is used only while writing blocks and caching,
 327 * and is sitting in a byte buffer and we want to stuff the block into cache.
-328 * See {@link Writer#getBlockForCaching(CacheConfig)}.
-329 *
-330 * TODO: The caller presumes no checksumming
-331 * required of this block instance since going into cache; checksum already verified on
-332 * underlying block data pulled in from filesystem. Is that correct? What if cache is SSD?
-333 *
-334 * @param blockType the type of this block, see {@link BlockType}
-335 * @param onDiskSizeWithoutHeader see {@link #onDiskSizeWithoutHeader}
-336 * @param uncompressedSizeWithoutHeader see {@link #uncompressedSizeWithoutHeader}
-337 * @param prevBlockOffset see {@link #prevBlockOffset}
-338 * @param b block header ({@link HConstants#HFILEBLOCK_HEADER_SIZE} bytes)
-339 * @param fillHeader when true, write the first 4 header fields into passed buffer.
-340 * @param offset the file offset the block was read from
-341 * @param onDiskDataSizeWithHeader see {@link #onDiskDataSizeWithHeader}
-342 * @param fileContext HFile meta data
-343 */
-344 HFileBlock(BlockType blockType, int onDiskSizeWithoutHeader, int uncompressedSizeWithoutHeader,
-345     long prevBlockOffset, ByteBuffer b, boolean fillHeader, long offset,
-346     final int nextBlockOnDiskSize, int onDiskDataSizeWithHeader, HFileContext fileContext) {
-347   init(blockType, onDiskSizeWithoutHeader, uncompressedSizeWithoutHeader,
-348       prevBlockOffset, offset, onDiskDataSizeWithHeader, nextBlockOnDiskSize, fileContext);
-349   this.buf = new SingleByteBuff(b);
-350   if (fillHeader) {
-351     overwriteHeader();
-352   }
-353   this.buf.rewind();
-354 }
-355
-356 /**
-357  * Creates a block from an existing buffer starting with a header. Rewinds
-358  * and takes ownership of the buffer. By definition of rewind, ignores the
-359  * buffer position, but if you slice the buffer beforehand, it will rewind
-360  * to that point.
-361  * @param buf Has header, content, and trailing checksums if present.
-362  */
-363 HFileBlock(ByteBuff buf, boolean usesHBaseChecksum, MemoryType memType, final long offset,
-364     final int nextBlockOnDiskSize, HFileContext fileContext) throws IOException {
-365   buf.rewind();
-366   final BlockType blockType = BlockType.read(buf);
-367   final int onDiskSizeWithoutHeader = buf.getInt(Header.ON_DISK_SIZE_WITHOUT_HEADER_INDEX);
-368   final int uncompressedSizeWithoutHeader =
-369       buf.getInt(Header.UNCOMPRESSED_SIZE_WITHOUT_HEADER_INDEX);
-370   final long prevBlockOffset = buf.getLong(Header.PREV_BLOCK_OFFSET_INDEX);
-371   // This constructor is called when we deserialize a block from cache and when we read a block in
-372   // from the fs. fileCache is null when deserialized from cache so need to make up one.
-373   HFileContextBuilder fileContextBuilder = fileContext != null?
-374       new HFileContextBuilder(fileContext): new HFileContextBuilder();
-375   fileContextBuilder.withHBaseCheckSum(usesHBaseChecksum);
-376   int onDiskDataSizeWithHeader;
-377   if (usesHBaseChecksum) {
-378     byte checksumType = buf.get(Header.CHECKSUM_TYPE_INDEX);
-379     int bytesPerChecksum = buf.getInt(Header.BYTES_PER_CHECKSUM_INDEX);
-380     onDiskDataSizeWithHeader = buf.getInt(Header.ON_DISK_DATA_SIZE_WITH_HEADER_INDEX);
-381     // Use the checksum type and bytes per checksum from hea
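
Note on the serialize change called out at the top of this page: Cacheable#serialize gained a boolean controlling whether the block's nextBlockOnDiskSize metadata is written. A short usage sketch, modelled on the TestHFileBlock case that appears later in this digest; 'block' stands in for an HFileBlock already in hand and the buffer names are illustrative:

  ByteBuffer withMeta = ByteBuffer.allocate(block.getSerializedLength());
  ByteBuffer withoutMeta = ByteBuffer.allocate(block.getSerializedLength());
  block.serialize(withMeta, true);     // include the nextBlockOnDiskSize metadata
  block.serialize(withoutMeta, false); // omit it, e.g. when comparing cached copies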


[28/50] hbase-site git commit: Published site at 26babcf013de696b899d76a3c39434b794440d8d.

2018-05-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/da4482ac/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/LruBlockCache.StatisticsThread.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/LruBlockCache.StatisticsThread.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/LruBlockCache.StatisticsThread.html
index f5747c6..b94ef5e 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/LruBlockCache.StatisticsThread.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/LruBlockCache.StatisticsThread.html
@@ -388,811 +388,816 @@
 380
 381LruCachedBlock cb = 
map.get(cacheKey);
 382if (cb != null) {
-383  // compare the contents, if they 
are not equal, we are in big trouble
-384  if 
(BlockCacheUtil.compareCacheBlock(buf, cb.getBuffer()) != 0) {
-385throw new 
RuntimeException("Cached block contents differ, which should not have 
happened."
-386  + "cacheKey:" + cacheKey);
-387  }
-388  String msg = "Cached an already 
cached block: " + cacheKey + " cb:" + cb.getCacheKey();
-389  msg += ". This is harmless and can 
happen in rare cases (see HBASE-8547)";
-390  LOG.warn(msg);
-391  return;
-392}
-393long currentSize = size.get();
-394long currentAcceptableSize = 
acceptableSize();
-395long hardLimitSize = (long) 
(hardCapacityLimitFactor * currentAcceptableSize);
-396if (currentSize >= hardLimitSize) 
{
-397  stats.failInsert();
-398  if (LOG.isTraceEnabled()) {
-399LOG.trace("LruBlockCache current 
size " + StringUtils.byteDesc(currentSize)
-400  + " has exceeded acceptable 
size " + StringUtils.byteDesc(currentAcceptableSize) + "."
-401  + " The hard limit size is " + 
StringUtils.byteDesc(hardLimitSize)
-402  + ", failed to put cacheKey:" + 
cacheKey + " into LruBlockCache.");
-403  }
-404  if (!evictionInProgress) {
-405runEviction();
-406  }
-407  return;
-408}
-409cb = new LruCachedBlock(cacheKey, 
buf, count.incrementAndGet(), inMemory);
-410long newSize = updateSizeMetrics(cb, 
false);
-411map.put(cacheKey, cb);
-412long val = 
elements.incrementAndGet();
-413if (buf.getBlockType().isData()) {
-414   dataBlockElements.increment();
-415}
-416if (LOG.isTraceEnabled()) {
-417  long size = map.size();
-418  assertCounterSanity(size, val);
-419}
-420if (newSize > 
currentAcceptableSize && !evictionInProgress) {
-421  runEviction();
-422}
-423  }
-424
-425  /**
-426   * Sanity-checking for parity between 
actual block cache content and metrics.
-427   * Intended only for use with TRACE 
level logging and -ea JVM.
-428   */
-429  private static void 
assertCounterSanity(long mapSize, long counterVal) {
-430if (counterVal < 0) {
-431  LOG.trace("counterVal overflow. 
Assertions unreliable. counterVal=" + counterVal +
-432", mapSize=" + mapSize);
-433  return;
-434}
-435if (mapSize < Integer.MAX_VALUE) 
{
-436      double pct_diff = Math.abs((((double) counterVal) / ((double) mapSize)) - 1.);
-437  if (pct_diff > 0.05) {
-438LOG.trace("delta between reported 
and actual size > 5%. counterVal=" + counterVal +
-439  ", mapSize=" + mapSize);
-440  }
-441}
-442  }
-443
-444  /**
-445   * Cache the block with the specified name and buffer.
-446   *
-447   *
-448   * @param cacheKey block's cache key
-449   * @param buf block buffer
-450   */
-451  @Override
-452  public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf) {
-453    cacheBlock(cacheKey, buf, false);
-454  }
-455
-456  /**
-457   * Helper function that updates the local size counter and also updates any
-458   * per-cf or per-blocktype metrics it can discern from given
-459   * {@link LruCachedBlock}
-460   */
-461  private long updateSizeMetrics(LruCachedBlock cb, boolean evict) {
-462    long heapsize = cb.heapSize();
-463    BlockType bt = cb.getBuffer().getBlockType();
-464    if (evict) {
-465      heapsize *= -1;
-466    }
-467    if (bt != null && bt.isData()) {
-468      dataBlockSize.add(heapsize);
-469    }
-470    return size.addAndGet(heapsize);
-471  }
-472
-473  /**
-474   * Get the buffer of the block with the specified name.
-475   *
-476   * @param cacheKey block's cache key
-477   * @param caching true if the caller caches blocks on cache misses
-478   * @param repeat Whether this is a repeat lookup for the same block
-479   *          (used to avoid double counting cache misses when doing double-check
-480   *          locking)
-481   * @param updateCacheMetrics Whether to update cache metrics or not
-482   *
-483   * @return buffer of specified cache key, or null if not in cache
-484   */
-485  @Override
-486  public Cacheable getBlock(BlockCacheKey cacheKey,


[20/50] hbase-site git commit: Published site at 26babcf013de696b899d76a3c39434b794440d8d.

2018-05-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/da4482ac/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.WriterThread.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.WriterThread.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.WriterThread.html
index e8070ca..8cb24b3 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.WriterThread.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.WriterThread.html
@@ -433,1254 +433,1261 @@
 425  return;
 426}
 427
-428if (backingMap.containsKey(cacheKey)) 
{
+428if (backingMap.containsKey(cacheKey) 
|| ramCache.containsKey(cacheKey)) {
 429  Cacheable existingBlock = 
getBlock(cacheKey, false, false, false);
-430  try {
-431if 
(BlockCacheUtil.compareCacheBlock(cachedItem, existingBlock) != 0) {
-432  throw new 
RuntimeException("Cached block contents differ, which should not have 
happened."
-433  + "cacheKey:" + 
cacheKey);
-434}
-435String msg = "Caching an already 
cached block: " + cacheKey;
-436msg += ". This is harmless and 
can happen in rare cases (see HBASE-8547)";
-437LOG.warn(msg);
-438  } finally {
-439// return the block since we need 
to decrement the count
-440returnBlock(cacheKey, 
existingBlock);
-441  }
-442  return;
-443}
-444
-445/*
-446 * Stuff the entry into the RAM cache 
so it can get drained to the persistent store
-447 */
-448RAMQueueEntry re =
-449new RAMQueueEntry(cacheKey, 
cachedItem, accessCount.incrementAndGet(), inMemory);
-450if (ramCache.putIfAbsent(cacheKey, 
re) != null) {
-451  return;
-452}
-453int queueNum = (cacheKey.hashCode() 
& 0x7FFF) % writerQueues.size();
-454BlockingQueue bq 
= writerQueues.get(queueNum);
-455boolean successfulAddition = false;
-456if (wait) {
-457  try {
-458successfulAddition = bq.offer(re, 
DEFAULT_CACHE_WAIT_TIME, TimeUnit.MILLISECONDS);
-459  } catch (InterruptedException e) 
{
-460
Thread.currentThread().interrupt();
-461  }
-462} else {
-463  successfulAddition = 
bq.offer(re);
-464}
-465if (!successfulAddition) {
-466  ramCache.remove(cacheKey);
-467  cacheStats.failInsert();
-468} else {
-469  this.blockNumber.increment();
-470  
this.heapSize.add(cachedItem.heapSize());
-471  blocksByHFile.add(cacheKey);
-472}
-473  }
-474
-475  /**
-476   * Get the buffer of the block with the 
specified key.
-477   * @param key block's cache key
-478   * @param caching true if the caller 
caches blocks on cache misses
-479   * @param repeat Whether this is a 
repeat lookup for the same block
-480   * @param updateCacheMetrics Whether we 
should update cache metrics or not
-481   * @return buffer of specified cache 
key, or null if not in cache
-482   */
-483  @Override
-484  public Cacheable getBlock(BlockCacheKey 
key, boolean caching, boolean repeat,
-485  boolean updateCacheMetrics) {
-486if (!cacheEnabled) {
-487  return null;
-488}
-489RAMQueueEntry re = 
ramCache.get(key);
-490if (re != null) {
-491  if (updateCacheMetrics) {
-492cacheStats.hit(caching, 
key.isPrimary(), key.getBlockType());
-493  }
-494  
re.access(accessCount.incrementAndGet());
-495  return re.getData();
-496}
-497BucketEntry bucketEntry = 
backingMap.get(key);
-498if (bucketEntry != null) {
-499  long start = System.nanoTime();
-500  ReentrantReadWriteLock lock = 
offsetLock.getLock(bucketEntry.offset());
-501  try {
-502lock.readLock().lock();
-503// We can not read here even if 
backingMap does contain the given key because its offset
-504// maybe changed. If we lock 
BlockCacheKey instead of offset, then we can only check
-505// existence here.
-506if 
(bucketEntry.equals(backingMap.get(key))) {
-507  // TODO : change this area - 
should be removed after server cells and
-508  // 12295 are available
-509  int len = 
bucketEntry.getLength();
-510  if (LOG.isTraceEnabled()) {
-511LOG.trace("Read offset=" + 
bucketEntry.offset() + ", len=" + len);
-512  }
-513  Cacheable cachedBlock = 
ioEngine.read(bucketEntry.offset(), len,
-514  
bucketEntry.deserializerReference(this.deserialiserMap));
-515  long timeTaken = 
System.nanoTime() - start;
-516  if (updateCacheMetrics) {
-517cacheStats.hit(caching, 
key.isPrimary(), key.getBlockType());
-518
cacheStats.ioHit(timeTaken);
+430
+431  try {
+432int comparison = 
BlockCacheUtil.validateBlockAddition(existingBlock, cachedItem, cacheKey);
+433if (comparison !
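
Note on the hunk above: the widened containment test is the point of the change. A block can sit in ramCache (queued for a writer thread) before it ever reaches backingMap, so checking backingMap alone missed in-flight duplicates. A self-contained illustration of the two-map check; String and Integer stand in for the real key and entry types:

  import java.util.concurrent.ConcurrentHashMap;

  public class ContainmentSketch {
    static ConcurrentHashMap<String, Integer> backingMap = new ConcurrentHashMap<>();
    static ConcurrentHashMap<String, Integer> ramCache = new ConcurrentHashMap<>();

    static boolean alreadyCached(String cacheKey) {
      // New check: the block is a duplicate if it is persisted OR still
      // waiting in the RAM queue to be drained to the IOEngine.
      return backingMap.containsKey(cacheKey) || ramCache.containsKey(cacheKey);
    }

    public static void main(String[] args) {
      ramCache.put("hfile1:0", 42); // queued but not yet drained
      System.out.println(alreadyCached("hfile1:0")); // true; the old check said false
    }
  }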

[24/50] hbase-site git commit: Published site at 26babcf013de696b899d76a3c39434b794440d8d.

2018-05-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/da4482ac/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.BucketEntryGroup.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.BucketEntryGroup.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.BucketEntryGroup.html
index e8070ca..8cb24b3 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.BucketEntryGroup.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.BucketEntryGroup.html
@@ -433,1254 +433,1261 @@
 425  return;
 426}
 427
-428if (backingMap.containsKey(cacheKey)) 
{
+428if (backingMap.containsKey(cacheKey) 
|| ramCache.containsKey(cacheKey)) {
 429  Cacheable existingBlock = 
getBlock(cacheKey, false, false, false);
-430  try {
-431if 
(BlockCacheUtil.compareCacheBlock(cachedItem, existingBlock) != 0) {
-432  throw new 
RuntimeException("Cached block contents differ, which should not have 
happened."
-433  + "cacheKey:" + 
cacheKey);
-434}
-435String msg = "Caching an already 
cached block: " + cacheKey;
-436msg += ". This is harmless and 
can happen in rare cases (see HBASE-8547)";
-437LOG.warn(msg);
-438  } finally {
-439// return the block since we need 
to decrement the count
-440returnBlock(cacheKey, 
existingBlock);
-441  }
-442  return;
-443}
-444
-445/*
-446 * Stuff the entry into the RAM cache 
so it can get drained to the persistent store
-447 */
-448RAMQueueEntry re =
-449new RAMQueueEntry(cacheKey, 
cachedItem, accessCount.incrementAndGet(), inMemory);
-450if (ramCache.putIfAbsent(cacheKey, 
re) != null) {
-451  return;
-452}
-453    int queueNum = (cacheKey.hashCode() & 0x7FFFFFFF) % writerQueues.size();
-454    BlockingQueue<RAMQueueEntry> bq = writerQueues.get(queueNum);
-455boolean successfulAddition = false;
-456if (wait) {
-457  try {
-458successfulAddition = bq.offer(re, 
DEFAULT_CACHE_WAIT_TIME, TimeUnit.MILLISECONDS);
-459  } catch (InterruptedException e) 
{
-460
Thread.currentThread().interrupt();
-461  }
-462} else {
-463  successfulAddition = 
bq.offer(re);
-464}
-465if (!successfulAddition) {
-466  ramCache.remove(cacheKey);
-467  cacheStats.failInsert();
-468} else {
-469  this.blockNumber.increment();
-470  
this.heapSize.add(cachedItem.heapSize());
-471  blocksByHFile.add(cacheKey);
-472}
-473  }
-474
-475  /**
-476   * Get the buffer of the block with the 
specified key.
-477   * @param key block's cache key
-478   * @param caching true if the caller 
caches blocks on cache misses
-479   * @param repeat Whether this is a 
repeat lookup for the same block
-480   * @param updateCacheMetrics Whether we 
should update cache metrics or not
-481   * @return buffer of specified cache 
key, or null if not in cache
-482   */
-483  @Override
-484  public Cacheable getBlock(BlockCacheKey 
key, boolean caching, boolean repeat,
-485  boolean updateCacheMetrics) {
-486if (!cacheEnabled) {
-487  return null;
-488}
-489RAMQueueEntry re = 
ramCache.get(key);
-490if (re != null) {
-491  if (updateCacheMetrics) {
-492cacheStats.hit(caching, 
key.isPrimary(), key.getBlockType());
-493  }
-494  
re.access(accessCount.incrementAndGet());
-495  return re.getData();
-496}
-497BucketEntry bucketEntry = 
backingMap.get(key);
-498if (bucketEntry != null) {
-499  long start = System.nanoTime();
-500  ReentrantReadWriteLock lock = 
offsetLock.getLock(bucketEntry.offset());
-501  try {
-502lock.readLock().lock();
-503// We can not read here even if 
backingMap does contain the given key because its offset
-504// maybe changed. If we lock 
BlockCacheKey instead of offset, then we can only check
-505// existence here.
-506if 
(bucketEntry.equals(backingMap.get(key))) {
-507  // TODO : change this area - 
should be removed after server cells and
-508  // 12295 are available
-509  int len = 
bucketEntry.getLength();
-510  if (LOG.isTraceEnabled()) {
-511LOG.trace("Read offset=" + 
bucketEntry.offset() + ", len=" + len);
-512  }
-513  Cacheable cachedBlock = 
ioEngine.read(bucketEntry.offset(), len,
-514  
bucketEntry.deserializerReference(this.deserialiserMap));
-515  long timeTaken = 
System.nanoTime() - start;
-516  if (updateCacheMetrics) {
-517cacheStats.hit(caching, 
key.isPrimary(), key.getBlockType());
-518
cacheStats.ioHit(timeTaken);
+430
+431  try {
+432int comparison = 
BlockCacheUtil.validateBlockAddition(existingBlock, cachedItem, cacheKey);
+433

[29/50] hbase-site git commit: Published site at 26babcf013de696b899d76a3c39434b794440d8d.

2018-05-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/da4482ac/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/LruBlockCache.EvictionThread.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/LruBlockCache.EvictionThread.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/LruBlockCache.EvictionThread.html
index f5747c6..b94ef5e 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/LruBlockCache.EvictionThread.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/LruBlockCache.EvictionThread.html
@@ -388,811 +388,816 @@
 380
 381LruCachedBlock cb = 
map.get(cacheKey);
 382if (cb != null) {
-383  // compare the contents, if they 
are not equal, we are in big trouble
-384  if 
(BlockCacheUtil.compareCacheBlock(buf, cb.getBuffer()) != 0) {
-385throw new 
RuntimeException("Cached block contents differ, which should not have 
happened."
-386  + "cacheKey:" + cacheKey);
-387  }
-388  String msg = "Cached an already 
cached block: " + cacheKey + " cb:" + cb.getCacheKey();
-389  msg += ". This is harmless and can 
happen in rare cases (see HBASE-8547)";
-390  LOG.warn(msg);
-391  return;
-392}
-393long currentSize = size.get();
-394long currentAcceptableSize = 
acceptableSize();
-395long hardLimitSize = (long) 
(hardCapacityLimitFactor * currentAcceptableSize);
-396if (currentSize >= hardLimitSize) 
{
-397  stats.failInsert();
-398  if (LOG.isTraceEnabled()) {
-399LOG.trace("LruBlockCache current 
size " + StringUtils.byteDesc(currentSize)
-400  + " has exceeded acceptable 
size " + StringUtils.byteDesc(currentAcceptableSize) + "."
-401  + " The hard limit size is " + 
StringUtils.byteDesc(hardLimitSize)
-402  + ", failed to put cacheKey:" + 
cacheKey + " into LruBlockCache.");
-403  }
-404  if (!evictionInProgress) {
-405runEviction();
-406  }
-407  return;
-408}
-409cb = new LruCachedBlock(cacheKey, 
buf, count.incrementAndGet(), inMemory);
-410long newSize = updateSizeMetrics(cb, 
false);
-411map.put(cacheKey, cb);
-412long val = 
elements.incrementAndGet();
-413if (buf.getBlockType().isData()) {
-414   dataBlockElements.increment();
-415}
-416if (LOG.isTraceEnabled()) {
-417  long size = map.size();
-418  assertCounterSanity(size, val);
-419}
-420if (newSize > 
currentAcceptableSize && !evictionInProgress) {
-421  runEviction();
-422}
-423  }
-424
-425  /**
-426   * Sanity-checking for parity between 
actual block cache content and metrics.
-427   * Intended only for use with TRACE 
level logging and -ea JVM.
-428   */
-429  private static void 
assertCounterSanity(long mapSize, long counterVal) {
-430if (counterVal < 0) {
-431  LOG.trace("counterVal overflow. 
Assertions unreliable. counterVal=" + counterVal +
-432", mapSize=" + mapSize);
-433  return;
-434}
-435if (mapSize < Integer.MAX_VALUE) 
{
-436      double pct_diff = Math.abs((((double) counterVal) / ((double) mapSize)) - 1.);
-437  if (pct_diff > 0.05) {
-438LOG.trace("delta between reported 
and actual size > 5%. counterVal=" + counterVal +
-439  ", mapSize=" + mapSize);
-440  }
-441}
-442  }
-443
-444  /**
-445   * Cache the block with the specified name and buffer.
-446   *
-447   *
-448   * @param cacheKey block's cache key
-449   * @param buf block buffer
-450   */
-451  @Override
-452  public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf) {
-453    cacheBlock(cacheKey, buf, false);
-454  }
-455
-456  /**
-457   * Helper function that updates the local size counter and also updates any
-458   * per-cf or per-blocktype metrics it can discern from given
-459   * {@link LruCachedBlock}
-460   */
-461  private long updateSizeMetrics(LruCachedBlock cb, boolean evict) {
-462    long heapsize = cb.heapSize();
-463    BlockType bt = cb.getBuffer().getBlockType();
-464    if (evict) {
-465      heapsize *= -1;
-466    }
-467    if (bt != null && bt.isData()) {
-468      dataBlockSize.add(heapsize);
-469    }
-470    return size.addAndGet(heapsize);
-471  }
-472
-473  /**
-474   * Get the buffer of the block with the specified name.
-475   *
-476   * @param cacheKey block's cache key
-477   * @param caching true if the caller caches blocks on cache misses
-478   * @param repeat Whether this is a repeat lookup for the same block
-479   *          (used to avoid double counting cache misses when doing double-check
-480   *          locking)
-481   * @param updateCacheMetrics Whether to update cache metrics or not
-482   *
-483   * @return buffer of specified cache key, or null if not in cache
-484   */
-485  @Override
-486  public Cacheable getBlock(BlockCacheKey cacheKey, boolean ca
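
A quick worked example of the 5% parity check in assertCounterSanity() above, with illustrative values: if the element counter reports 106 while the map holds 100 entries, pct_diff is |106/100 - 1| = 0.06, which exceeds the 0.05 threshold and gets TRACE-logged:

  public class CounterSanitySketch {
    public static void main(String[] args) {
      long counterVal = 106, mapSize = 100;   // illustrative values
      double pctDiff = Math.abs((((double) counterVal) / ((double) mapSize)) - 1.);
      System.out.println(pctDiff > 0.05);     // true -> the drift would be logged
    }
  }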


[19/50] hbase-site git commit: Published site at 26babcf013de696b899d76a3c39434b794440d8d.

2018-05-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/da4482ac/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.html
index e8070ca..8cb24b3 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.html
@@ -433,1254 +433,1261 @@
 425  return;
 426}
 427
-428if (backingMap.containsKey(cacheKey)) 
{
+428if (backingMap.containsKey(cacheKey) 
|| ramCache.containsKey(cacheKey)) {
 429  Cacheable existingBlock = 
getBlock(cacheKey, false, false, false);
-430  try {
-431if 
(BlockCacheUtil.compareCacheBlock(cachedItem, existingBlock) != 0) {
-432  throw new 
RuntimeException("Cached block contents differ, which should not have 
happened."
-433  + "cacheKey:" + 
cacheKey);
-434}
-435String msg = "Caching an already 
cached block: " + cacheKey;
-436msg += ". This is harmless and 
can happen in rare cases (see HBASE-8547)";
-437LOG.warn(msg);
-438  } finally {
-439// return the block since we need 
to decrement the count
-440returnBlock(cacheKey, 
existingBlock);
-441  }
-442  return;
-443}
-444
-445/*
-446 * Stuff the entry into the RAM cache 
so it can get drained to the persistent store
-447 */
-448RAMQueueEntry re =
-449new RAMQueueEntry(cacheKey, 
cachedItem, accessCount.incrementAndGet(), inMemory);
-450if (ramCache.putIfAbsent(cacheKey, 
re) != null) {
-451  return;
-452}
-453    int queueNum = (cacheKey.hashCode() & 0x7FFFFFFF) % writerQueues.size();
-454    BlockingQueue<RAMQueueEntry> bq = writerQueues.get(queueNum);
-455boolean successfulAddition = false;
-456if (wait) {
-457  try {
-458successfulAddition = bq.offer(re, 
DEFAULT_CACHE_WAIT_TIME, TimeUnit.MILLISECONDS);
-459  } catch (InterruptedException e) 
{
-460
Thread.currentThread().interrupt();
-461  }
-462} else {
-463  successfulAddition = 
bq.offer(re);
-464}
-465if (!successfulAddition) {
-466  ramCache.remove(cacheKey);
-467  cacheStats.failInsert();
-468} else {
-469  this.blockNumber.increment();
-470  
this.heapSize.add(cachedItem.heapSize());
-471  blocksByHFile.add(cacheKey);
-472}
-473  }
-474
-475  /**
-476   * Get the buffer of the block with the 
specified key.
-477   * @param key block's cache key
-478   * @param caching true if the caller 
caches blocks on cache misses
-479   * @param repeat Whether this is a 
repeat lookup for the same block
-480   * @param updateCacheMetrics Whether we 
should update cache metrics or not
-481   * @return buffer of specified cache 
key, or null if not in cache
-482   */
-483  @Override
-484  public Cacheable getBlock(BlockCacheKey 
key, boolean caching, boolean repeat,
-485  boolean updateCacheMetrics) {
-486if (!cacheEnabled) {
-487  return null;
-488}
-489RAMQueueEntry re = 
ramCache.get(key);
-490if (re != null) {
-491  if (updateCacheMetrics) {
-492cacheStats.hit(caching, 
key.isPrimary(), key.getBlockType());
-493  }
-494  
re.access(accessCount.incrementAndGet());
-495  return re.getData();
-496}
-497BucketEntry bucketEntry = 
backingMap.get(key);
-498if (bucketEntry != null) {
-499  long start = System.nanoTime();
-500  ReentrantReadWriteLock lock = 
offsetLock.getLock(bucketEntry.offset());
-501  try {
-502lock.readLock().lock();
-503// We can not read here even if 
backingMap does contain the given key because its offset
-504// maybe changed. If we lock 
BlockCacheKey instead of offset, then we can only check
-505// existence here.
-506if 
(bucketEntry.equals(backingMap.get(key))) {
-507  // TODO : change this area - 
should be removed after server cells and
-508  // 12295 are available
-509  int len = 
bucketEntry.getLength();
-510  if (LOG.isTraceEnabled()) {
-511LOG.trace("Read offset=" + 
bucketEntry.offset() + ", len=" + len);
-512  }
-513  Cacheable cachedBlock = 
ioEngine.read(bucketEntry.offset(), len,
-514  
bucketEntry.deserializerReference(this.deserialiserMap));
-515  long timeTaken = 
System.nanoTime() - start;
-516  if (updateCacheMetrics) {
-517cacheStats.hit(caching, 
key.isPrimary(), key.getBlockType());
-518
cacheStats.ioHit(timeTaken);
+430
+431  try {
+432int comparison = 
BlockCacheUtil.validateBlockAddition(existingBlock, cachedItem, cacheKey);
+433if (comparison != 0) {
+434  if (comparison < 0) {
+435LOG.war

[44/50] hbase-site git commit: Published site at 26babcf013de696b899d76a3c39434b794440d8d.

2018-05-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/da4482ac/devapidocs/org/apache/hadoop/hbase/regionserver/ScannerContext.LimitFields.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/ScannerContext.LimitFields.html
 
b/devapidocs/org/apache/hadoop/hbase/regionserver/ScannerContext.LimitFields.html
index 4cec32f..21e34e3 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/regionserver/ScannerContext.LimitFields.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/regionserver/ScannerContext.LimitFields.html
@@ -113,7 +113,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-private static class ScannerContext.LimitFields
+private static class ScannerContext.LimitFields
 extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">Object
 The different fields that can be used as limits in calls to
  InternalScanner.next(java.util.List)
 and InternalScanner.next(java.util.List)
@@ -198,14 +198,6 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 Fields keep their default values.
 
 
-
-LimitFields(int batch,
-   ScannerContext.LimitScope sizeScope,
-   long size,
-   long heapSize,
-   ScannerContext.LimitScope timeScope,
-   long time) 
-
 
 
 
@@ -332,7 +324,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 DEFAULT_BATCH
-private static int DEFAULT_BATCH
+private static int DEFAULT_BATCH
 Default values of the limit fields. Defined such that if a 
field does NOT change from its
  default, it will not be enforced
 
@@ -343,7 +335,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 DEFAULT_SIZE
-private static long DEFAULT_SIZE
+private static long DEFAULT_SIZE
 
 
 
@@ -352,7 +344,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 DEFAULT_TIME
-private static long DEFAULT_TIME
+private static long DEFAULT_TIME
 
 
 
@@ -361,7 +353,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 DEFAULT_SCOPE
-private static final ScannerContext.LimitScope DEFAULT_SCOPE
+private static final ScannerContext.LimitScope DEFAULT_SCOPE
 Default scope that is assigned to a limit if a scope is not 
specified.
 
 
@@ -371,7 +363,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 batch
-int batch
+int batch
 
 
 
@@ -380,7 +372,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 sizeScope
-ScannerContext.LimitScope sizeScope
+ScannerContext.LimitScope sizeScope
 
 
 
@@ -389,7 +381,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 dataSize
-long dataSize
+long dataSize
 
 
 
@@ -398,7 +390,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 heapSize
-long heapSize
+long heapSize
 
 
 
@@ -407,7 +399,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 timeScope
-ScannerContext.LimitScope timeScope
+ScannerContext.LimitScope timeScope
 
 
 
@@ -416,7 +408,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 time
-long time
+long time
 
 
 
@@ -430,25 +422,11 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 
-
-
-LimitFields
-LimitFields()
-Fields keep their default values.
-
-
-
-
-
 
 
 LimitFields
-LimitFields(int batch,
-ScannerContext.LimitScope sizeScope,
-long size,
-long heapSize,
-ScannerContext.LimitScope timeScope,
-long time)
+LimitFields()
+Fields keep their default values.
 
 
 
@@ -465,7 +443,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 copy
-void copy(ScannerContext.LimitFields limitsToCopy)
+void copy(ScannerContext.LimitFields limitsToCopy)
 
 
 
@@ -474,7 +452,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 setFields
-void setFields(int batch,
+void setFields(int batch,
ScannerContext.LimitScope sizeScope,
long dataSize,
long heapSize,
@@ -489,7 +467,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 getBatch
-int getBatch()
+int getBatch()
 
 
 
@@ -498,7 +476,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 setBatch
-void setBatch(int batch)
+void setBatch(int batch)
 
 
 
@@ -507,7 +485,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 canEnforceBatchLimitFromScope
-boolean canEnforceBatchLimitFromScope(ScannerContext.LimitScope checkerScope)
+boolean canEnforceBatchLimitFromScope(ScannerContext.LimitScope checkerScope)
 
 Parameters:
 checkerScope - 
@@ -522,7 +500,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 getDataSize
-long getDataSize()
+long getDataSize()
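
The net effect of this page's diff: LimitFields lost its six-argument constructor, so limits are now applied to a default-constructed instance through setFields(). A self-contained sketch of that shape; the names and default values are simplified stand-ins, not the real package-private class:

  public class LimitFieldsSketch {
    // Defaults mean "not enforced", mirroring the javadoc above.
    private int batch = -1;
    private long dataSize = -1, heapSize = -1, time = -1;

    void setFields(int batch, long dataSize, long heapSize, long time) {
      this.batch = batch;
      this.dataSize = dataSize;
      this.heapSize = heapSize;
      this.time = time;
    }

    public static void main(String[] args) {
      LimitFieldsSketch limits = new LimitFieldsSketch(); // fields keep their defaults
      limits.setFields(100, 1 << 20, 4 << 20, 60_000);    // then set everything at once
      System.out.println("batch limit = " + limits.batch);
    }
  }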

[07/50] hbase-site git commit: Published site at 26babcf013de696b899d76a3c39434b794440d8d.

2018-05-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/da4482ac/testdevapidocs/src-html/org/apache/hadoop/hbase/io/hfile/TestHFileBlock.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/io/hfile/TestHFileBlock.html 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/io/hfile/TestHFileBlock.html
index 37f7b15..da0a780 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/io/hfile/TestHFileBlock.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/io/hfile/TestHFileBlock.html
@@ -476,7 +476,7 @@
 468// test serialized blocks
 469for (boolean reuseBuffer : 
new boolean[] { false, true }) {
 470  ByteBuffer serialized = 
ByteBuffer.allocate(blockFromHFile.getSerializedLength());
-471  
blockFromHFile.serialize(serialized);
+471  
blockFromHFile.serialize(serialized, true);
 472  HFileBlock deserialized =
 473  (HFileBlock) 
blockFromHFile.getDeserializer().deserialize(
 474new 
SingleByteBuff(serialized), reuseBuffer, MemoryType.EXCLUSIVE);
@@ -866,7 +866,30 @@
 858  block.heapSize());
 859}
 860  }
-861}
+861
+862  @Test
+863  public void 
testSerializeWithoutNextBlockMetadata() {
+864int size = 100;
+865int length = 
HConstants.HFILEBLOCK_HEADER_SIZE + size;
+866byte[] byteArr = new byte[length];
+867ByteBuffer buf = 
ByteBuffer.wrap(byteArr, 0, size);
+868HFileContext meta = new 
HFileContextBuilder().build();
+869HFileBlock blockWithNextBlockMetadata 
= new HFileBlock(BlockType.DATA, size, size, -1, buf,
+870HFileBlock.FILL_HEADER, -1, 52, 
-1, meta);
+871HFileBlock 
blockWithoutNextBlockMetadata = new HFileBlock(BlockType.DATA, size, size, -1, 
buf,
+872HFileBlock.FILL_HEADER, -1, -1, 
-1, meta);
+873ByteBuffer buff1 = 
ByteBuffer.allocate(length);
+874ByteBuffer buff2 = 
ByteBuffer.allocate(length);
+875
blockWithNextBlockMetadata.serialize(buff1, true);
+876
blockWithoutNextBlockMetadata.serialize(buff2, true);
+877assertNotEquals(buff1, buff2);
+878buff1.clear();
+879buff2.clear();
+880
blockWithNextBlockMetadata.serialize(buff1, false);
+881
blockWithoutNextBlockMetadata.serialize(buff2, false);
+882assertEquals(buff1, buff2);
+883  }
+884}
 
 
 



[12/50] hbase-site git commit: Published site at 26babcf013de696b899d76a3c39434b794440d8d.

2018-05-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/da4482ac/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/StoreScanner.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/StoreScanner.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/StoreScanner.html
index baef4a1..9e934af 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/StoreScanner.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/StoreScanner.html
@@ -561,594 +561,609 @@
 553
 554LOOP: do {
 555  // Update and check the time limit 
based on the configured value of cellsPerTimeoutCheck
-556  if ((kvsScanned % 
cellsPerHeartbeatCheck == 0)) {
-557if 
(scannerContext.checkTimeLimit(LimitScope.BETWEEN_CELLS)) {
-558  return 
scannerContext.setScannerState(NextState.TIME_LIMIT_REACHED).hasMoreValues();
-559}
-560  }
-561  // Do object compare - we set 
prevKV from the same heap.
-562  if (prevCell != cell) {
-563++kvsScanned;
-564  }
-565  checkScanOrder(prevCell, cell, 
comparator);
-566  int cellSize = 
PrivateCellUtil.estimatedSerializedSizeOf(cell);
-567  bytesRead += cellSize;
-568  prevCell = cell;
-569  
scannerContext.setLastPeekedCell(cell);
-570  topChanged = false;
-571  ScanQueryMatcher.MatchCode qcode = 
matcher.match(cell);
-572  switch (qcode) {
-573case INCLUDE:
-574case INCLUDE_AND_SEEK_NEXT_ROW:
-575case INCLUDE_AND_SEEK_NEXT_COL:
-576
-577  Filter f = 
matcher.getFilter();
-578  if (f != null) {
-579cell = 
f.transformCell(cell);
-580  }
-581
-582  this.countPerRow++;
-583  if (storeLimit > -1 
&& this.countPerRow > (storeLimit + storeOffset)) {
-584// do what SEEK_NEXT_ROW 
does.
-585if 
(!matcher.moreRowsMayExistAfter(cell)) {
-586  close(false);// Do all 
cleanup except heap.close()
-587  return 
scannerContext.setScannerState(NextState.NO_MORE_VALUES).hasMoreValues();
-588}
-589matcher.clearCurrentRow();
-590seekToNextRow(cell);
-591break LOOP;
-592  }
-593
-594  // add to results only if we 
have skipped #storeOffset kvs
-595  // also update metric 
accordingly
-596  if (this.countPerRow > 
storeOffset) {
-597outResult.add(cell);
-598
-599// Update local tracking 
information
-600count++;
-601totalBytesRead += cellSize;
-602
-603// Update the progress of the 
scanner context
-604
scannerContext.incrementSizeProgress(cellSize,
-605  
PrivateCellUtil.estimatedSizeOfCell(cell));
-606
scannerContext.incrementBatchProgress(1);
-607
-608if (matcher.isUserScan() 
&& totalBytesRead > maxRowSize) {
-609  throw new 
RowTooBigException(
-610  "Max row size allowed: 
" + maxRowSize + ", but the row is bigger than that.");
-611}
-612  }
+556  // Or if the preadMaxBytes is 
reached and we may want to return so we can switch to stream in
+557  // the shipped method below.
+558  if (kvsScanned % 
cellsPerHeartbeatCheck == 0 || (scanUsePread &&
+559scan.getReadType() == 
Scan.ReadType.DEFAULT && bytesRead > preadMaxBytes)) {
+560if 
(scannerContext.checkTimeLimit(LimitScope.BETWEEN_CELLS)) {
+561  return 
scannerContext.setScannerState(NextState.TIME_LIMIT_REACHED).hasMoreValues();
+562}
+563  }
+564  // Do object compare - we set 
prevKV from the same heap.
+565  if (prevCell != cell) {
+566++kvsScanned;
+567  }
+568  checkScanOrder(prevCell, cell, 
comparator);
+569  int cellSize = 
PrivateCellUtil.estimatedSerializedSizeOf(cell);
+570  bytesRead += cellSize;
+571  if (scanUsePread && 
scan.getReadType() == Scan.ReadType.DEFAULT &&
+572bytesRead > preadMaxBytes) {
+573// return immediately if we want 
to switch from pread to stream. We need this because we can
+574// only switch in the shipped 
method, if user use a filter to filter out everything and rpc
+575// timeout is very large then the 
shipped method will never be called until the whole scan
+576// is finished, but at that time 
we have already scan all the data...
+577// See HBASE-20457 for more 
details.
+578// And there is still a scenario 
that can not be handled. If we have a very large row, which
+579// have millions of qualifiers, 
and filter.filterRow is used, then even if we set the flag
+580// here, we still need to scan 
all the qualifiers before returning...
+581
scannerContext.returnImmediately();
+582  }
+583  prevCell = cell;
+584  
scannerContext.setLastPeekedCell(cell);
+585  topChanged = fals
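
Note on the hunk above (the HBASE-20457 fix): once a DEFAULT-read-type scan running on pread has read more than preadMaxBytes, StoreScanner asks the scanner context to return immediately so that shipped() gets a chance to reopen the readers in stream mode. A condensed, self-contained sketch of the trigger condition; field names mirror StoreScanner and the byte counts are illustrative:

  public class PreadSwitchSketch {
    public static void main(String[] args) {
      boolean scanUsePread = true;
      boolean defaultReadType = true;   // scan.getReadType() == Scan.ReadType.DEFAULT
      long bytesRead = 5_000_000L, preadMaxBytes = 4_000_000L;
      if (scanUsePread && defaultReadType && bytesRead > preadMaxBytes) {
        // In StoreScanner this is scannerContext.returnImmediately(): hand the
        // RPC back now so the pread -> stream switch can happen in shipped().
        System.out.println("return immediately; switch to stream in shipped()");
      }
    }
  }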

[09/50] hbase-site git commit: Published site at 26babcf013de696b899d76a3c39434b794440d8d.

2018-05-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/da4482ac/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestSwitchToStreamRead.MatchLastRowCellNextColFilter.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestSwitchToStreamRead.MatchLastRowCellNextColFilter.html
 
b/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestSwitchToStreamRead.MatchLastRowCellNextColFilter.html
new file mode 100644
index 000..a2bd2f4
--- /dev/null
+++ 
b/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestSwitchToStreamRead.MatchLastRowCellNextColFilter.html
@@ -0,0 +1,337 @@
+http://www.w3.org/TR/html4/loose.dtd";>
+
+
+
+
+
+TestSwitchToStreamRead.MatchLastRowCellNextColFilter (Apache HBase 
3.0.0-SNAPSHOT Test API)
+
+
+
+
+
+var methods = {"i0":10};
+var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],8:["t4","Concrete Methods"]};
+var altColor = "altColor";
+var rowColor = "rowColor";
+var tableTab = "tableTab";
+var activeTableTab = "activeTableTab";
+
+
+JavaScript is disabled on your browser.
+
+
+
+
+
+Skip navigation links
+
+
+
+
+Overview
+Package
+Class
+Use
+Tree
+Deprecated
+Index
+Help
+
+
+
+
+Prev Class
+Next Class
+
+
+Frames
+No Frames
+
+
+All Classes
+
+
+
+
+
+
+
+Summary: 
+Nested | 
+Field | 
+Constr | 
+Method
+
+
+Detail: 
+Field | 
+Constr | 
+Method
+
+
+
+
+
+
+
+
+org.apache.hadoop.hbase.regionserver
+Class TestSwitchToStreamRead.MatchLastRowCellNextColFilter
+
+
+
+https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">java.lang.Object
+
+
+org.apache.hadoop.hbase.filter.Filter
+
+
+org.apache.hadoop.hbase.filter.FilterBase
+
+
+org.apache.hadoop.hbase.regionserver.TestSwitchToStreamRead.MatchLastRowCellNextColFilter
+
+
+
+
+
+
+
+
+
+
+
+Enclosing class:
+TestSwitchToStreamRead
+
+
+
+public static final class TestSwitchToStreamRead.MatchLastRowCellNextColFilter
+extends org.apache.hadoop.hbase.filter.FilterBase
+
+
+
+
+
+
+
+
+
+
+
+Nested Class Summary
+
+
+
+
+Nested classes/interfaces inherited from 
class org.apache.hadoop.hbase.filter.Filter
+org.apache.hadoop.hbase.filter.Filter.ReturnCode
+
+
+
+
+
+
+
+
+Field Summary
+
+
+
+
+Fields inherited from class org.apache.hadoop.hbase.filter.Filter
+reversed
+
+
+
+
+
+
+
+
+Constructor Summary
+
+Constructors 
+
+Constructor and Description
+
+
+MatchLastRowCellNextColFilter() 
+
+
+
+
+
+
+
+
+
+Method Summary
+
+All Methods Instance Methods Concrete Methods 
+
+Modifier and Type
+Method and Description
+
+
+org.apache.hadoop.hbase.filter.Filter.ReturnCode
+filterCell(org.apache.hadoop.hbase.Cell c) 
+
+
+
+
+
+
+Methods inherited from 
class org.apache.hadoop.hbase.filter.FilterBase
+createFilterFromArguments, filterAllRemaining, filterRow, 
filterRowCells, filterRowKey, filterRowKey, getNextCellHint, hasFilterRow, 
isFamilyEssential, reset, toByteArray, toString, transformCell
+
+
+
+
+
+Methods inherited from 
class org.apache.hadoop.hbase.filter.Filter
+filterKeyValue, isReversed, parseFrom, setReversed
+
+
+
+
+
+Methods inherited from class java.lang.https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">Object
+https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#clone--";
 title="class or interface in java.lang">clone, https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#equals-java.lang.Object-";
 title="class or interface in java.lang">equals, https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#finalize--";
 title="class or interface in java.lang">finalize, https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#getClass--";
 title="class or interface in java.lang">getClass, https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#hashCode--";
 title="class or interface in java.lang">hashCode, https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#notify--";
 title="class or interface in java.lang">notify, https://docs.oracle.com/javase/8/docs/api/ja
 va/lang/Object.html?is-external=true#notifyAll--" title="class or interface in 
java.lang">notifyAll, https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#wait--";
 title="class or interface in java.lang">wait, https://docs.oracle.com/javase/8/docs/api/java/lang/Object.

[04/50] hbase-site git commit: Published site at 26babcf013de696b899d76a3c39434b794440d8d.

2018-05-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/da4482ac/testdevapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketCache.MockedBucketCache.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketCache.MockedBucketCache.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketCache.MockedBucketCache.html
index dd58cd9..01157c1 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketCache.MockedBucketCache.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketCache.MockedBucketCache.html
@@ -27,394 +27,433 @@
 019
 020import static 
org.junit.Assert.assertEquals;
 021import static 
org.junit.Assert.assertFalse;
-022import static 
org.junit.Assert.assertTrue;
-023
-024import java.io.FileNotFoundException;
-025import java.io.IOException;
-026import java.util.ArrayList;
-027import java.util.Arrays;
-028import java.util.List;
-029import java.util.Map;
-030import java.util.Random;
-031import java.util.Set;
-032import 
java.util.concurrent.locks.ReentrantReadWriteLock;
-033import 
org.apache.hadoop.conf.Configuration;
-034import org.apache.hadoop.fs.Path;
-035import 
org.apache.hadoop.hbase.HBaseClassTestRule;
-036import 
org.apache.hadoop.hbase.HBaseConfiguration;
-037import 
org.apache.hadoop.hbase.HBaseTestingUtility;
-038import 
org.apache.hadoop.hbase.io.hfile.BlockCacheKey;
-039import 
org.apache.hadoop.hbase.io.hfile.CacheTestUtils;
-040import 
org.apache.hadoop.hbase.io.hfile.CacheTestUtils.HFileBlockPair;
-041import 
org.apache.hadoop.hbase.io.hfile.Cacheable;
-042import 
org.apache.hadoop.hbase.io.hfile.bucket.BucketAllocator.BucketSizeInfo;
-043import 
org.apache.hadoop.hbase.io.hfile.bucket.BucketAllocator.IndexStatistics;
-044import 
org.apache.hadoop.hbase.testclassification.IOTests;
-045import 
org.apache.hadoop.hbase.testclassification.MediumTests;
-046import org.junit.After;
-047import org.junit.Before;
-048import org.junit.ClassRule;
-049import org.junit.Test;
-050import 
org.junit.experimental.categories.Category;
-051import org.junit.runner.RunWith;
-052import org.junit.runners.Parameterized;
-053
-054import 
org.apache.hbase.thirdparty.com.google.common.collect.ImmutableMap;
-055
-056/**
-057 * Basic test of BucketCache. Puts and gets.
-058 *
-059 * Tests will ensure that blocks' data correctness under several threads concurrency
-060 */
-061@RunWith(Parameterized.class)
-062@Category({ IOTests.class, MediumTests.class })
-063public class TestBucketCache {
-064
-065  @ClassRule
-066  public static final HBaseClassTestRule CLASS_RULE =
-067      HBaseClassTestRule.forClass(TestBucketCache.class);
-068
-069  private static final Random RAND = new Random();
-070
-071  @Parameterized.Parameters(name = "{index}: blockSize={0}, bucketSizes={1}")
-072  public static Iterable<Object[]> data() {
-073    return Arrays.asList(new Object[][] {
-074        { 8192, null }, // TODO: why is 8k the default blocksize for these tests?
-075        {
-076            16 * 1024,
-077            new int[] { 2 * 1024 + 1024, 4 * 1024 + 1024, 8 * 1024 + 1024, 16 * 1024 + 1024,
-078                28 * 1024 + 1024, 32 * 1024 + 1024, 64 * 1024 + 1024, 96 * 1024 + 1024,
-079                128 * 1024 + 1024 } } });
-080  }
-081
-082  @Parameterized.Parameter(0)
-083  public int constructedBlockSize;
-084
-085  @Parameterized.Parameter(1)
-086  public int[] constructedBlockSizes;
-087
-088  BucketCache cache;
-089  final int CACHE_SIZE = 100;
-090  final int NUM_BLOCKS = 100;
-091  final int BLOCK_SIZE = CACHE_SIZE / NUM_BLOCKS;
-092  final int NUM_THREADS = 100;
-093  final int NUM_QUERIES = 1;
+022import static org.junit.Assert.assertNull;
+023import static org.junit.Assert.assertTrue;
+024
+025import java.io.FileNotFoundException;
+026import java.io.IOException;
+027import java.nio.ByteBuffer;
+028import java.util.ArrayList;
+029import java.util.Arrays;
+030import java.util.List;
+031import java.util.Map;
+032import java.util.Random;
+033import java.util.Set;
+034import java.util.concurrent.locks.ReentrantReadWriteLock;
+035import org.apache.hadoop.conf.Configuration;
+036import org.apache.hadoop.fs.Path;
+037import org.apache.hadoop.hbase.HBaseClassTestRule;
+038import org.apache.hadoop.hbase.HBaseConfiguration;
+039import org.apache.hadoop.hbase.HBaseTestingUtility;
+040import org.apache.hadoop.hbase.HConstants;
+041import org.apache.hadoop.hbase.io.hfile.BlockCacheKey;
+042import org.apache.hadoop.hbase.io.hfile.BlockType;
+043import org.apache.hadoop.hbase.io.hfile.CacheTestUtils;
+044import org.apache.hadoop.hbase.io.hfile.CacheTestUtils.HFileBlockPair;
+045import org.apache.hadoop.hbase.io.hfile.Cacheable;
+046import org.apache.hadoop.hbase.io.hfile.HFileBlock;
+047import org.apache.hadoop.hbase.io.hfile.HFileContext;
+048import org.apache.hadoop.hbase.io.h


[11/50] hbase-site git commit: Published site at 26babcf013de696b899d76a3c39434b794440d8d.

2018-05-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/da4482ac/downloads.html
--
diff --git a/downloads.html b/downloads.html
index 46eec4c..f65c283 100644
--- a/downloads.html
+++ b/downloads.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase – Apache HBase Downloads
 
@@ -366,7 +366,7 @@ under the License. -->
 https://www.apache.org/";>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2018-05-14
+  Last Published: 
2018-05-15
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/da4482ac/export_control.html
--
diff --git a/export_control.html b/export_control.html
index 99d5056..c3ea1e3 100644
--- a/export_control.html
+++ b/export_control.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase – 
   Export Control
@@ -331,7 +331,7 @@ for more details.
 https://www.apache.org/";>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2018-05-14
+  Last Published: 
2018-05-15
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/da4482ac/index.html
--
diff --git a/index.html b/index.html
index 3ddba67..c6c9050 100644
--- a/index.html
+++ b/index.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase – Apache HBase™ Home
 
@@ -409,7 +409,7 @@ Apache HBase is an open-source, distributed, versioned, 
non-relational database
 https://www.apache.org/";>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2018-05-14
+  Last Published: 
2018-05-15
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/da4482ac/integration.html
--
diff --git a/integration.html b/integration.html
index 43ec0c6..a64256a 100644
--- a/integration.html
+++ b/integration.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase – CI Management
 
@@ -291,7 +291,7 @@
 https://www.apache.org/";>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2018-05-14
+  Last Published: 
2018-05-15
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/da4482ac/issue-tracking.html
--
diff --git a/issue-tracking.html b/issue-tracking.html
index 0b12b13..42db059 100644
--- a/issue-tracking.html
+++ b/issue-tracking.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase – Issue Management
 
@@ -288,7 +288,7 @@
 https://www.apache.org/";>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2018-05-14
+  Last Published: 
2018-05-15
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/da4482ac/license.html
--
diff --git a/license.html b/license.html
index 350b6f0..7147b25 100644
--- a/license.html
+++ b/license.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase – Project Licenses
 
@@ -491,7 +491,7 @@
 https://www.apache.org/";>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2018-05-14
+  Last Published: 
2018-05-15
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/da4482ac/mail-lists.html
--
diff --git a/mail-lists.html b/mail-lists.html
index bdddcde..7ab7e28 100644
--- a/mail-lists.html
+++ b/mail-lists.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase – Project Mailing Lists
 
@@ -341,7 +341,7 @@
 https://www.apache.org/";>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2018-05-14
+  Last Published: 
2018-05-15
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/da4482ac/metrics.html
--
diff --git a/metrics.html b/metrics.html
index cb50802..724fbb6 100644
--- a/metrics.html
+++ b/m

[02/50] hbase-site git commit: Published site at 26babcf013de696b899d76a3c39434b794440d8d.

2018-05-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/da4482ac/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestSwitchToStreamRead.MatchLastRowCellNextColFilter.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestSwitchToStreamRead.MatchLastRowCellNextColFilter.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestSwitchToStreamRead.MatchLastRowCellNextColFilter.html
new file mode 100644
index 000..6f38b3f
--- /dev/null
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestSwitchToStreamRead.MatchLastRowCellNextColFilter.html
@@ -0,0 +1,334 @@
+http://www.w3.org/TR/html4/loose.dtd";>
+
+
+Source code
+
+
+
+
+001/**
+002 * Licensed to the Apache Software 
Foundation (ASF) under one
+003 * or more contributor license 
agreements.  See the NOTICE file
+004 * distributed with this work for 
additional information
+005 * regarding copyright ownership.  The 
ASF licenses this file
+006 * to you under the Apache License, 
Version 2.0 (the
+007 * "License"); you may not use this file 
except in compliance
+008 * with the License.  You may obtain a 
copy of the License at
+009 *
+010 * 
http://www.apache.org/licenses/LICENSE-2.0
+011 *
+012 * Unless required by applicable law or 
agreed to in writing, software
+013 * distributed under the License is 
distributed on an "AS IS" BASIS,
+014 * WITHOUT WARRANTIES OR CONDITIONS OF 
ANY KIND, either express or implied.
+015 * See the License for the specific 
language governing permissions and
+016 * limitations under the License.
+017 */
+018package 
org.apache.hadoop.hbase.regionserver;
+019
+020import static 
org.junit.Assert.assertEquals;
+021import static 
org.junit.Assert.assertFalse;
+022import static 
org.junit.Assert.assertTrue;
+023
+024import java.io.IOException;
+025import java.util.ArrayList;
+026import java.util.List;
+027import 
java.util.concurrent.ThreadLocalRandom;
+028import org.apache.hadoop.hbase.Cell;
+029import 
org.apache.hadoop.hbase.HBaseClassTestRule;
+030import 
org.apache.hadoop.hbase.HBaseTestingUtility;
+031import 
org.apache.hadoop.hbase.TableName;
+032import 
org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
+033import 
org.apache.hadoop.hbase.client.Put;
+034import 
org.apache.hadoop.hbase.client.Result;
+035import 
org.apache.hadoop.hbase.client.Scan;
+036import 
org.apache.hadoop.hbase.client.TableDescriptorBuilder;
+037import 
org.apache.hadoop.hbase.filter.Filter;
+038import 
org.apache.hadoop.hbase.filter.FilterBase;
+039import 
org.apache.hadoop.hbase.regionserver.HRegion.RegionScannerImpl;
+040import 
org.apache.hadoop.hbase.regionserver.ScannerContext.LimitScope;
+041import 
org.apache.hadoop.hbase.testclassification.MediumTests;
+042import 
org.apache.hadoop.hbase.testclassification.RegionServerTests;
+043import 
org.apache.hadoop.hbase.util.Bytes;
+044import org.junit.AfterClass;
+045import org.junit.BeforeClass;
+046import org.junit.ClassRule;
+047import org.junit.Ignore;
+048import org.junit.Test;
+049import 
org.junit.experimental.categories.Category;
+050
+051@Category({ RegionServerTests.class, 
MediumTests.class })
+052public class TestSwitchToStreamRead {
+053
+054  @ClassRule
+055  public static final HBaseClassTestRule 
CLASS_RULE =
+056
HBaseClassTestRule.forClass(TestSwitchToStreamRead.class);
+057
+058  private static final 
HBaseTestingUtility UTIL = new HBaseTestingUtility();
+059
+060  private static TableName TABLE_NAME = 
TableName.valueOf("stream");
+061
+062  private static byte[] FAMILY = 
Bytes.toBytes("cf");
+063
+064  private static byte[] QUAL = 
Bytes.toBytes("cq");
+065
+066  private static String VALUE_PREFIX;
+067
+068  private static HRegion REGION;
+069
+070  @BeforeClass
+071  public static void setUp() throws 
IOException {
+072
UTIL.getConfiguration().setLong(StoreScanner.STORESCANNER_PREAD_MAX_BYTES, 
2048);
+073StringBuilder sb = new 
StringBuilder(256);
+074for (int i = 0; i < 255; i++) {
+075  sb.append((char) 
ThreadLocalRandom.current().nextInt('A', 'z' + 1));
+076}
+077VALUE_PREFIX = 
sb.append("-").toString();
+078REGION = UTIL.createLocalHRegion(
+079  
TableDescriptorBuilder.newBuilder(TABLE_NAME)
+080.setColumnFamily(
+081  
ColumnFamilyDescriptorBuilder.newBuilder(FAMILY).setBlocksize(1024).build())
+082.build(),
+083  null, null);
+084for (int i = 0; i < 900; i++) {
+085  REGION
+086.put(new 
Put(Bytes.toBytes(i)).addColumn(FAMILY, QUAL, Bytes.toBytes(VALUE_PREFIX + 
i)));
+087}
+088REGION.flush(true);
+089for (int i = 900; i < 1000; i++) 
{
+090  REGION
+091.put(new 
Put(Bytes.toBytes(i)).addColumn(FAMILY, QUAL, Bytes.toBytes(VALUE_PREFIX + 
i)));
+092}
+093  }
+094
+095  @AfterClass
+096  public static void tearDown() throws 
IOException {
+097REGION.close(true);
+098UTIL.cleanupTestDir();
+0
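
The STORESCANNER_PREAD_MAX_BYTES knob tuned in setUp() above controls when the server
switches a scan from pread to stream reads. A client can also pin the read type
explicitly through the public Scan API. A minimal sketch, assuming an HBase 2.x client
and an already-opened Table handle (the countRows helper is illustrative, not part of
the patch above):

import java.io.IOException;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;

public class PreadScanExample {
  // Request pread explicitly so the server does not switch this scan to
  // stream reads even after STORESCANNER_PREAD_MAX_BYTES worth of data.
  static long countRows(Table table) throws IOException {
    Scan scan = new Scan().setReadType(Scan.ReadType.PREAD);
    long rows = 0;
    try (ResultScanner scanner = table.getScanner(scan)) {
      for (Result r : scanner) {
        rows++;
      }
    }
    return rows;
  }
}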

[05/50] hbase-site git commit: Published site at 26babcf013de696b899d76a3c39434b794440d8d.

2018-05-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/da4482ac/testdevapidocs/src-html/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.html
index 1774db3..2ab8397 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.html
@@ -39,879 +39,933 @@
 031import 
org.apache.hadoop.conf.Configuration;
 032import 
org.apache.hadoop.hbase.HBaseClassTestRule;
 033import 
org.apache.hadoop.hbase.HBaseConfiguration;
-034import org.apache.hadoop.hbase.Waiter;
-035import 
org.apache.hadoop.hbase.Waiter.ExplainingPredicate;
-036import 
org.apache.hadoop.hbase.io.HeapSize;
-037import 
org.apache.hadoop.hbase.io.hfile.LruBlockCache.EvictionThread;
-038import 
org.apache.hadoop.hbase.testclassification.IOTests;
-039import 
org.apache.hadoop.hbase.testclassification.SmallTests;
-040import 
org.apache.hadoop.hbase.util.ClassSize;
-041import org.junit.ClassRule;
-042import org.junit.Test;
-043import 
org.junit.experimental.categories.Category;
-044
-045/**
-046 * Tests the concurrent 
LruBlockCache.

-047 *
-048 * Tests will ensure it grows and shrinks in size properly,
-049 * evictions run when they're supposed to and do what they should,
-050 * and that cached blocks are accessible when expected to be.
-051 */
-052@Category({IOTests.class, SmallTests.class})
-053public class TestLruBlockCache {
-054
-055  @ClassRule
-056  public static final HBaseClassTestRule CLASS_RULE =
-057      HBaseClassTestRule.forClass(TestLruBlockCache.class);
-058
-059  @Test
-060  public void testCacheEvictionThreadSafe() throws Exception {
-061    long maxSize = 10;
-062    int numBlocks = 9;
-063    int testRuns = 10;
-064    final long blockSize = calculateBlockSizeDefault(maxSize, numBlocks);
-065    assertTrue("calculateBlockSize appears broken.", blockSize * numBlocks <= maxSize);
-066
-067    final Configuration conf = HBaseConfiguration.create();
-068    final LruBlockCache cache = new LruBlockCache(maxSize, blockSize);
-069    EvictionThread evictionThread = cache.getEvictionThread();
-070    assertTrue(evictionThread != null);
-071    while (!evictionThread.isEnteringRun()) {
-072      Thread.sleep(1);
-073    }
-074    final String hfileName = "hfile";
-075    int threads = 10;
-076    final int blocksPerThread = 5 * numBlocks;
-077    for (int run = 0; run != testRuns; ++run) {
-078      final AtomicInteger blockCount = new AtomicInteger(0);
-079      ExecutorService service = Executors.newFixedThreadPool(threads);
-080      for (int i = 0; i != threads; ++i) {
-081        service.execute(new Runnable() {
-082          @Override
-083          public void run() {
-084            for (int blockIndex = 0; blockIndex < blocksPerThread || (!cache.isEvictionInProgress()); ++blockIndex) {
-085              CachedItem block = new CachedItem(hfileName, (int) blockSize, blockCount.getAndIncrement());
-086              boolean inMemory = Math.random() > 0.5;
-087              cache.cacheBlock(block.cacheKey, block, inMemory);
-088            }
-089            cache.evictBlocksByHfileName(hfileName);
-090          }
-091        });
-092      }
-093      service.shutdown();
-094      // The test may fail here if the evict thread frees the blocks too fast
-095      service.awaitTermination(10, TimeUnit.MINUTES);
-096      Waiter.waitFor(conf, 1, 100, new ExplainingPredicate() {
-097        @Override
-098        public boolean evaluate() throws Exception {
-099          return cache.getBlockCount() == 0;
-100        }
-101
-102        @Override
-103        public String explainFailure() throws Exception {
-104          return "Cache block count failed to return to 0";
-105        }
-106      });
-107      assertEquals(0, cache.getBlockCount());
-108      assertEquals(cache.getOverhead(), cache.getCurrentSize());
-109    }
-110  }
-111  @Test
-112  public void testBackgroundEvictionThread() throws Exception {
-113    long maxSize = 10;
-114    int numBlocks = 9;
-115    long blockSize = calculateBlockSizeDefault(maxSize, numBlocks);
-116    assertTrue("calculateBlockSize appears broken.", blockSize * numBlocks <= maxSize);
-117
-118    LruBlockCache cache = new LruBlockCache(maxSize,blockSize);
-119    EvictionThread evictionThread = cache.getEvictionThread();
-120    assertTrue(evictionThread != null);
-121
-122    CachedItem[] blocks = generateFixedBlocks(numBlocks + 1, blockSize, "block");
-123
-124    // Make sure eviction thread has entered run method
-125    while (!evictionThread.isEnteringRun()) {
-126      Thread.sleep(1);
-127    }
-128
-129    // Add all the blocks
-130    for (CachedItem block : blocks) {
-1
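
The rewrite above replaces sleep-based polling with Waiter.waitFor and an
ExplainingPredicate, which retries a condition on an interval and produces a readable
failure message on timeout. A minimal sketch of that pattern, assuming only a counter
that some other thread drains (the waitUntilDrained helper is illustrative, not part
of the commit):

import java.util.concurrent.atomic.AtomicLong;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.Waiter;
import org.apache.hadoop.hbase.Waiter.ExplainingPredicate;

public class WaitForExample {
  static void waitUntilDrained(final AtomicLong pending) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Poll every 100 ms for up to 10 s; on timeout the predicate explains itself.
    Waiter.waitFor(conf, 10000, 100, new ExplainingPredicate<Exception>() {
      @Override
      public boolean evaluate() {
        return pending.get() == 0;
      }

      @Override
      public String explainFailure() {
        return "still " + pending.get() + " pending items";
      }
    });
  }
}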


[14/50] hbase-site git commit: Published site at 26babcf013de696b899d76a3c39434b794440d8d.

2018-05-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/da4482ac/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/ScannerContext.ProgressFields.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/ScannerContext.ProgressFields.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/ScannerContext.ProgressFields.html
index cabb570..90f3b1e 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/ScannerContext.ProgressFields.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/ScannerContext.ProgressFields.html
@@ -107,660 +107,661 @@
 099
 100  private Cell lastPeekedCell = null;
 101
-102  /**
-103   * Tracks the relevant server side 
metrics during scans. null when metrics should not be tracked
-104   */
-105  final ServerSideScanMetrics metrics;
-106
-107  ScannerContext(boolean keepProgress, 
LimitFields limitsToCopy, boolean trackMetrics) {
-108this.limits = new LimitFields();
-109if (limitsToCopy != null) {
-110  this.limits.copy(limitsToCopy);
-111}
+102  // Setting this to true has the same behavior as reaching the time limit.
+103  // It is used when you want to make the current RSRpcServices.scan call return immediately. For
+104  // example, when we want to switch from pread to stream, we can only do it after the rpc call has
+105  // returned.
+106  private boolean returnImmediately;
+107
+108  /**
+109   * Tracks the relevant server side 
metrics during scans. null when metrics should not be tracked
+110   */
+111  final ServerSideScanMetrics metrics;
 112
-113// Progress fields are initialized to 
0
-114progress = new ProgressFields(0, 0, 
0);
-115
-116this.keepProgress = keepProgress;
-117this.scannerState = DEFAULT_STATE;
-118this.metrics = trackMetrics ? new 
ServerSideScanMetrics() : null;
-119  }
-120
-121  boolean isTrackingMetrics() {
-122return this.metrics != null;
-123  }
-124
-125  /**
-126   * Get the metrics instance. Should 
only be called after a call to {@link #isTrackingMetrics()}
-127   * has been made to confirm that 
metrics are indeed being tracked.
-128   * @return {@link 
ServerSideScanMetrics} instance that is tracking metrics for this scan
-129   */
-130  ServerSideScanMetrics getMetrics() {
-131assert isTrackingMetrics();
-132return this.metrics;
-133  }
-134
-135  /**
-136   * @return true if the progress tracked 
so far in this instance will be considered during an
-137   * invocation of {@link 
InternalScanner#next(java.util.List)} or
-138   * {@link 
RegionScanner#next(java.util.List)}. false when the progress tracked so far
-139   * should not be considered and 
should instead be wiped away via {@link #clearProgress()}
-140   */
-141  boolean getKeepProgress() {
-142return keepProgress;
-143  }
-144
-145  void setKeepProgress(boolean 
keepProgress) {
-146this.keepProgress = keepProgress;
-147  }
-148
-149  /**
-150   * Progress towards the batch limit has 
been made. Increment internal tracking of batch progress
-151   */
-152  void incrementBatchProgress(int batch) 
{
-153int currentBatch = 
progress.getBatch();
-154progress.setBatch(currentBatch + 
batch);
-155  }
-156
-157  /**
-158   * Progress towards the size limit has 
been made. Increment internal tracking of size progress
-159   */
-160  void incrementSizeProgress(long 
dataSize, long heapSize) {
-161long curDataSize = 
progress.getDataSize();
-162progress.setDataSize(curDataSize + 
dataSize);
-163long curHeapSize = 
progress.getHeapSize();
-164progress.setHeapSize(curHeapSize + 
heapSize);
-165  }
-166
-167  int getBatchProgress() {
-168return progress.getBatch();
-169  }
-170
-171  long getDataSizeProgress() {
-172return progress.getDataSize();
-173  }
-174
-175  long getHeapSizeProgress() {
-176return progress.getHeapSize();
-177  }
-178
-179  void setProgress(int batchProgress, 
long sizeProgress, long heapSizeProgress) {
-180setBatchProgress(batchProgress);
-181setSizeProgress(sizeProgress, 
heapSizeProgress);
-182  }
-183
-184  void setSizeProgress(long 
dataSizeProgress, long heapSizeProgress) {
-185
progress.setDataSize(dataSizeProgress);
-186
progress.setHeapSize(heapSizeProgress);
-187  }
-188
-189  void setBatchProgress(int 
batchProgress) {
-190progress.setBatch(batchProgress);
-191  }
-192
-193  /**
-194   * Clear away any progress that has 
been made so far. All progress fields are reset to initial
-195   * values
-196   */
-197  void clearProgress() {
-198progress.setFields(0, 0, 0);
-199  }
-200
-201  /**
-202   * Note that this is not a typical 
setter. This setter returns the {@link NextState} that was
-203   * passed in so that methods can be 
invoked against the new state. Furthermore, this pattern
-204   * allows the {@link 
NoLimitScannerContext} to cleanly override this setter and simply return the
-20

[06/50] hbase-site git commit: Published site at 26babcf013de696b899d76a3c39434b794440d8d.

2018-05-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/da4482ac/testdevapidocs/src-html/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.CachedItem.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.CachedItem.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.CachedItem.html
index 1774db3..2ab8397 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.CachedItem.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.CachedItem.html
@@ -39,879 +39,933 @@
 031import 
org.apache.hadoop.conf.Configuration;
 032import 
org.apache.hadoop.hbase.HBaseClassTestRule;
 033import 
org.apache.hadoop.hbase.HBaseConfiguration;
-034import org.apache.hadoop.hbase.Waiter;
-035import 
org.apache.hadoop.hbase.Waiter.ExplainingPredicate;
-036import 
org.apache.hadoop.hbase.io.HeapSize;
-037import 
org.apache.hadoop.hbase.io.hfile.LruBlockCache.EvictionThread;
-038import 
org.apache.hadoop.hbase.testclassification.IOTests;
-039import 
org.apache.hadoop.hbase.testclassification.SmallTests;
-040import 
org.apache.hadoop.hbase.util.ClassSize;
-041import org.junit.ClassRule;
-042import org.junit.Test;
-043import 
org.junit.experimental.categories.Category;
-044
-045/**
-046 * Tests the concurrent 
LruBlockCache.

-047 *
-048 * Tests will ensure it grows and shrinks in size properly,
-049 * evictions run when they're supposed to and do what they should,
-050 * and that cached blocks are accessible when expected to be.
-051 */
-052@Category({IOTests.class, SmallTests.class})
-053public class TestLruBlockCache {
-054
-055  @ClassRule
-056  public static final HBaseClassTestRule CLASS_RULE =
-057      HBaseClassTestRule.forClass(TestLruBlockCache.class);
-058
-059  @Test
-060  public void testCacheEvictionThreadSafe() throws Exception {
-061    long maxSize = 10;
-062    int numBlocks = 9;
-063    int testRuns = 10;
-064    final long blockSize = calculateBlockSizeDefault(maxSize, numBlocks);
-065    assertTrue("calculateBlockSize appears broken.", blockSize * numBlocks <= maxSize);
-066
-067    final Configuration conf = HBaseConfiguration.create();
-068    final LruBlockCache cache = new LruBlockCache(maxSize, blockSize);
-069    EvictionThread evictionThread = cache.getEvictionThread();
-070    assertTrue(evictionThread != null);
-071    while (!evictionThread.isEnteringRun()) {
-072      Thread.sleep(1);
-073    }
-074    final String hfileName = "hfile";
-075    int threads = 10;
-076    final int blocksPerThread = 5 * numBlocks;
-077    for (int run = 0; run != testRuns; ++run) {
-078      final AtomicInteger blockCount = new AtomicInteger(0);
-079      ExecutorService service = Executors.newFixedThreadPool(threads);
-080      for (int i = 0; i != threads; ++i) {
-081        service.execute(new Runnable() {
-082          @Override
-083          public void run() {
-084            for (int blockIndex = 0; blockIndex < blocksPerThread || (!cache.isEvictionInProgress()); ++blockIndex) {
-085              CachedItem block = new CachedItem(hfileName, (int) blockSize, blockCount.getAndIncrement());
-086              boolean inMemory = Math.random() > 0.5;
-087              cache.cacheBlock(block.cacheKey, block, inMemory);
-088            }
-089            cache.evictBlocksByHfileName(hfileName);
-090          }
-091        });
-092      }
-093      service.shutdown();
-094      // The test may fail here if the evict thread frees the blocks too fast
-095      service.awaitTermination(10, TimeUnit.MINUTES);
-096      Waiter.waitFor(conf, 1, 100, new ExplainingPredicate() {
-097        @Override
-098        public boolean evaluate() throws Exception {
-099          return cache.getBlockCount() == 0;
-100        }
-101
-102        @Override
-103        public String explainFailure() throws Exception {
-104          return "Cache block count failed to return to 0";
-105        }
-106      });
-107      assertEquals(0, cache.getBlockCount());
-108      assertEquals(cache.getOverhead(), cache.getCurrentSize());
-109    }
-110  }
-111  @Test
-112  public void testBackgroundEvictionThread() throws Exception {
-113    long maxSize = 10;
-114    int numBlocks = 9;
-115    long blockSize = calculateBlockSizeDefault(maxSize, numBlocks);
-116    assertTrue("calculateBlockSize appears broken.", blockSize * numBlocks <= maxSize);
-117
-118    LruBlockCache cache = new LruBlockCache(maxSize,blockSize);
-119    EvictionThread evictionThread = cache.getEvictionThread();
-120    assertTrue(evictionThread != null);
-121
-122    CachedItem[] blocks = generateFixedBlocks(numBlocks + 1, blockSize, "block");
-123
-124    // Make sure eviction thread has entered run method
-125    while (!evictionThread.isEnteringRun()) {
-126      Thread.sleep(1);
-127    }
-128
-129    // Add all


[13/50] hbase-site git commit: Published site at 26babcf013de696b899d76a3c39434b794440d8d.

2018-05-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/da4482ac/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/ScannerContext.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/ScannerContext.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/ScannerContext.html
index cabb570..90f3b1e 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/ScannerContext.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/ScannerContext.html
@@ -107,660 +107,661 @@
 099
 100  private Cell lastPeekedCell = null;
 101
-102  /**
-103   * Tracks the relevant server side 
metrics during scans. null when metrics should not be tracked
-104   */
-105  final ServerSideScanMetrics metrics;
-106
-107  ScannerContext(boolean keepProgress, 
LimitFields limitsToCopy, boolean trackMetrics) {
-108this.limits = new LimitFields();
-109if (limitsToCopy != null) {
-110  this.limits.copy(limitsToCopy);
-111}
+102  // Setting this to true has the same behavior as reaching the time limit.
+103  // It is used when you want to make the current RSRpcServices.scan call return immediately. For
+104  // example, when we want to switch from pread to stream, we can only do it after the rpc call has
+105  // returned.
+106  private boolean returnImmediately;
+107
+108  /**
+109   * Tracks the relevant server side 
metrics during scans. null when metrics should not be tracked
+110   */
+111  final ServerSideScanMetrics metrics;
 112
-113// Progress fields are initialized to 
0
-114progress = new ProgressFields(0, 0, 
0);
-115
-116this.keepProgress = keepProgress;
-117this.scannerState = DEFAULT_STATE;
-118this.metrics = trackMetrics ? new 
ServerSideScanMetrics() : null;
-119  }
-120
-121  boolean isTrackingMetrics() {
-122return this.metrics != null;
-123  }
-124
-125  /**
-126   * Get the metrics instance. Should 
only be called after a call to {@link #isTrackingMetrics()}
-127   * has been made to confirm that 
metrics are indeed being tracked.
-128   * @return {@link 
ServerSideScanMetrics} instance that is tracking metrics for this scan
-129   */
-130  ServerSideScanMetrics getMetrics() {
-131assert isTrackingMetrics();
-132return this.metrics;
-133  }
-134
-135  /**
-136   * @return true if the progress tracked 
so far in this instance will be considered during an
-137   * invocation of {@link 
InternalScanner#next(java.util.List)} or
-138   * {@link 
RegionScanner#next(java.util.List)}. false when the progress tracked so far
-139   * should not be considered and 
should instead be wiped away via {@link #clearProgress()}
-140   */
-141  boolean getKeepProgress() {
-142return keepProgress;
-143  }
-144
-145  void setKeepProgress(boolean 
keepProgress) {
-146this.keepProgress = keepProgress;
-147  }
-148
-149  /**
-150   * Progress towards the batch limit has 
been made. Increment internal tracking of batch progress
-151   */
-152  void incrementBatchProgress(int batch) 
{
-153int currentBatch = 
progress.getBatch();
-154progress.setBatch(currentBatch + 
batch);
-155  }
-156
-157  /**
-158   * Progress towards the size limit has 
been made. Increment internal tracking of size progress
-159   */
-160  void incrementSizeProgress(long 
dataSize, long heapSize) {
-161long curDataSize = 
progress.getDataSize();
-162progress.setDataSize(curDataSize + 
dataSize);
-163long curHeapSize = 
progress.getHeapSize();
-164progress.setHeapSize(curHeapSize + 
heapSize);
-165  }
-166
-167  int getBatchProgress() {
-168return progress.getBatch();
-169  }
-170
-171  long getDataSizeProgress() {
-172return progress.getDataSize();
-173  }
-174
-175  long getHeapSizeProgress() {
-176return progress.getHeapSize();
-177  }
-178
-179  void setProgress(int batchProgress, 
long sizeProgress, long heapSizeProgress) {
-180setBatchProgress(batchProgress);
-181setSizeProgress(sizeProgress, 
heapSizeProgress);
-182  }
-183
-184  void setSizeProgress(long 
dataSizeProgress, long heapSizeProgress) {
-185
progress.setDataSize(dataSizeProgress);
-186
progress.setHeapSize(heapSizeProgress);
-187  }
-188
-189  void setBatchProgress(int 
batchProgress) {
-190progress.setBatch(batchProgress);
-191  }
-192
-193  /**
-194   * Clear away any progress that has 
been made so far. All progress fields are reset to initial
-195   * values
-196   */
-197  void clearProgress() {
-198progress.setFields(0, 0, 0);
-199  }
-200
-201  /**
-202   * Note that this is not a typical 
setter. This setter returns the {@link NextState} that was
-203   * passed in so that methods can be 
invoked against the new state. Furthermore, this pattern
-204   * allows the {@link 
NoLimitScannerContext} to cleanly override this setter and simply return the
-205   * new state, thus preserving the 
immutability of {@link NoLimitScannerC

[16/50] hbase-site git commit: Published site at 26babcf013de696b899d76a3c39434b794440d8d.

2018-05-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/da4482ac/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/ScannerContext.LimitScope.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/ScannerContext.LimitScope.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/ScannerContext.LimitScope.html
index cabb570..90f3b1e 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/ScannerContext.LimitScope.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/ScannerContext.LimitScope.html
@@ -107,660 +107,661 @@
 099
 100  private Cell lastPeekedCell = null;
 101
-102  /**
-103   * Tracks the relevant server side 
metrics during scans. null when metrics should not be tracked
-104   */
-105  final ServerSideScanMetrics metrics;
-106
-107  ScannerContext(boolean keepProgress, 
LimitFields limitsToCopy, boolean trackMetrics) {
-108this.limits = new LimitFields();
-109if (limitsToCopy != null) {
-110  this.limits.copy(limitsToCopy);
-111}
+102  // Setting this to true has the same behavior as reaching the time limit.
+103  // It is used when you want to make the current RSRpcServices.scan call return immediately. For
+104  // example, when we want to switch from pread to stream, we can only do it after the rpc call has
+105  // returned.
+106  private boolean returnImmediately;
+107
+108  /**
+109   * Tracks the relevant server side 
metrics during scans. null when metrics should not be tracked
+110   */
+111  final ServerSideScanMetrics metrics;
 112
-113// Progress fields are initialized to 
0
-114progress = new ProgressFields(0, 0, 
0);
-115
-116this.keepProgress = keepProgress;
-117this.scannerState = DEFAULT_STATE;
-118this.metrics = trackMetrics ? new 
ServerSideScanMetrics() : null;
-119  }
-120
-121  boolean isTrackingMetrics() {
-122return this.metrics != null;
-123  }
-124
-125  /**
-126   * Get the metrics instance. Should 
only be called after a call to {@link #isTrackingMetrics()}
-127   * has been made to confirm that 
metrics are indeed being tracked.
-128   * @return {@link 
ServerSideScanMetrics} instance that is tracking metrics for this scan
-129   */
-130  ServerSideScanMetrics getMetrics() {
-131assert isTrackingMetrics();
-132return this.metrics;
-133  }
-134
-135  /**
-136   * @return true if the progress tracked 
so far in this instance will be considered during an
-137   * invocation of {@link 
InternalScanner#next(java.util.List)} or
-138   * {@link 
RegionScanner#next(java.util.List)}. false when the progress tracked so far
-139   * should not be considered and 
should instead be wiped away via {@link #clearProgress()}
-140   */
-141  boolean getKeepProgress() {
-142return keepProgress;
-143  }
-144
-145  void setKeepProgress(boolean 
keepProgress) {
-146this.keepProgress = keepProgress;
-147  }
-148
-149  /**
-150   * Progress towards the batch limit has 
been made. Increment internal tracking of batch progress
-151   */
-152  void incrementBatchProgress(int batch) 
{
-153int currentBatch = 
progress.getBatch();
-154progress.setBatch(currentBatch + 
batch);
-155  }
-156
-157  /**
-158   * Progress towards the size limit has 
been made. Increment internal tracking of size progress
-159   */
-160  void incrementSizeProgress(long 
dataSize, long heapSize) {
-161long curDataSize = 
progress.getDataSize();
-162progress.setDataSize(curDataSize + 
dataSize);
-163long curHeapSize = 
progress.getHeapSize();
-164progress.setHeapSize(curHeapSize + 
heapSize);
-165  }
-166
-167  int getBatchProgress() {
-168return progress.getBatch();
-169  }
-170
-171  long getDataSizeProgress() {
-172return progress.getDataSize();
-173  }
-174
-175  long getHeapSizeProgress() {
-176return progress.getHeapSize();
-177  }
-178
-179  void setProgress(int batchProgress, 
long sizeProgress, long heapSizeProgress) {
-180setBatchProgress(batchProgress);
-181setSizeProgress(sizeProgress, 
heapSizeProgress);
-182  }
-183
-184  void setSizeProgress(long 
dataSizeProgress, long heapSizeProgress) {
-185
progress.setDataSize(dataSizeProgress);
-186
progress.setHeapSize(heapSizeProgress);
-187  }
-188
-189  void setBatchProgress(int 
batchProgress) {
-190progress.setBatch(batchProgress);
-191  }
-192
-193  /**
-194   * Clear away any progress that has 
been made so far. All progress fields are reset to initial
-195   * values
-196   */
-197  void clearProgress() {
-198progress.setFields(0, 0, 0);
-199  }
-200
-201  /**
-202   * Note that this is not a typical 
setter. This setter returns the {@link NextState} that was
-203   * passed in so that methods can be 
invoked against the new state. Furthermore, this pattern
-204   * allows the {@link 
NoLimitScannerContext} to cleanly override this setter and simply return the
-205   * new state, thu

[08/50] hbase-site git commit: Published site at 26babcf013de696b899d76a3c39434b794440d8d.

2018-05-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/da4482ac/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestSwitchToStreamRead.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestSwitchToStreamRead.html
 
b/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestSwitchToStreamRead.html
index 224dcde..2217fe3 100644
--- 
a/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestSwitchToStreamRead.html
+++ 
b/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestSwitchToStreamRead.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = {"i0":9,"i1":9,"i2":10};
+var methods = {"i0":9,"i1":9,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10};
 var tabs = {65535:["t0","All Methods"],1:["t1","Static 
Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -50,7 +50,7 @@ var activeTableTab = "activeTableTab";
 
 
 Prev Class
-Next Class
+Next Class
 
 
 Frames
@@ -74,7 +74,7 @@ var activeTableTab = "activeTableTab";
 
 
 Summary: 
-Nested | 
+Nested | 
 Field | 
 Constr | 
 Method
@@ -109,7 +109,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-public class TestSwitchToStreamRead
+public class TestSwitchToStreamRead
 extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">Object
 
 
@@ -117,6 +117,37 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 
+
+
+
+
+
+Nested Class Summary
+
+Nested Classes 
+
+Modifier and Type
+Class and Description
+
+
+static class 
+TestSwitchToStreamRead.MatchLastRowCellNextColFilter 
+
+
+static class 
+TestSwitchToStreamRead.MatchLastRowCellNextRowFilter 
+
+
+static class 
+TestSwitchToStreamRead.MatchLastRowFilterRowFilter 
+
+
+static class 
+TestSwitchToStreamRead.MatchLastRowKeyFilter 
+
+
+
+
 
 
 
@@ -201,6 +232,26 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 void
 test() 
 
+
+private void
+testFilter(org.apache.hadoop.hbase.filter.Filter filter) 
+
+
+void
+testFilterCellNextCol() 
+
+
+void
+testFilterCellNextRow() 
+
+
+void
+testFilterRow() 
+
+
+void
+testFilterRowKey() 
+
 
 
 
@@ -229,7 +280,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 CLASS_RULE
-public static final HBaseClassTestRule CLASS_RULE
+public static final HBaseClassTestRule CLASS_RULE
 
 
 
@@ -238,7 +289,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 UTIL
-private static final HBaseTestingUtility UTIL
+private static final HBaseTestingUtility UTIL
 
 
 
@@ -247,7 +298,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 TABLE_NAME
-private static org.apache.hadoop.hbase.TableName TABLE_NAME
+private static org.apache.hadoop.hbase.TableName TABLE_NAME
 
 
 
@@ -256,7 +307,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 FAMILY
-private static byte[] FAMILY
+private static byte[] FAMILY
 
 
 
@@ -265,7 +316,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 QUAL
-private static byte[] QUAL
+private static byte[] QUAL
 
 
 
@@ -274,7 +325,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 VALUE_PREFIX
-private static https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String VALUE_PREFIX
+private static https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String VALUE_PREFIX
 
 
 
@@ -283,7 +334,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 REGION
-private static org.apache.hadoop.hbase.regionserver.HRegion REGION
+private static org.apache.hadoop.hbase.regionserver.HRegion REGION
 
 
 
@@ -300,7 +351,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 TestSwitchToStreamRead
-public TestSwitchToStreamRead()
+public TestSwitchToStreamRead()
 
 
 
@@ -317,7 +368,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 setUp
-public static void setUp()
+public static void setUp()
   throws https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true";
 title="class or interface in java.io">IOException
 
 Throws:
@@ -331,7 +382,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 tearDown
-public static void tearDown()
+public static void tearDown()
  throws https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true";
 title="class or interface in java.io">IOException
 
 Throws:
@@ -342,10 +393,10 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 
-
+
 
 test
-public void test()
+public void test()
   throws https://
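
The four nested filter classes listed above are only named in this generated-docs
diff; their bodies are not shown here. As a rough illustration of the shape such a
filter takes, here is a hypothetical FilterBase subclass that keeps only one target
row (the class name and its constructor are assumptions, not the committed code):

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.filter.FilterBase;

// Hypothetical example: pass only the row whose key equals targetRow.
public class MatchSingleRowKeyFilter extends FilterBase {
  private final byte[] targetRow;

  public MatchSingleRowKeyFilter(byte[] targetRow) {
    this.targetRow = targetRow;
  }

  @Override
  public boolean filterRowKey(Cell firstRowCell) {
    // Returning true tells the scanner to skip (filter out) this row.
    return !CellUtil.matchingRows(firstRowCell, targetRow);
  }
}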

[03/50] hbase-site git commit: Published site at 26babcf013de696b899d76a3c39434b794440d8d.

2018-05-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/da4482ac/testdevapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketCache.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketCache.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketCache.html
index dd58cd9..01157c1 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketCache.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketCache.html
@@ -27,394 +27,433 @@
 019
 020import static 
org.junit.Assert.assertEquals;
 021import static 
org.junit.Assert.assertFalse;
-022import static 
org.junit.Assert.assertTrue;
-023
-024import java.io.FileNotFoundException;
-025import java.io.IOException;
-026import java.util.ArrayList;
-027import java.util.Arrays;
-028import java.util.List;
-029import java.util.Map;
-030import java.util.Random;
-031import java.util.Set;
-032import 
java.util.concurrent.locks.ReentrantReadWriteLock;
-033import 
org.apache.hadoop.conf.Configuration;
-034import org.apache.hadoop.fs.Path;
-035import 
org.apache.hadoop.hbase.HBaseClassTestRule;
-036import 
org.apache.hadoop.hbase.HBaseConfiguration;
-037import 
org.apache.hadoop.hbase.HBaseTestingUtility;
-038import 
org.apache.hadoop.hbase.io.hfile.BlockCacheKey;
-039import 
org.apache.hadoop.hbase.io.hfile.CacheTestUtils;
-040import 
org.apache.hadoop.hbase.io.hfile.CacheTestUtils.HFileBlockPair;
-041import 
org.apache.hadoop.hbase.io.hfile.Cacheable;
-042import 
org.apache.hadoop.hbase.io.hfile.bucket.BucketAllocator.BucketSizeInfo;
-043import 
org.apache.hadoop.hbase.io.hfile.bucket.BucketAllocator.IndexStatistics;
-044import 
org.apache.hadoop.hbase.testclassification.IOTests;
-045import 
org.apache.hadoop.hbase.testclassification.MediumTests;
-046import org.junit.After;
-047import org.junit.Before;
-048import org.junit.ClassRule;
-049import org.junit.Test;
-050import 
org.junit.experimental.categories.Category;
-051import org.junit.runner.RunWith;
-052import org.junit.runners.Parameterized;
-053
-054import 
org.apache.hbase.thirdparty.com.google.common.collect.ImmutableMap;
-055
-056/**
-057 * Basic test of BucketCache.Puts and 
gets.
-058 * 

-059 * Tests will ensure that blocks' data correctness under several threads concurrency
-060 */
-061@RunWith(Parameterized.class)
-062@Category({ IOTests.class, MediumTests.class })
-063public class TestBucketCache {
-064
-065  @ClassRule
-066  public static final HBaseClassTestRule CLASS_RULE =
-067      HBaseClassTestRule.forClass(TestBucketCache.class);
-068
-069  private static final Random RAND = new Random();
-070
-071  @Parameterized.Parameters(name = "{index}: blockSize={0}, bucketSizes={1}")
-072  public static Iterable data() {
-073    return Arrays.asList(new Object[][] {
-074      { 8192, null }, // TODO: why is 8k the default blocksize for these tests?
-075      {
-076        16 * 1024,
-077        new int[] { 2 * 1024 + 1024, 4 * 1024 + 1024, 8 * 1024 + 1024, 16 * 1024 + 1024,
-078          28 * 1024 + 1024, 32 * 1024 + 1024, 64 * 1024 + 1024, 96 * 1024 + 1024,
-079          128 * 1024 + 1024 } } });
-080  }
-081
-082  @Parameterized.Parameter(0)
-083  public int constructedBlockSize;
-084
-085  @Parameterized.Parameter(1)
-086  public int[] constructedBlockSizes;
-087
-088  BucketCache cache;
-089  final int CACHE_SIZE = 100;
-090  final int NUM_BLOCKS = 100;
-091  final int BLOCK_SIZE = CACHE_SIZE / NUM_BLOCKS;
-092  final int NUM_THREADS = 100;
-093  final int NUM_QUERIES = 1;
+022import static org.junit.Assert.assertNull;
+023import static org.junit.Assert.assertTrue;
+024
+025import java.io.FileNotFoundException;
+026import java.io.IOException;
+027import java.nio.ByteBuffer;
+028import java.util.ArrayList;
+029import java.util.Arrays;
+030import java.util.List;
+031import java.util.Map;
+032import java.util.Random;
+033import java.util.Set;
+034import java.util.concurrent.locks.ReentrantReadWriteLock;
+035import org.apache.hadoop.conf.Configuration;
+036import org.apache.hadoop.fs.Path;
+037import org.apache.hadoop.hbase.HBaseClassTestRule;
+038import org.apache.hadoop.hbase.HBaseConfiguration;
+039import org.apache.hadoop.hbase.HBaseTestingUtility;
+040import org.apache.hadoop.hbase.HConstants;
+041import org.apache.hadoop.hbase.io.hfile.BlockCacheKey;
+042import org.apache.hadoop.hbase.io.hfile.BlockType;
+043import org.apache.hadoop.hbase.io.hfile.CacheTestUtils;
+044import org.apache.hadoop.hbase.io.hfile.CacheTestUtils.HFileBlockPair;
+045import org.apache.hadoop.hbase.io.hfile.Cacheable;
+046import org.apache.hadoop.hbase.io.hfile.HFileBlock;
+047import org.apache.hadoop.hbase.io.hfile.HFileContext;
+048import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
+049import org.apache.hadoop.hbase.io.hfile.bucket.BucketAlloca
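
TestBucketCache runs every test method once per entry returned by data(), via JUnit
4's Parameterized runner: each Object[] row is bound to the @Parameterized.Parameter
fields before the tests execute. A minimal generic sketch of that pattern (not the
HBase test itself):

import static org.junit.Assert.assertTrue;

import java.util.Arrays;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;

@RunWith(Parameterized.class)
public class BlockSizeParamExample {

  @Parameterized.Parameters(name = "{index}: blockSize={0}")
  public static Iterable<Object[]> data() {
    return Arrays.asList(new Object[][] { { 8192 }, { 16 * 1024 } });
  }

  @Parameterized.Parameter(0)
  public int blockSize;

  @Test
  public void blockSizeIsPositive() {
    // Runs once for each parameter row above.
    assertTrue(blockSize > 0);
  }
}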


[01/50] hbase-site git commit: Published site at 26babcf013de696b899d76a3c39434b794440d8d.

2018-05-15 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 58454ffaa -> da4482ac2


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/da4482ac/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestSwitchToStreamRead.MatchLastRowKeyFilter.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestSwitchToStreamRead.MatchLastRowKeyFilter.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestSwitchToStreamRead.MatchLastRowKeyFilter.html
new file mode 100644
index 000..6f38b3f
--- /dev/null
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestSwitchToStreamRead.MatchLastRowKeyFilter.html
@@ -0,0 +1,334 @@
+http://www.w3.org/TR/html4/loose.dtd";>
+
+
+Source code
+
+
+
+
+001/**
+002 * Licensed to the Apache Software 
Foundation (ASF) under one
+003 * or more contributor license 
agreements.  See the NOTICE file
+004 * distributed with this work for 
additional information
+005 * regarding copyright ownership.  The 
ASF licenses this file
+006 * to you under the Apache License, 
Version 2.0 (the
+007 * "License"); you may not use this file 
except in compliance
+008 * with the License.  You may obtain a 
copy of the License at
+009 *
+010 * 
http://www.apache.org/licenses/LICENSE-2.0
+011 *
+012 * Unless required by applicable law or 
agreed to in writing, software
+013 * distributed under the License is 
distributed on an "AS IS" BASIS,
+014 * WITHOUT WARRANTIES OR CONDITIONS OF 
ANY KIND, either express or implied.
+015 * See the License for the specific 
language governing permissions and
+016 * limitations under the License.
+017 */
+018package 
org.apache.hadoop.hbase.regionserver;
+019
+020import static 
org.junit.Assert.assertEquals;
+021import static 
org.junit.Assert.assertFalse;
+022import static 
org.junit.Assert.assertTrue;
+023
+024import java.io.IOException;
+025import java.util.ArrayList;
+026import java.util.List;
+027import 
java.util.concurrent.ThreadLocalRandom;
+028import org.apache.hadoop.hbase.Cell;
+029import 
org.apache.hadoop.hbase.HBaseClassTestRule;
+030import 
org.apache.hadoop.hbase.HBaseTestingUtility;
+031import 
org.apache.hadoop.hbase.TableName;
+032import 
org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
+033import 
org.apache.hadoop.hbase.client.Put;
+034import 
org.apache.hadoop.hbase.client.Result;
+035import 
org.apache.hadoop.hbase.client.Scan;
+036import 
org.apache.hadoop.hbase.client.TableDescriptorBuilder;
+037import 
org.apache.hadoop.hbase.filter.Filter;
+038import 
org.apache.hadoop.hbase.filter.FilterBase;
+039import 
org.apache.hadoop.hbase.regionserver.HRegion.RegionScannerImpl;
+040import 
org.apache.hadoop.hbase.regionserver.ScannerContext.LimitScope;
+041import 
org.apache.hadoop.hbase.testclassification.MediumTests;
+042import 
org.apache.hadoop.hbase.testclassification.RegionServerTests;
+043import 
org.apache.hadoop.hbase.util.Bytes;
+044import org.junit.AfterClass;
+045import org.junit.BeforeClass;
+046import org.junit.ClassRule;
+047import org.junit.Ignore;
+048import org.junit.Test;
+049import 
org.junit.experimental.categories.Category;
+050
+051@Category({ RegionServerTests.class, 
MediumTests.class })
+052public class TestSwitchToStreamRead {
+053
+054  @ClassRule
+055  public static final HBaseClassTestRule 
CLASS_RULE =
+056
HBaseClassTestRule.forClass(TestSwitchToStreamRead.class);
+057
+058  private static final 
HBaseTestingUtility UTIL = new HBaseTestingUtility();
+059
+060  private static TableName TABLE_NAME = 
TableName.valueOf("stream");
+061
+062  private static byte[] FAMILY = 
Bytes.toBytes("cf");
+063
+064  private static byte[] QUAL = 
Bytes.toBytes("cq");
+065
+066  private static String VALUE_PREFIX;
+067
+068  private static HRegion REGION;
+069
+070  @BeforeClass
+071  public static void setUp() throws 
IOException {
+072
UTIL.getConfiguration().setLong(StoreScanner.STORESCANNER_PREAD_MAX_BYTES, 
2048);
+073StringBuilder sb = new 
StringBuilder(256);
+074for (int i = 0; i < 255; i++) {
+075  sb.append((char) 
ThreadLocalRandom.current().nextInt('A', 'z' + 1));
+076}
+077VALUE_PREFIX = 
sb.append("-").toString();
+078REGION = UTIL.createLocalHRegion(
+079  
TableDescriptorBuilder.newBuilder(TABLE_NAME)
+080.setColumnFamily(
+081  
ColumnFamilyDescriptorBuilder.newBuilder(FAMILY).setBlocksize(1024).build())
+082.build(),
+083  null, null);
+084for (int i = 0; i < 900; i++) {
+085  REGION
+086.put(new 
Put(Bytes.toBytes(i)).addColumn(FAMILY, QUAL, Bytes.toBytes(VALUE_PREFIX + 
i)));
+087}
+088REGION.flush(true);
+089for (int i = 900; i < 1000; i++) 
{
+090  REGION
+091.put(new 
Put(Bytes.toBytes(i)).addColumn(FAMILY, QUAL, Bytes.toBytes(VALUE_PREFIX + 
i)));
+092}
+093  }
+094
+095  @AfterClass
+096  public static void tearDown() throws 
IOException {
+097 

hbase git commit: HBASE-20457 Return immediately for a scan rpc call when we want to switch from pread to stream

2018-05-15 Thread zhangduo
Repository: hbase
Updated Branches:
  refs/heads/branch-2 b7def9b69 -> 60b8344cf


HBASE-20457 Return immediately for a scan rpc call when we want to switch from 
pread to stream


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/60b8344c
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/60b8344c
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/60b8344c

Branch: refs/heads/branch-2
Commit: 60b8344cf1ae5d481b38b85696cb6ed3c7f1260d
Parents: b7def9b
Author: zhangduo 
Authored: Thu Apr 26 17:54:13 2018 +0800
Committer: zhangduo 
Committed: Tue May 15 21:09:04 2018 +0800

--
 .../RpcRetryingCallerWithReadReplicas.java  |   1 +
 .../hbase/regionserver/ScannerContext.java  |  27 ++--
 .../hadoop/hbase/regionserver/StoreScanner.java |  17 ++-
 .../regionserver/TestSwitchToStreamRead.java| 141 +--
 4 files changed, 164 insertions(+), 22 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/60b8344c/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerWithReadReplicas.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerWithReadReplicas.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerWithReadReplicas.java
index 4a31cff..a0be0bf 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerWithReadReplicas.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerWithReadReplicas.java
@@ -279,6 +279,7 @@ public class RpcRetryingCallerWithReadReplicas {
   throws RetriesExhaustedException, DoNotRetryIOException {
 Throwable t = e.getCause();
 assert t != null; // That's what ExecutionException is about: holding an 
exception
+t.printStackTrace();
 
 if (t instanceof RetriesExhaustedException) {
   throw (RetriesExhaustedException) t;

http://git-wip-us.apache.org/repos/asf/hbase/blob/60b8344c/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScannerContext.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScannerContext.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScannerContext.java
index 9b19a40..4c5923b 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScannerContext.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScannerContext.java
@@ -99,6 +99,12 @@ public class ScannerContext {
 
   private Cell lastPeekedCell = null;
 
+  // Setting this to true has the same behavior as reaching the time limit.
+  // It is used when you want to make the current RSRpcServices.scan call return
+  // immediately. For example, when we want to switch from pread to stream, we
+  // can only do it after the rpc call has returned.
+  private boolean returnImmediately;
+
   /**
* Tracks the relevant server side metrics during scans. null when metrics 
should not be tracked
*/
@@ -278,7 +284,8 @@ public class ScannerContext {
* @return true if the time limit can be enforced in the checker's scope
*/
   boolean hasTimeLimit(LimitScope checkerScope) {
-return limits.canEnforceTimeLimitFromScope(checkerScope) && 
limits.getTime() > 0;
+return limits.canEnforceTimeLimitFromScope(checkerScope) &&
+  (limits.getTime() > 0 || returnImmediately);
   }
 
   /**
@@ -338,7 +345,8 @@ public class ScannerContext {
* @return true when the limit is enforceable from the checker's scope and 
it has been reached
*/
   boolean checkTimeLimit(LimitScope checkerScope) {
-return hasTimeLimit(checkerScope) && (System.currentTimeMillis() >= 
limits.getTime());
+return hasTimeLimit(checkerScope) &&
+  (returnImmediately || System.currentTimeMillis() >= limits.getTime());
   }
 
   /**
@@ -358,6 +366,10 @@ public class ScannerContext {
 this.lastPeekedCell = lastPeekedCell;
   }
 
+  void returnImmediately() {
+this.returnImmediately = true;
+  }
+
   @Override
   public String toString() {
 StringBuilder sb = new StringBuilder();
@@ -570,11 +582,6 @@ public class ScannerContext {
 LimitFields() {
 }
 
-LimitFields(int batch, LimitScope sizeScope, long size, long heapSize, 
LimitScope timeScope,
-long time) {
-  setFields(batch, sizeScope, size, heapSize, timeScope, time);
-}
-
 void copy(LimitFields limitsToCopy) {
   if (limitsToCopy != null) {
 setFields(limitsToCopy.getBatch(), limitsToCopy.getSizeScope(), 
limitsToCopy.getDataSize(),
@@ -722,12 +729,6 @@ public class ScannerContext {
 // such AND data cells of Cells which are in on heap area.
 lo
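
In short, returnImmediately() piggybacks on the existing time-limit plumbing: once
set, hasTimeLimit() and checkTimeLimit() both report an expired limit regardless of
the clock, so the in-flight scan RPC returns to the client and the pread-to-stream
switch can happen before the next call. A condensed, self-contained sketch of that
logic (a simplified stand-in, not the ScannerContext class itself):

// Simplified stand-in for the ScannerContext time-limit logic above.
public class TimeLimitSketch {
  private long timeLimitMillis = -1; // <= 0 means no explicit time limit
  private boolean returnImmediately;

  // Called when the current scan RPC must hand control back to the client,
  // e.g. before switching the underlying reads from pread to stream.
  void returnImmediately() {
    this.returnImmediately = true;
  }

  boolean hasTimeLimit() {
    return timeLimitMillis > 0 || returnImmediately;
  }

  boolean checkTimeLimit() {
    return hasTimeLimit()
        && (returnImmediately || System.currentTimeMillis() >= timeLimitMillis);
  }
}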

hbase git commit: HBASE-20457 Return immediately for a scan rpc call when we want to switch from pread to stream

2018-05-15 Thread zhangduo
Repository: hbase
Updated Branches:
  refs/heads/master d2daada97 -> 26babcf01


HBASE-20457 Return immediately for a scan rpc call when we want to switch from 
pread to stream


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/26babcf0
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/26babcf0
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/26babcf0

Branch: refs/heads/master
Commit: 26babcf013de696b899d76a3c39434b794440d8d
Parents: d2daada
Author: zhangduo 
Authored: Thu Apr 26 17:54:13 2018 +0800
Committer: zhangduo 
Committed: Tue May 15 20:56:20 2018 +0800

--
 .../RpcRetryingCallerWithReadReplicas.java  |   1 +
 .../hbase/regionserver/ScannerContext.java  |  27 ++--
 .../hadoop/hbase/regionserver/StoreScanner.java |  17 ++-
 .../regionserver/TestSwitchToStreamRead.java| 141 +--
 4 files changed, 164 insertions(+), 22 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/26babcf0/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerWithReadReplicas.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerWithReadReplicas.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerWithReadReplicas.java
index 4a31cff..a0be0bf 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerWithReadReplicas.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerWithReadReplicas.java
@@ -279,6 +279,7 @@ public class RpcRetryingCallerWithReadReplicas {
   throws RetriesExhaustedException, DoNotRetryIOException {
 Throwable t = e.getCause();
 assert t != null; // That's what ExecutionException is about: holding an 
exception
+t.printStackTrace();
 
 if (t instanceof RetriesExhaustedException) {
   throw (RetriesExhaustedException) t;

http://git-wip-us.apache.org/repos/asf/hbase/blob/26babcf0/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScannerContext.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScannerContext.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScannerContext.java
index 10f9b24..cc6ec84 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScannerContext.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScannerContext.java
@@ -99,6 +99,12 @@ public class ScannerContext {
 
   private Cell lastPeekedCell = null;
 
+  // Setting this to true has the same behavior as reaching the time limit.
+  // It is used when you want to make the current RSRpcServices.scan call return
+  // immediately. For example, when we want to switch from pread to stream, we
+  // can only do it after the rpc call has returned.
+  private boolean returnImmediately;
+
   /**
* Tracks the relevant server side metrics during scans. null when metrics 
should not be tracked
*/
@@ -247,7 +253,8 @@ public class ScannerContext {
* @return true if the time limit can be enforced in the checker's scope
*/
   boolean hasTimeLimit(LimitScope checkerScope) {
-return limits.canEnforceTimeLimitFromScope(checkerScope) && 
limits.getTime() > 0;
+return limits.canEnforceTimeLimitFromScope(checkerScope) &&
+  (limits.getTime() > 0 || returnImmediately);
   }
 
   /**
@@ -307,7 +314,8 @@ public class ScannerContext {
* @return true when the limit is enforceable from the checker's scope and 
it has been reached
*/
   boolean checkTimeLimit(LimitScope checkerScope) {
-return hasTimeLimit(checkerScope) && (System.currentTimeMillis() >= 
limits.getTime());
+return hasTimeLimit(checkerScope) &&
+  (returnImmediately || System.currentTimeMillis() >= limits.getTime());
   }
 
   /**
@@ -327,6 +335,10 @@ public class ScannerContext {
 this.lastPeekedCell = lastPeekedCell;
   }
 
+  void returnImmediately() {
+this.returnImmediately = true;
+  }
+
   @Override
   public String toString() {
 StringBuilder sb = new StringBuilder();
@@ -539,11 +551,6 @@ public class ScannerContext {
 LimitFields() {
 }
 
-LimitFields(int batch, LimitScope sizeScope, long size, long heapSize, 
LimitScope timeScope,
-long time) {
-  setFields(batch, sizeScope, size, heapSize, timeScope, time);
-}
-
 void copy(LimitFields limitsToCopy) {
   if (limitsToCopy != null) {
 setFields(limitsToCopy.getBatch(), limitsToCopy.getSizeScope(), 
limitsToCopy.getDataSize(),
@@ -691,12 +698,6 @@ public class ScannerContext {
 // such AND data cells of Cells which are in on heap area.
 long h

[hbase] Git Push Summary [forced push!] [Forced Update!]

2018-05-15 Thread zhangduo
Repository: hbase
Updated Branches:
  refs/heads/master 5d5c2d204 -> d2daada97 (forced update)


hbase git commit: HBASE-20576 Check remote WAL directory when creating peer and transiting peer to A

2018-05-15 Thread zhangduo
Repository: hbase
Updated Branches:
  refs/heads/HBASE-19064 774e6b6c7 -> 5d5c2d204


HBASE-20576 Check remote WAL directory when creating peer and transiting peer 
to A


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/5d5c2d20
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/5d5c2d20
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/5d5c2d20

Branch: refs/heads/HBASE-19064
Commit: 5d5c2d204b1c4d42cf126e68ba1473f3fc90faec
Parents: 774e6b6
Author: zhangduo 
Authored: Tue May 15 15:07:40 2018 +0800
Committer: zhangduo 
Committed: Tue May 15 18:36:42 2018 +0800

--
 .../replication/ReplicationPeerManager.java | 19 +++--
 ...ransitPeerSyncReplicationStateProcedure.java | 73 +---
 .../replication/TestReplicationAdmin.java   | 57 ---
 3 files changed, 110 insertions(+), 39 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/5d5c2d20/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
index e1d8b51..8e49137 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hbase.master.replication;
 
 import java.io.IOException;
+import java.net.URI;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.EnumSet;
@@ -31,6 +32,7 @@ import java.util.regex.Pattern;
 import java.util.stream.Collectors;
 import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
@@ -45,7 +47,6 @@ import 
org.apache.hadoop.hbase.replication.ReplicationQueueStorage;
 import org.apache.hadoop.hbase.replication.ReplicationStorageFactory;
 import org.apache.hadoop.hbase.replication.ReplicationUtils;
 import org.apache.hadoop.hbase.replication.SyncReplicationState;
-import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.zookeeper.ZKConfig;
 import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
 import org.apache.yetus.audience.InterfaceAudience;
@@ -193,9 +194,9 @@ public class ReplicationPeerManager {
   }
 
   /**
-   * @return the old state, and whether the peer is enabled.
+   * @return the old description of the peer
*/
-  Pair 
preTransitPeerSyncReplicationState(String peerId,
+  ReplicationPeerDescription preTransitPeerSyncReplicationState(String peerId,
   SyncReplicationState state) throws DoNotRetryIOException {
 ReplicationPeerDescription desc = checkPeerExists(peerId);
 SyncReplicationState fromState = desc.getSyncReplicationState();
@@ -204,7 +205,7 @@ public class ReplicationPeerManager {
   throw new DoNotRetryIOException("Can not transit current cluster state 
from " + fromState +
 " to " + state + " for peer id=" + peerId);
 }
-return Pair.newPair(fromState, desc.isEnabled());
+return desc;
   }
 
   public void addPeer(String peerId, ReplicationPeerConfig peerConfig, boolean 
enabled)
@@ -384,6 +385,16 @@ public class ReplicationPeerManager {
   "Only support replicated table config for sync replication peer");
   }
 }
+Path remoteWALDir = new Path(peerConfig.getRemoteWALDir());
+if (!remoteWALDir.isAbsolute()) {
+  throw new DoNotRetryIOException(
+"The remote WAL directory " + peerConfig.getRemoteWALDir() + " is not 
absolute");
+}
+URI remoteWALDirUri = remoteWALDir.toUri();
+if (remoteWALDirUri.getScheme() == null || remoteWALDirUri.getAuthority() 
== null) {
+  throw new DoNotRetryIOException("The remote WAL directory " + 
peerConfig.getRemoteWALDir() +
+" is not qualified, you must provide scheme and authority");
+}
   }
 
   /**
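
For illustration, a minimal standalone sketch of the same validation, assuming only Hadoop's Path/URI semantics (the class and method names below are invented for the example, they are not part of the patch):

    import java.net.URI;
    import org.apache.hadoop.fs.Path;

    public final class RemoteWALDirCheck {

      // Returns null when the directory is acceptable, otherwise the reason it is rejected.
      static String whyInvalid(String remoteWALDir) {
        Path dir = new Path(remoteWALDir);
        if (!dir.isAbsolute()) {
          return "not absolute";
        }
        URI uri = dir.toUri();
        // A fully qualified path such as hdfs://nn1:8020/remoteWALs carries both scheme and authority.
        if (uri.getScheme() == null || uri.getAuthority() == null) {
          return "not qualified, scheme and authority are required";
        }
        return null;
      }

      public static void main(String[] args) {
        System.out.println(whyInvalid("remoteWALs"));                 // not absolute
        System.out.println(whyInvalid("/remoteWALs"));                // not qualified, ...
        System.out.println(whyInvalid("hdfs://nn1:8020/remoteWALs")); // null, i.e. valid
      }
    }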

http://git-wip-us.apache.org/repos/asf/hbase/blob/5d5c2d20/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/TransitPeerSyncReplicationStateProcedure.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/TransitPeerSyncReplicationStateProcedure.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/TransitPeerSyncReplicationStateProcedure.java
index 0175296..ebe7a93 100644
--- 
a/hbase-server/src/main/java/org/apach

[07/27] hbase git commit: HBASE-19747 Introduce a special WALProvider for synchronous replication

2018-05-15 Thread zhangduo
HBASE-19747 Introduce a special WALProvider for synchronous replication


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/f0e3bec5
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/f0e3bec5
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/f0e3bec5

Branch: refs/heads/master
Commit: f0e3bec53138fa622673af63f474d4d148bb507c
Parents: 5f28505
Author: zhangduo 
Authored: Fri Jan 19 18:38:39 2018 +0800
Committer: zhangduo 
Committed: Tue May 15 08:39:04 2018 +0800

--
 .../hbase/regionserver/wal/AbstractFSWAL.java   |   7 +
 .../hbase/regionserver/wal/AsyncFSWAL.java  |   1 -
 .../hbase/regionserver/wal/DualAsyncFSWAL.java  |   4 +-
 .../hadoop/hbase/regionserver/wal/FSHLog.java   |   4 -
 .../regionserver/PeerActionListener.java|  33 +++
 .../SynchronousReplicationPeerProvider.java |  35 +++
 .../hadoop/hbase/wal/AbstractFSWALProvider.java |   1 +
 .../hadoop/hbase/wal/AsyncFSWALProvider.java|  18 +-
 .../hbase/wal/NettyAsyncFSWALConfigHelper.java  |   8 +-
 .../hbase/wal/RegionGroupingProvider.java   |  13 +-
 .../wal/SynchronousReplicationWALProvider.java  | 225 +++
 .../org/apache/hadoop/hbase/wal/WALFactory.java |  37 ++-
 .../org/apache/hadoop/hbase/wal/WALKeyImpl.java |  16 +-
 .../regionserver/TestCompactionPolicy.java  |   1 +
 .../regionserver/TestFailedAppendAndSync.java   | 122 +-
 .../hadoop/hbase/regionserver/TestHRegion.java  |  24 +-
 .../TestHRegionWithInMemoryFlush.java   |   7 -
 .../hbase/regionserver/TestRegionIncrement.java |  20 +-
 .../hbase/regionserver/TestWALLockup.java   |   1 +
 .../regionserver/wal/AbstractTestWALReplay.java |   1 +
 .../regionserver/wal/ProtobufLogTestHelper.java |  44 +++-
 .../hbase/regionserver/wal/TestAsyncFSWAL.java  |  13 +-
 .../regionserver/wal/TestAsyncWALReplay.java|   4 +-
 .../wal/TestCombinedAsyncWriter.java|   3 +-
 .../hbase/regionserver/wal/TestFSHLog.java  |  15 +-
 .../hbase/regionserver/wal/TestWALReplay.java   |   1 +
 .../apache/hadoop/hbase/wal/IOTestProvider.java |   2 -
 .../TestSynchronousReplicationWALProvider.java  | 153 +
 28 files changed, 659 insertions(+), 154 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/f0e3bec5/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
index ce8dafa..4816d77 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
@@ -430,6 +430,13 @@ public abstract class AbstractFSWAL<W extends WriterBase> implements WAL {
 this.implClassName = getClass().getSimpleName();
   }
 
+  /**
+   * Used to initialize the WAL. Usually just call rollWriter to create the 
first log writer.
+   */
+  public void init() throws IOException {
+rollWriter();
+  }
+
   @Override
   public void registerWALActionsListener(WALActionsListener listener) {
 this.listeners.add(listener);
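
The new init() hook is the usual two-phase construction pattern: rollWriter() is overridable, so calling it from the base constructor could execute subclass code before the subclass's own fields are assigned. A self-contained sketch of the idea, with invented names:

    import java.io.IOException;

    abstract class BaseWal {
      // No rollWriter() call here on purpose: subclass fields are not assigned yet.
      BaseWal() {}

      // Callers do: wal = new SomeWal(); wal.init();
      public void init() throws IOException {
        rollWriter(); // creates the first log writer
      }

      protected abstract void rollWriter() throws IOException;
    }

    public class SomeWal extends BaseWal {
      // This initializer runs only after the BaseWal constructor returns, so it
      // would still be null if the super constructor invoked rollWriter() itself.
      private final String dir = "/wals";

      @Override
      protected void rollWriter() throws IOException {
        System.out.println("rolling writer under " + dir);
      }

      public static void main(String[] args) throws IOException {
        BaseWal wal = new SomeWal();
        wal.init(); // prints: rolling writer under /wals
      }
    }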

http://git-wip-us.apache.org/repos/asf/hbase/blob/f0e3bec5/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java
index 0bee9d6..17133ed 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java
@@ -248,7 +248,6 @@ public class AsyncFSWAL extends AbstractFSWAL<AsyncWriter> {
 batchSize = conf.getLong(WAL_BATCH_SIZE, DEFAULT_WAL_BATCH_SIZE);
 waitOnShutdownInSeconds = 
conf.getInt(ASYNC_WAL_WAIT_ON_SHUTDOWN_IN_SECONDS,
   DEFAULT_ASYNC_WAL_WAIT_ON_SHUTDOWN_IN_SECONDS);
-rollWriter();
   }
 
   private static boolean waitingRoll(int epochAndState) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/f0e3bec5/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/DualAsyncFSWAL.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/DualAsyncFSWAL.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/DualAsyncFSWAL.java
index 42b0dae..0495337 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/DualAsyncFSWAL.java
+++ 
b/hbase-server/s

[14/27] hbase git commit: HBASE-19957 General framework to transit sync replication state

2018-05-15 Thread zhangduo
HBASE-19957 General framework to transit sync replication state


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/1c8063d5
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/1c8063d5
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/1c8063d5

Branch: refs/heads/master
Commit: 1c8063d500bd5f3fbe736f899c6ebaa122a5947b
Parents: 863ea47
Author: zhangduo 
Authored: Fri Feb 9 18:33:28 2018 +0800
Committer: zhangduo 
Committed: Tue May 15 08:39:04 2018 +0800

--
 .../replication/ReplicationPeerConfig.java  |   2 -
 .../replication/ReplicationPeerDescription.java |   5 +-
 .../hbase/replication/SyncReplicationState.java |  19 +-
 .../org/apache/hadoop/hbase/HConstants.java |   3 +
 .../src/main/protobuf/MasterProcedure.proto |  20 +-
 .../hbase/replication/ReplicationPeerImpl.java  |  45 -
 .../replication/ReplicationPeerStorage.java |  25 ++-
 .../hbase/replication/ReplicationPeers.java |  27 ++-
 .../replication/ZKReplicationPeerStorage.java   |  63 +--
 .../hbase/coprocessor/MasterObserver.java   |   7 +-
 .../org/apache/hadoop/hbase/master/HMaster.java |   4 +-
 .../hbase/master/MasterCoprocessorHost.java |  12 +-
 .../replication/AbstractPeerProcedure.java  |  14 +-
 .../master/replication/ModifyPeerProcedure.java |  11 --
 .../replication/RefreshPeerProcedure.java   |  18 +-
 .../replication/ReplicationPeerManager.java |  89 +
 ...ransitPeerSyncReplicationStateProcedure.java | 181 ---
 .../hbase/regionserver/HRegionServer.java   |  35 ++--
 .../regionserver/ReplicationSourceService.java  |  11 +-
 .../regionserver/PeerActionListener.java|   4 +-
 .../regionserver/PeerProcedureHandler.java  |  16 +-
 .../regionserver/PeerProcedureHandlerImpl.java  |  52 +-
 .../regionserver/RefreshPeerCallable.java   |   7 +
 .../replication/regionserver/Replication.java   |  22 ++-
 .../regionserver/ReplicationSourceManager.java  |  41 +++--
 .../SyncReplicationPeerInfoProvider.java|  43 +
 .../SyncReplicationPeerInfoProviderImpl.java|  71 
 .../SyncReplicationPeerMappingManager.java  |  48 +
 .../SyncReplicationPeerProvider.java|  35 
 .../hbase/wal/SyncReplicationWALProvider.java   |  35 ++--
 .../org/apache/hadoop/hbase/wal/WALFactory.java |  47 ++---
 .../replication/TestReplicationAdmin.java   |   3 +-
 .../TestReplicationSourceManager.java   |   5 +-
 .../wal/TestSyncReplicationWALProvider.java |  36 ++--
 34 files changed, 743 insertions(+), 313 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/1c8063d5/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
index 997a155..cc7b4bc 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
@@ -15,7 +15,6 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-
 package org.apache.hadoop.hbase.replication;
 
 import java.util.Collection;
@@ -25,7 +24,6 @@ import java.util.List;
 import java.util.Map;
 import java.util.Set;
 import java.util.TreeMap;
-
 import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.util.Bytes;

http://git-wip-us.apache.org/repos/asf/hbase/blob/1c8063d5/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerDescription.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerDescription.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerDescription.java
index 2d077c5..b0c27bb 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerDescription.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerDescription.java
@@ -20,7 +20,10 @@ package org.apache.hadoop.hbase.replication;
 import org.apache.yetus.audience.InterfaceAudience;
 
 /**
- * The POJO equivalent of ReplicationProtos.ReplicationPeerDescription
+ * The POJO equivalent of ReplicationProtos.ReplicationPeerDescription.
+ * <p>
+ * To developers: we do not store the new sync replication state here since it is just an
+ * intermediate state and this class is public.
  */
 @InterfaceAudience.Public
 public clas

[04/27] hbase git commit: HBASE-19943 Only allow removing sync replication peer which is in DA state

2018-05-15 Thread zhangduo
HBASE-19943 Only allow removing sync replication peer which is in DA state


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/72a07b44
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/72a07b44
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/72a07b44

Branch: refs/heads/master
Commit: 72a07b4401c6514b5c534c533c2576c0ee36a747
Parents: aafbb8c
Author: huzheng 
Authored: Thu Mar 1 18:34:02 2018 +0800
Committer: zhangduo 
Committed: Tue May 15 08:39:04 2018 +0800

--
 .../replication/ReplicationPeerManager.java | 14 -
 .../hbase/wal/SyncReplicationWALProvider.java   |  2 +-
 .../replication/TestReplicationAdmin.java   | 63 
 .../hbase/replication/TestSyncReplication.java  |  2 +-
 4 files changed, 78 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/72a07b44/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
index 0dc922d..41dd6e3 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
@@ -120,8 +120,20 @@ public class ReplicationPeerManager {
 return desc;
   }
 
+  private void checkPeerInDAStateIfSyncReplication(String peerId) throws 
DoNotRetryIOException {
+ReplicationPeerDescription desc = peers.get(peerId);
+if (desc != null && desc.getPeerConfig().isSyncReplication()
+&& 
!SyncReplicationState.DOWNGRADE_ACTIVE.equals(desc.getSyncReplicationState())) {
+  throw new DoNotRetryIOException("Couldn't remove synchronous replication 
peer with state="
+  + desc.getSyncReplicationState()
+  + ", Transit the synchronous replication state to be 
DOWNGRADE_ACTIVE firstly.");
+}
+  }
+
   ReplicationPeerConfig preRemovePeer(String peerId) throws 
DoNotRetryIOException {
-return checkPeerExists(peerId).getPeerConfig();
+ReplicationPeerDescription pd = checkPeerExists(peerId);
+checkPeerInDAStateIfSyncReplication(peerId);
+return pd.getPeerConfig();
   }
 
   void preEnablePeer(String peerId) throws DoNotRetryIOException {
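
From an operator's point of view the new check means a sync replication peer must be downgraded before it can be dropped. A hedged sketch of the client-side sequence, assuming the Admin methods introduced by this patch series (the peer id and connection setup are illustrative):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.replication.SyncReplicationState;

    public class RemoveSyncPeer {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
            Admin admin = conn.getAdmin()) {
          // Removing a peer in A or S state now fails fast with DoNotRetryIOException,
          // so transit to DOWNGRADE_ACTIVE first, then remove.
          admin.transitReplicationPeerSyncReplicationState("peer1",
            SyncReplicationState.DOWNGRADE_ACTIVE);
          admin.removeReplicationPeer("peer1");
        }
      }
    }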

http://git-wip-us.apache.org/repos/asf/hbase/blob/72a07b44/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/SyncReplicationWALProvider.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/SyncReplicationWALProvider.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/SyncReplicationWALProvider.java
index ac4b4cd..282aa21 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/SyncReplicationWALProvider.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/SyncReplicationWALProvider.java
@@ -142,7 +142,7 @@ public class SyncReplicationWALProvider implements 
WALProvider, PeerActionListen
   @Override
   public WAL getWAL(RegionInfo region) throws IOException {
 if (region == null) {
-  return provider.getWAL(region);
+  return provider.getWAL(null);
 }
 Optional<Pair<String, String>> peerIdAndRemoteWALDir =
   peerInfoProvider.getPeerIdAndRemoteWALDir(region);

http://git-wip-us.apache.org/repos/asf/hbase/blob/72a07b44/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java
index 0ad476f..486ab51 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java
@@ -254,6 +254,62 @@ public class TestReplicationAdmin {
   }
 
   @Test
+  public void testRemovePeerWithNonDAState() throws Exception {
+TableName tableName = TableName.valueOf(name.getMethodName());
+TEST_UTIL.createTable(tableName, Bytes.toBytes("family"));
+ReplicationPeerConfigBuilder builder = ReplicationPeerConfig.newBuilder();
+
+String rootDir = "hdfs://srv1:/hbase";
+builder.setClusterKey(KEY_ONE);
+builder.setRemoteWALDir(rootDir);
+builder.setReplicateAllUserTables(false);
+Map<TableName, List<String>> tableCfs = new HashMap<>();
+tableCfs.put(tableName, new ArrayList<>());
+builder.setTabl

[20/27] hbase git commit: HBASE-20163 Forbid major compaction when standby cluster replay the remote wals

2018-05-15 Thread zhangduo
HBASE-20163 Forbid major compaction when standby cluster replay the remote wals


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/0e706ebc
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/0e706ebc
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/0e706ebc

Branch: refs/heads/master
Commit: 0e706ebcff748195ab2d5365bc89d30b01362f0a
Parents: c788ac6
Author: Guanghao Zhang 
Authored: Thu Apr 12 14:44:25 2018 +0800
Committer: zhangduo 
Committed: Tue May 15 08:39:04 2018 +0800

--
 .../hadoop/hbase/regionserver/HRegion.java  | 18 
 .../hbase/regionserver/HRegionServer.java   |  2 +-
 .../regionserver/RegionServerServices.java  |  5 +++
 .../ForbidMajorCompactionChecker.java   | 44 
 .../hadoop/hbase/MockRegionServerServices.java  |  6 +++
 .../hadoop/hbase/master/MockRegionServer.java   |  6 +++
 6 files changed, 80 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/0e706ebc/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index d86565e..6aa4b27 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -144,6 +144,7 @@ import 
org.apache.hadoop.hbase.regionserver.ScannerContext.LimitScope;
 import org.apache.hadoop.hbase.regionserver.ScannerContext.NextState;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionContext;
 import 
org.apache.hadoop.hbase.regionserver.compactions.CompactionLifeCycleTracker;
+import 
org.apache.hadoop.hbase.regionserver.compactions.ForbidMajorCompactionChecker;
 import 
org.apache.hadoop.hbase.regionserver.throttle.CompactionThroughputControllerFactory;
 import 
org.apache.hadoop.hbase.regionserver.throttle.NoLimitThroughputController;
 import org.apache.hadoop.hbase.regionserver.throttle.StoreHotnessProtector;
@@ -1980,6 +1981,14 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
 return compact(compaction, store, throughputController, null);
   }
 
+  private boolean shouldForbidMajorCompaction() {
+if (rsServices != null && rsServices.getReplicationSourceService() != 
null) {
+  return 
rsServices.getReplicationSourceService().getSyncReplicationPeerInfoProvider()
+  .checkState(getRegionInfo(), ForbidMajorCompactionChecker.get());
+}
+return false;
+  }
+
   public boolean compact(CompactionContext compaction, HStore store,
   ThroughputController throughputController, User user) throws IOException 
{
 assert compaction != null && compaction.hasSelection();
@@ -1989,6 +1998,15 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
   store.cancelRequestedCompaction(compaction);
   return false;
 }
+
+if (compaction.getRequest().isAllFiles() && shouldForbidMajorCompaction()) 
{
+  LOG.warn("Skipping major compaction on " + this
+  + " because this cluster is transiting sync replication state"
+  + " from STANDBY to DOWNGRADE_ACTIVE");
+  store.cancelRequestedCompaction(compaction);
+  return false;
+}
+
 MonitoredTask status = null;
 boolean requestNeedsCancellation = true;
 /*
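
A minimal sketch of the checker pattern used above: a stateless predicate over the peer's current and transient sync replication states. The exact condition is an assumption here; the log message suggests the transition from STANDBY to DOWNGRADE_ACTIVE (remote WAL replay) is what must block major compaction.

    import java.util.function.BiPredicate;

    enum SyncState { ACTIVE, DOWNGRADE_ACTIVE, STANDBY, NONE }

    // Assumed semantics: forbid major compaction while either the current or the
    // transient new state is DOWNGRADE_ACTIVE, i.e. while remote WALs may be replaying.
    final class ForbidMajorCompaction implements BiPredicate<SyncState, SyncState> {
      private static final ForbidMajorCompaction INSTANCE = new ForbidMajorCompaction();

      static ForbidMajorCompaction get() { return INSTANCE; }

      @Override
      public boolean test(SyncState state, SyncState newState) {
        return state == SyncState.DOWNGRADE_ACTIVE || newState == SyncState.DOWNGRADE_ACTIVE;
      }
    }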

http://git-wip-us.apache.org/repos/asf/hbase/blob/0e706ebc/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index af2f3b5..440a838 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -2472,7 +2472,7 @@ public class HRegionServer extends HasThread implements
* @return Return the object that implements the replication
* source executorService.
*/
-  @VisibleForTesting
+  @Override
   public ReplicationSourceService getReplicationSourceService() {
 return replicationSourceHandler;
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/0e706ebc/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java
 
b/hbase-server/src/

[15/27] hbase git commit: HBASE-19082 Reject read/write from client but accept write from replication in state S

2018-05-15 Thread zhangduo
HBASE-19082 Reject read/write from client but accept write from replication in state S


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/2b58708e
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/2b58708e
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/2b58708e

Branch: refs/heads/master
Commit: 2b58708ea1de07e04dc27ce5c965154edb4e20e4
Parents: 1c8063d
Author: zhangduo 
Authored: Mon Feb 12 18:20:18 2018 +0800
Committer: zhangduo 
Committed: Tue May 15 08:39:04 2018 +0800

--
 .../org/apache/hadoop/hbase/HConstants.java |   3 -
 .../src/main/protobuf/MasterProcedure.proto |   3 +-
 .../hbase/replication/ReplicationUtils.java |   4 +
 ...ransitPeerSyncReplicationStateProcedure.java |  10 +
 .../hadoop/hbase/regionserver/HRegion.java  |   5 +-
 .../hbase/regionserver/HRegionServer.java   |   2 +-
 .../hbase/regionserver/RSRpcServices.java   |  88 ++--
 .../RejectRequestsFromClientStateChecker.java   |  44 
 .../regionserver/ReplicationSink.java   |  72 ---
 .../SyncReplicationPeerInfoProvider.java|  10 +-
 .../SyncReplicationPeerInfoProviderImpl.java|  19 +-
 .../hbase/wal/SyncReplicationWALProvider.java   |   3 +
 .../org/apache/hadoop/hbase/wal/WALFactory.java |   4 +-
 .../hbase/replication/TestSyncReplication.java  | 200 +++
 .../wal/TestSyncReplicationWALProvider.java |   8 +-
 15 files changed, 401 insertions(+), 74 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/2b58708e/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
--
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java 
b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
index 522c2cf..9241682 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
@@ -1355,9 +1355,6 @@ public final class HConstants {
 
   public static final String NOT_IMPLEMENTED = "Not implemented";
 
-  // TODO: need to find a better place to hold it.
-  public static final String SYNC_REPLICATION_ENABLED = 
"hbase.replication.sync.enabled";
-
   private HConstants() {
 // Can't be instantiated with this ctor.
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/2b58708e/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
--
diff --git a/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto 
b/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
index 67c1b43..e8b940e 100644
--- a/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
@@ -397,7 +397,8 @@ enum PeerSyncReplicationStateTransitionState {
   REOPEN_ALL_REGIONS_IN_PEER = 5;
   TRANSIT_PEER_NEW_SYNC_REPLICATION_STATE = 6;
   REFRESH_PEER_SYNC_REPLICATION_STATE_ON_RS_END = 7;
-  POST_PEER_SYNC_REPLICATION_STATE_TRANSITION = 8;
+  CREATE_DIR_FOR_REMOTE_WAL = 8;
+  POST_PEER_SYNC_REPLICATION_STATE_TRANSITION = 9;
 }
 
 message PeerModificationStateData {

http://git-wip-us.apache.org/repos/asf/hbase/blob/2b58708e/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java
--
diff --git 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java
 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java
index e4dea83..d94cb00 100644
--- 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java
+++ 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java
@@ -37,6 +37,10 @@ import org.apache.yetus.audience.InterfaceAudience;
 @InterfaceAudience.Private
 public final class ReplicationUtils {
 
+  public static final String SYNC_REPLICATION_ENABLED = 
"hbase.replication.sync.enabled";
+
+  public static final String REPLICATION_ATTR_NAME = "__rep__";
+
   private ReplicationUtils() {
   }
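
One plausible use of the new REPLICATION_ATTR_NAME marker, sketched with the public client API: the replication sink tags its mutations so that a region server in state S can accept them while still rejecting ordinary client writes. Whether the sink sets exactly this value is an assumption of the example.

    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.util.Bytes;

    public class ReplicationAttrDemo {
      static final String REPLICATION_ATTR_NAME = "__rep__";

      public static void main(String[] args) {
        Put put = new Put(Bytes.toBytes("row1"));
        put.addColumn(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v"));
        // Mark the mutation as coming from replication rather than from a client.
        put.setAttribute(REPLICATION_ATTR_NAME, new byte[] { 1 });

        boolean fromReplication = put.getAttribute(REPLICATION_ATTR_NAME) != null;
        System.out.println("from replication: " + fromReplication); // true
      }
    }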
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/2b58708e/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/TransitPeerSyncReplicationStateProcedure.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/TransitPeerSyncReplicationStateProcedure.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/TransitPeerSyncReplicationStateProcedure.java
index 8fc932f..69404a0 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/

[19/27] hbase git commit: HBASE-19999 Remove the SYNC_REPLICATION_ENABLED flag

2018-05-15 Thread zhangduo
HBASE-19999 Remove the SYNC_REPLICATION_ENABLED flag


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/d5619c7c
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/d5619c7c
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/d5619c7c

Branch: refs/heads/master
Commit: d5619c7cf725fba57df5d4e0d3a13628c19036a7
Parents: c6fffd9
Author: Guanghao Zhang 
Authored: Fri Mar 9 11:30:25 2018 +0800
Committer: zhangduo 
Committed: Tue May 15 08:39:04 2018 +0800

--
 .../hbase/replication/ReplicationUtils.java  |  2 --
 .../hadoop/hbase/regionserver/HRegionServer.java | 13 -
 .../hbase/wal/SyncReplicationWALProvider.java| 19 ++-
 .../org/apache/hadoop/hbase/wal/WALFactory.java  | 18 --
 .../hbase/replication/TestSyncReplication.java   |  1 -
 .../master/TestRecoverStandbyProcedure.java  |  2 --
 .../wal/TestSyncReplicationWALProvider.java  |  2 --
 7 files changed, 38 insertions(+), 19 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/d5619c7c/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java
--
diff --git 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java
 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java
index e402d0f..cb22f57 100644
--- 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java
+++ 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java
@@ -37,8 +37,6 @@ import org.apache.yetus.audience.InterfaceAudience;
 @InterfaceAudience.Private
 public final class ReplicationUtils {
 
-  public static final String SYNC_REPLICATION_ENABLED = 
"hbase.replication.sync.enabled";
-
   public static final String REPLICATION_ATTR_NAME = "__rep__";
 
   public static final String REMOTE_WAL_DIR_NAME = "remoteWALs";

http://git-wip-us.apache.org/repos/asf/hbase/blob/d5619c7c/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index 2fb4f67..af2f3b5 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -1804,10 +1804,8 @@ public class HRegionServer extends HasThread implements
   private void setupWALAndReplication() throws IOException {
 boolean isMasterNoTableOrSystemTableOnly = this instanceof HMaster &&
   (!LoadBalancer.isTablesOnMaster(conf) || 
LoadBalancer.isSystemTablesOnlyOnMaster(conf));
-if (isMasterNoTableOrSystemTableOnly) {
-  conf.setBoolean(ReplicationUtils.SYNC_REPLICATION_ENABLED, false);
-}
-WALFactory factory = new WALFactory(conf, serverName.toString());
+WALFactory factory =
+new WALFactory(conf, serverName.toString(), 
!isMasterNoTableOrSystemTableOnly);
 if (!isMasterNoTableOrSystemTableOnly) {
   // TODO Replication make assumptions here based on the default 
filesystem impl
   Path oldLogDir = new Path(walRootDir, HConstants.HREGION_OLDLOGDIR_NAME);
@@ -1926,11 +1924,8 @@ public class HRegionServer extends HasThread implements
 }
 this.executorService.startExecutorService(ExecutorType.RS_REFRESH_PEER,
   conf.getInt("hbase.regionserver.executor.refresh.peer.threads", 2));
-
-if (conf.getBoolean(ReplicationUtils.SYNC_REPLICATION_ENABLED, false)) {
-  
this.executorService.startExecutorService(ExecutorType.RS_REPLAY_SYNC_REPLICATION_WAL,
-
conf.getInt("hbase.regionserver.executor.replay.sync.replication.wal.threads", 
2));
-}
+
this.executorService.startExecutorService(ExecutorType.RS_REPLAY_SYNC_REPLICATION_WAL,
+  
conf.getInt("hbase.regionserver.executor.replay.sync.replication.wal.threads", 
1));
 
 Threads.setDaemonThreadRunning(this.walRoller.getThread(), getName() + 
".logRoller",
 uncaughtExceptionHandler);

http://git-wip-us.apache.org/repos/asf/hbase/blob/d5619c7c/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/SyncReplicationWALProvider.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/SyncReplicationWALProvider.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/SyncReplicationWALProvider.java
index 282aa21..54287fe 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/SyncReplicationWALProvider.java
+++

[18/27] hbase git commit: HBASE-19079 Support setting up two clusters with A and S state

2018-05-15 Thread zhangduo
HBASE-19079 Support setting up two clusters with A and S state


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/c788ac6f
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/c788ac6f
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/c788ac6f

Branch: refs/heads/master
Commit: c788ac6f86ee1ddae74c769ea83ade4936083fea
Parents: d5619c7
Author: zhangduo 
Authored: Tue Apr 10 22:35:19 2018 +0800
Committer: zhangduo 
Committed: Tue May 15 08:39:04 2018 +0800

--
 .../replication/ReplicationPeerManager.java |   5 +-
 ...ransitPeerSyncReplicationStateProcedure.java |   2 +-
 .../hbase/regionserver/wal/DualAsyncFSWAL.java  |  14 ++
 .../hadoop/hbase/regionserver/wal/WALUtil.java  |  25 ++-
 .../hbase/replication/ChainWALEntryFilter.java  |  28 +--
 .../ReplaySyncReplicationWALCallable.java   |  27 ++-
 .../SyncReplicationPeerInfoProviderImpl.java|   6 +-
 .../hadoop/hbase/wal/AbstractFSWALProvider.java |  10 +-
 .../hbase/wal/SyncReplicationWALProvider.java   |  94 ++---
 .../org/apache/hadoop/hbase/wal/WALEdit.java|   8 +-
 .../org/apache/hadoop/hbase/wal/WALFactory.java |   2 +-
 .../replication/TestReplicationAdmin.java   |  33 +--
 .../regionserver/wal/TestWALDurability.java |   2 +
 .../replication/SyncReplicationTestBase.java| 185 +
 .../hbase/replication/TestSyncReplication.java  | 207 ---
 .../replication/TestSyncReplicationActive.java  |  64 ++
 .../replication/TestSyncReplicationStandBy.java |  96 +
 17 files changed, 521 insertions(+), 287 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/c788ac6f/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
index 41dd6e3..229549e 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
@@ -68,8 +68,9 @@ public class ReplicationPeerManager {
 
   private final ImmutableMap<SyncReplicationState, EnumSet<SyncReplicationState>>
 allowedTransition = 
Maps.immutableEnumMap(ImmutableMap.of(SyncReplicationState.ACTIVE,
-  EnumSet.of(SyncReplicationState.DOWNGRADE_ACTIVE), 
SyncReplicationState.STANDBY,
-  EnumSet.of(SyncReplicationState.DOWNGRADE_ACTIVE), 
SyncReplicationState.DOWNGRADE_ACTIVE,
+  EnumSet.of(SyncReplicationState.DOWNGRADE_ACTIVE, 
SyncReplicationState.STANDBY),
+  SyncReplicationState.STANDBY, 
EnumSet.of(SyncReplicationState.DOWNGRADE_ACTIVE),
+  SyncReplicationState.DOWNGRADE_ACTIVE,
   EnumSet.of(SyncReplicationState.STANDBY, SyncReplicationState.ACTIVE)));
 
   ReplicationPeerManager(ReplicationPeerStorage peerStorage, 
ReplicationQueueStorage queueStorage,
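
Spelled out, the transition table now reads A -> {DA, S}, S -> {DA}, DA -> {A, S}. A self-contained sketch of the same table (names mirror the patch, but this is a reconstruction, not the patch itself):

    import java.util.EnumMap;
    import java.util.EnumSet;
    import java.util.Map;

    public class TransitionTable {
      enum SyncReplicationState { ACTIVE, DOWNGRADE_ACTIVE, STANDBY }

      private static final Map<SyncReplicationState, EnumSet<SyncReplicationState>> ALLOWED =
          new EnumMap<>(SyncReplicationState.class);
      static {
        ALLOWED.put(SyncReplicationState.ACTIVE,
          EnumSet.of(SyncReplicationState.DOWNGRADE_ACTIVE, SyncReplicationState.STANDBY));
        ALLOWED.put(SyncReplicationState.STANDBY,
          EnumSet.of(SyncReplicationState.DOWNGRADE_ACTIVE));
        ALLOWED.put(SyncReplicationState.DOWNGRADE_ACTIVE,
          EnumSet.of(SyncReplicationState.STANDBY, SyncReplicationState.ACTIVE));
      }

      static boolean canTransit(SyncReplicationState from, SyncReplicationState to) {
        return ALLOWED.get(from).contains(to);
      }

      public static void main(String[] args) {
        // STANDBY can only go to DOWNGRADE_ACTIVE, never straight to ACTIVE.
        System.out.println(canTransit(SyncReplicationState.STANDBY, SyncReplicationState.ACTIVE));           // false
        System.out.println(canTransit(SyncReplicationState.STANDBY, SyncReplicationState.DOWNGRADE_ACTIVE)); // true
      }
    }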

http://git-wip-us.apache.org/repos/asf/hbase/blob/c788ac6f/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/TransitPeerSyncReplicationStateProcedure.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/TransitPeerSyncReplicationStateProcedure.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/TransitPeerSyncReplicationStateProcedure.java
index cc51890..5da2b0c 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/TransitPeerSyncReplicationStateProcedure.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/TransitPeerSyncReplicationStateProcedure.java
@@ -171,7 +171,7 @@ public class TransitPeerSyncReplicationStateProcedure
 }
 return Flow.HAS_MORE_STATE;
   case REPLAY_REMOTE_WAL_IN_PEER:
-// TODO: replay remote wal when transiting from S to DA.
+addChildProcedure(new RecoverStandbyProcedure(peerId));
 
setNextState(PeerSyncReplicationStateTransitionState.REOPEN_ALL_REGIONS_IN_PEER);
 return Flow.HAS_MORE_STATE;
   case REOPEN_ALL_REGIONS_IN_PEER:

http://git-wip-us.apache.org/repos/asf/hbase/blob/c788ac6f/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/DualAsyncFSWAL.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/DualAsyncFSWAL.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/DualAsyncFSWAL.java
index 0495337..a98567a 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/DualAsyncFSWA

[23/27] hbase git commit: HBASE-19990 Create remote wal directory when transiting to state S

2018-05-15 Thread zhangduo
HBASE-19990 Create remote wal directory when transiting to state S


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/aafbb8cb
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/aafbb8cb
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/aafbb8cb

Branch: refs/heads/master
Commit: aafbb8cb248fa6981b1a2f9c13c70d7ffd0ad3dd
Parents: 2b58708
Author: zhangduo 
Authored: Wed Feb 14 16:01:16 2018 +0800
Committer: zhangduo 
Committed: Tue May 15 08:39:04 2018 +0800

--
 .../procedure2/ProcedureYieldException.java |  9 --
 .../hbase/replication/ReplicationUtils.java |  2 ++
 .../hadoop/hbase/master/MasterFileSystem.java   | 19 ++---
 .../master/procedure/MasterProcedureEnv.java|  5 
 ...ransitPeerSyncReplicationStateProcedure.java | 29 
 .../hbase/replication/TestSyncReplication.java  |  8 ++
 6 files changed, 55 insertions(+), 17 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/aafbb8cb/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureYieldException.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureYieldException.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureYieldException.java
index 0487ac5b..dbb9981 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureYieldException.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureYieldException.java
@@ -15,16 +15,21 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-
 package org.apache.hadoop.hbase.procedure2;
 
 import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.yetus.audience.InterfaceStability;
 
-// TODO: Not used yet
+/**
+ * Indicate that a procedure wants to be rescheduled, usually because something is wrong but
+ * we do not want to fail the procedure.
+ * <p>
+ * TODO: need to support scheduling after a delay.
+ */
 @InterfaceAudience.Private
 @InterfaceStability.Stable
 public class ProcedureYieldException extends ProcedureException {
+
   /** default constructor */
   public ProcedureYieldException() {
 super();

http://git-wip-us.apache.org/repos/asf/hbase/blob/aafbb8cb/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java
--
diff --git 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java
 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java
index d94cb00..e402d0f 100644
--- 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java
+++ 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java
@@ -41,6 +41,8 @@ public final class ReplicationUtils {
 
   public static final String REPLICATION_ATTR_NAME = "__rep__";
 
+  public static final String REMOTE_WAL_DIR_NAME = "remoteWALs";
+
   private ReplicationUtils() {
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/aafbb8cb/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
index 864be02..7ccbd71 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
@@ -41,6 +41,7 @@ import org.apache.hadoop.hbase.log.HBaseMarkers;
 import org.apache.hadoop.hbase.mob.MobConstants;
 import org.apache.hadoop.hbase.procedure2.store.wal.WALProcedureStore;
 import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.replication.ReplicationUtils;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSTableDescriptors;
 import org.apache.hadoop.hbase.util.FSUtils;
@@ -133,7 +134,6 @@ public class MasterFileSystem {
* Idempotent.
*/
   private void createInitialFileSystemLayout() throws IOException {
-
 final String[] protectedSubDirs = new String[] {
 HConstants.BASE_NAMESPACE_DIR,
 HConstants.HFILE_ARCHIVE_DIRECTORY,
@@ -145,7 +145,8 @@ public class MasterFileSystem {
   HConstants.HREGION_LOGDIR_NAME,
   HConstants.HREGION_OLDLOGDIR_NAME,
   HConstants.CORRUPT_DIR_NAME,
-  WALProcedureStore.MASTER_PROCEDURE_LOGDIR
+  WALProcedureStore.MASTER_PROCEDURE_LOGDIR,
+  Re

[17/27] hbase git commit: HBASE-20434 Also remove remote wals when peer is in DA state

2018-05-15 Thread zhangduo
HBASE-20434 Also remove remote wals when peer is in DA state


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/c22cc8be
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/c22cc8be
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/c22cc8be

Branch: refs/heads/master
Commit: c22cc8be0fb15afb52b71019c4abc714c6276885
Parents: 71be2a8
Author: zhangduo 
Authored: Wed Apr 25 17:12:23 2018 +0800
Committer: zhangduo 
Committed: Tue May 15 08:39:04 2018 +0800

--
 .../hbase/replication/ReplicationUtils.java |   4 +
 ...ransitPeerSyncReplicationStateProcedure.java |   2 +-
 .../regionserver/ReplicationSource.java |   7 +-
 .../regionserver/ReplicationSourceManager.java  |  86 ++--
 .../hadoop/hbase/wal/AbstractFSWALProvider.java |  19 ++--
 .../hbase/wal/SyncReplicationWALProvider.java   |  30 +-
 .../TestSyncReplicationRemoveRemoteWAL.java | 101 +++
 .../TestReplicationSourceManager.java   |  68 -
 8 files changed, 251 insertions(+), 66 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/c22cc8be/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java
--
diff --git 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java
 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java
index 66e9b01..069db7a 100644
--- 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java
+++ 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java
@@ -191,6 +191,10 @@ public final class ReplicationUtils {
 return new Path(remoteWALDir, peerId);
   }
 
+  public static Path getRemoteWALDirForPeer(Path remoteWALDir, String peerId) {
+return new Path(remoteWALDir, peerId);
+  }
+
   /**
* Do the sleeping logic
* @param msg Why we sleep

http://git-wip-us.apache.org/repos/asf/hbase/blob/c22cc8be/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/TransitPeerSyncReplicationStateProcedure.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/TransitPeerSyncReplicationStateProcedure.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/TransitPeerSyncReplicationStateProcedure.java
index 5da2b0c..99fd615 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/TransitPeerSyncReplicationStateProcedure.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/TransitPeerSyncReplicationStateProcedure.java
@@ -211,7 +211,7 @@ public class TransitPeerSyncReplicationStateProcedure
   case CREATE_DIR_FOR_REMOTE_WAL:
 MasterFileSystem mfs = env.getMasterFileSystem();
 Path remoteWALDir = new Path(mfs.getWALRootDir(), 
ReplicationUtils.REMOTE_WAL_DIR_NAME);
-Path remoteWALDirForPeer = new Path(remoteWALDir, peerId);
+Path remoteWALDirForPeer = 
ReplicationUtils.getRemoteWALDirForPeer(remoteWALDir, peerId);
 FileSystem walFs = mfs.getWALFileSystem();
 try {
   if (walFs.exists(remoteWALDirForPeer)) {
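
A hedged sketch of what this CREATE_DIR_FOR_REMOTE_WAL step boils down to: ensure the per-peer directory under remoteWALs/ exists before the peer can serve as a remote WAL target. The namenode address and peer id below are illustrative.

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class EnsureRemoteWALDir {
      static void ensure(FileSystem walFs, Path remoteWALDir, String peerId) throws IOException {
        Path dirForPeer = new Path(remoteWALDir, peerId);
        // Create the directory only if it is missing; mkdirs returns false on failure.
        if (!walFs.exists(dirForPeer) && !walFs.mkdirs(dirForPeer)) {
          throw new IOException("Failed to create remote WAL dir " + dirForPeer);
        }
      }

      public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        Path remoteWALDir = new Path("hdfs://nn1:8020/hbase/remoteWALs");
        ensure(remoteWALDir.getFileSystem(conf), remoteWALDir, "peer1");
      }
    }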

http://git-wip-us.apache.org/repos/asf/hbase/blob/c22cc8be/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
index 1a27fc1..7313f13 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
@@ -549,14 +549,17 @@ public class ReplicationSource implements 
ReplicationSourceInterface {
 }
 
 /**
+ * <p>
   * Split a path to get the start time
+ * </p>
+ * <p>
   * For example: 10.20.20.171%3A60020.1277499063250
+ * </p>
  * @param p path to split
  * @return start time
  */
 private static long getTS(Path p) {
-  int tsIndex = p.getName().lastIndexOf('.') + 1;
-  return Long.parseLong(p.getName().substring(tsIndex));
+  return AbstractFSWALProvider.getWALStartTimeFromWALName(p.getName());
 }
   }
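
The contract the refactored getTS relies on is small enough to check standalone: the WAL start time is the numeric suffix after the last '.' in the file name.

    public class WalStartTime {
      static long getWALStartTime(String walName) {
        return Long.parseLong(walName.substring(walName.lastIndexOf('.') + 1));
      }

      public static void main(String[] args) {
        // prints 1277499063250
        System.out.println(getWALStartTime("10.20.20.171%3A60020.1277499063250"));
      }
    }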
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/c22cc8be/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
-
