[28/51] [partial] hbase-site git commit: Published site at 7d3750bd9fc9747623549c242cc4171e224b3eaf.

2018-06-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3469cbc0/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html
index aa48364..9549aa5 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html
@@ -2830,843 +2830,858 @@
 2822   * @return true if master is in 
maintenanceMode
 2823   */
 2824  @Override
-2825  public boolean isInMaintenanceMode() 
{
-2826return 
maintenanceModeTracker.isInMaintenanceMode();
-2827  }
-2828
-2829  @VisibleForTesting
-2830  public void setInitialized(boolean 
isInitialized) {
-2831
procedureExecutor.getEnvironment().setEventReady(initialized, isInitialized);
-2832  }
-2833
-2834  @Override
-2835  public ProcedureEvent 
getInitializedEvent() {
-2836return initialized;
-2837  }
-2838
-2839  /**
-2840   * ServerCrashProcessingEnabled is set 
false before completing assignMeta to prevent processing
-2841   * of crashed servers.
-2842   * @return true if assignMeta has 
completed;
-2843   */
-2844  @Override
-2845  public boolean 
isServerCrashProcessingEnabled() {
-2846return 
serverCrashProcessingEnabled.isReady();
-2847  }
-2848
-2849  @VisibleForTesting
-2850  public void 
setServerCrashProcessingEnabled(final boolean b) {
-2851
procedureExecutor.getEnvironment().setEventReady(serverCrashProcessingEnabled, 
b);
-2852  }
-2853
-2854  public ProcedureEvent 
getServerCrashProcessingEnabledEvent() {
-2855return 
serverCrashProcessingEnabled;
-2856  }
-2857
-2858  /**
-2859   * Compute the average load across all 
region servers.
-2860   * Currently, this uses a very naive 
computation - just uses the number of
-2861   * regions being served, ignoring 
stats about number of requests.
-2862   * @return the average load
-2863   */
-2864  public double getAverageLoad() {
-2865if (this.assignmentManager == null) 
{
-2866  return 0;
-2867}
-2868
-2869RegionStates regionStates = 
this.assignmentManager.getRegionStates();
-2870if (regionStates == null) {
-2871  return 0;
-2872}
-2873return 
regionStates.getAverageLoad();
-2874  }
-2875
-2876  /*
-2877   * @return the count of region split 
plans executed
-2878   */
-2879  public long getSplitPlanCount() {
-2880return splitPlanCount;
-2881  }
-2882
-2883  /*
-2884   * @return the count of region merge 
plans executed
-2885   */
-2886  public long getMergePlanCount() {
-2887return mergePlanCount;
-2888  }
-2889
-2890  @Override
-2891  public boolean registerService(Service 
instance) {
-2892/*
-2893 * No stacking of instances is 
allowed for a single service name
-2894 */
-2895Descriptors.ServiceDescriptor 
serviceDesc = instance.getDescriptorForType();
-2896String serviceName = 
CoprocessorRpcUtils.getServiceName(serviceDesc);
-2897if 
(coprocessorServiceHandlers.containsKey(serviceName)) {
-2898  LOG.error("Coprocessor service 
"+serviceName+
-2899  " already registered, 
rejecting request from "+instance
-2900  );
-2901  return false;
-2902}
-2903
-2904
coprocessorServiceHandlers.put(serviceName, instance);
-2905if (LOG.isDebugEnabled()) {
-2906  LOG.debug("Registered master 
coprocessor service: service="+serviceName);
-2907}
-2908return true;
-2909  }
-2910
-2911  /**
-2912   * Utility for constructing an 
instance of the passed HMaster class.
-2913   * @param masterClass
-2914   * @return HMaster instance.
-2915   */
-2916  public static HMaster 
constructMaster(Class masterClass,
-2917  final Configuration conf)  {
-2918try {
-2919  Constructor c = masterClass.getConstructor(Configuration.class);
-2920  return c.newInstance(conf);
-2921} catch(Exception e) {
-2922  Throwable error = e;
-2923  if (e instanceof 
InvocationTargetException &&
-2924  
((InvocationTargetException)e).getTargetException() != null) {
-2925error = 
((InvocationTargetException)e).getTargetException();
-2926  }
-2927  throw new RuntimeException("Failed 
construction of Master: " + masterClass.toString() + ". "
-2928, error);
-2929}
-2930  }
-2931
-2932  /**
-2933   * @see 
org.apache.hadoop.hbase.master.HMasterCommandLine
-2934   */
-2935  public static void main(String [] 
args) {
-2936LOG.info("STARTING service " + 
HMaster.class.getSimpleName());
-2937VersionInfo.logVersion();
-2938new 
HMasterCommandLine(HMaster.class).doMain(args);
-2939  }
-2940
-2941  public HFileCleaner getHFileCleaner() 
{
-2942return this.hfileCleaner;
-2943  }
-2944
-2945  public LogCleaner getLogCleaner() {
-2946return this.logCleaner;
-2947  }
-2948
-2949  /**
-2950   * @return the underlying snapshot 
manager
-295

[38/51] [partial] hbase-site git commit: Published site at 7d3750bd9fc9747623549c242cc4171e224b3eaf.

2018-06-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3469cbc0/devapidocs/org/apache/hadoop/hbase/master/assignment/RegionTransitionProcedure.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/assignment/RegionTransitionProcedure.html
 
b/devapidocs/org/apache/hadoop/hbase/master/assignment/RegionTransitionProcedure.html
index 5c74619..988a49b 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/master/assignment/RegionTransitionProcedure.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/master/assignment/RegionTransitionProcedure.html
@@ -123,7 +123,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Private
-public abstract class RegionTransitionProcedure
+public abstract class RegionTransitionProcedure
 extends Procedure
 implements TableProcedureInterface, RemoteProcedureDispatcher.RemoteProcedure
 Base class for the Assign and Unassign Procedure.
@@ -538,7 +538,7 @@ implements 
 
 LOG
-private static final org.slf4j.Logger LOG
+private static final org.slf4j.Logger LOG
 
 
 
@@ -547,7 +547,7 @@ implements 
 
 aborted
-protected final https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/AtomicBoolean.html?is-external=true";
 title="class or interface in java.util.concurrent.atomic">AtomicBoolean aborted
+protected final https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/AtomicBoolean.html?is-external=true";
 title="class or interface in java.util.concurrent.atomic">AtomicBoolean aborted
 
 
 
@@ -556,7 +556,7 @@ implements 
 
 transitionState
-private org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RegionTransitionState
 transitionState
+private org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RegionTransitionState
 transitionState
 
 
 
@@ -565,7 +565,7 @@ implements 
 
 regionInfo
-private RegionInfo regionInfo
+private RegionInfo regionInfo
 This data member must be persisted. Expectation is that it 
is done by subclasses in their
  Procedure.serializeStateData(ProcedureStateSerializer)
 call, restoring regionInfo
  in their Procedure.deserializeStateData(ProcedureStateSerializer)
 method.
@@ -577,7 +577,7 @@ implements 
 
 attempt
-private int attempt
+private int attempt
 Like regionInfo,
 the expectation is that subclasses persist the value of this
  data member. It is used doing backoff when Procedure gets stuck.
 
@@ -588,7 +588,7 @@ implements 
 
 lock
-private volatile boolean lock
+private volatile boolean lock
 
 
 
@@ -605,7 +605,7 @@ implements 
 
 RegionTransitionProcedure
-public RegionTransitionProcedure()
+public RegionTransitionProcedure()
 
 
 
@@ -614,7 +614,7 @@ implements 
 
 RegionTransitionProcedure
-public RegionTransitionProcedure(RegionInfo regionInfo)
+public RegionTransitionProcedure(RegionInfo regionInfo)
 
 
 
@@ -631,7 +631,7 @@ implements 
 
 getRegionInfo
-public RegionInfo getRegionInfo()
+public RegionInfo getRegionInfo()
 
 
 
@@ -640,7 +640,7 @@ implements 
 
 setRegionInfo
-protected void setRegionInfo(RegionInfo regionInfo)
+protected void setRegionInfo(RegionInfo regionInfo)
 This setter is for subclasses to call in their
  Procedure.deserializeStateData(ProcedureStateSerializer)
 method. Expectation is that
  subclasses will persist `regioninfo` in their
@@ -654,7 +654,7 @@ implements 
 
 setAttempt
-protected void setAttempt(int attempt)
+protected void setAttempt(int attempt)
 This setter is for subclasses to call in their
  Procedure.deserializeStateData(ProcedureStateSerializer)
 method.
 
@@ -669,7 +669,7 @@ implements 
 
 getAttempt
-protected int getAttempt()
+protected int getAttempt()
 
 
 
@@ -678,7 +678,7 @@ implements 
 
 getTableName
-public TableName getTableName()
+public TableName getTableName()
 
 Specified by:
 getTableName in
 interface TableProcedureInterface
@@ -693,7 +693,7 @@ implements 
 
 isMeta
-public boolean isMeta()
+public boolean isMeta()
 
 
 
@@ -702,7 +702,7 @@ implements 
 
 toStringClassDetails
-public void toStringClassDetails(https://docs.oracle.com/javase/8/docs/api/java/lang/StringBuilder.html?is-external=true";
 title="class or interface in java.lang">StringBuilder sb)
+public void toStringClassDetails(https://docs.oracle.com/javase/8/docs/api/java/lang/StringBuilder.html?is-external=true";
 title="class or interface in java.lang">StringBuilder sb)
 Description copied from 
class: Procedure
 Extend the toString() information with the procedure details
  e.g. className and parameters
@@ -720,7 +720,7 @@ implements 
 
 getRegionState
-public RegionStates.RegionStateNode getRegionState(MasterProcedureEnv env)
+public RegionStates.RegionStateNode getRegionState(MasterProcedureEnv env)
 
 
 
@@ -729,7 +729,7 @@ implements 
 
 setTransitionState
-void setTransitionState(org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RegionTransitionState state)
+void setTransitionState(org.apache.

[41/51] [partial] hbase-site git commit: Published site at 7d3750bd9fc9747623549c242cc4171e224b3eaf.

2018-06-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3469cbc0/devapidocs/org/apache/hadoop/hbase/master/assignment/AssignmentManager.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/assignment/AssignmentManager.html 
b/devapidocs/org/apache/hadoop/hbase/master/assignment/AssignmentManager.html
index 2e64b09..fa29c2b 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/master/assignment/AssignmentManager.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/master/assignment/AssignmentManager.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":10,"i65":10,"i66":10,"i67":10,"i68":10,"i69":10,"i70":10,"i71":10,"i72":10,"i73":10,"i74":10,"i75":10,"i76":10,"i77":10,"i78":10,"i79":10,"i80":10,"i81":10,"i82":10,"i83":10,"i84":10,"i85":10,"i86":10,"i87":10,"i88":10,"i89":10,"i90":10,"i91":10,"i92":10,"i93":10,"i94":10,"i95":10,"i96":10,"i97":10,"i98":10,"i99":10,"i100":10,"i101":10,"i102":10,"i103":10,"i104":10,"i105":10,"i106":10,"i107":10,"i108":10,"i
 
109":10,"i110":10,"i111":10,"i112":10,"i113":10,"i114":10,"i115":10,"i116":10,"i117":10,"i118":10};
+var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":10,"i65":10,"i66":10,"i67":10,"i68":10,"i69":10,"i70":10,"i71":10,"i72":10,"i73":10,"i74":10,"i75":10,"i76":10,"i77":10,"i78":10,"i79":10,"i80":10,"i81":10,"i82":10,"i83":10,"i84":10,"i85":10,"i86":10,"i87":10,"i88":10,"i89":10,"i90":10,"i91":10,"i92":10,"i93":10,"i94":10,"i95":10,"i96":10,"i97":10,"i98":10,"i99":10,"i100":10,"i101":10,"i102":10,"i103":10,"i104":10,"i105":10,"i106":10,"i107":10,"i108":10,"i
 
109":10,"i110":10,"i111":10,"i112":10,"i113":10,"i114":10,"i115":10,"i116":10,"i117":10,"i118":10,"i119":10};
 var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -611,69 +611,75 @@ implements RegionInfo regionInfo) 
 
 
+(package private) boolean
+isDeadServerProcessed(ServerName serverName)
+This is a very particular check.
+
+
+
 boolean
 isFailoverCleanupDone()
 Used by ServerCrashProcedure to make sure AssignmentManager 
has completed
  the failover cleanup before re-assigning regions of dead servers.
 
 
-
+
 boolean
 isMetaInitialized() 
 
-
+
 boolean
 isMetaLoaded() 
 
-
+
 boolean
 isMetaRegion(byte[] regionName) 
 
-
+
 private boolean
 isMetaRegion(RegionInfo regionInfo) 
 
-
+
 boolean
 isMetaRegionInTransition() 
 
-
+
 boolean
 isRunning() 
 
-
+
 boolean
 isTableDisabled(TableName tableName) 
 
-
+
 boolean
 isTableEnabled(TableName tableName) 
 
-
+
 void
 joinCluster() 
 
-
-void
+
+private void
 killRegionServer(RegionStates.ServerStateNode serverNode) 
 
-
-void
+
+private void
 killRegionServer(ServerName serverName) 
 
-
+
 private void
 loadMeta() 
 
-
+
 void
 markRegionAsClosed(RegionStates.RegionStateNode regionNode) 
 
-
+
 void
 markRegionAsClosing(RegionStates.RegionStateNode regionNode) 
 
-
+
 void
 markRegionAsMerged(RegionInfo child,
   ServerName serverName,
@@ -682,73 +688,73 @@ implements When called here, the merge has happened.
 
 
-
+
 void
 markRegionAsOpened(RegionStates.RegionStateNode regionNode) 
 
-
+
 void
 markRegionAsOpening(RegionStates.RegionStateNode regionNode) 
 
-
+
 void
 markRegionAsSplit(RegionInfo parent,
  ServerName serverName,
  RegionInfo daughterA,
  RegionInfo daughterB) 
 
-
+
 void
 move(RegionInfo regionInfo) 
 
-
+
 https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/Future.html?is-external=true";
 title="class or interface in 
java.util.concurrent">Future
 moveAsync(RegionPlan regionPlan) 
 
-
+
 void
 offlineRegion(RegionInfo regionInfo) 
 
-
+
 void
 onlineRegion(RegionInfo regionInfo,
 ServerName serverName) 
 
-
+
 private void
 processAssignmentPlans(https://do

[49/51] [partial] hbase-site git commit: Published site at 7d3750bd9fc9747623549c242cc4171e224b3eaf.

2018-06-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3469cbc0/apidocs/deprecated-list.html
--
diff --git a/apidocs/deprecated-list.html b/apidocs/deprecated-list.html
index 6463414..22e3d45 100644
--- a/apidocs/deprecated-list.html
+++ b/apidocs/deprecated-list.html
@@ -2249,7 +2249,7 @@
 
 
 org.apache.hadoop.hbase.client.Scan.setStopRow(byte[])
-use Scan.withStartRow(byte[])
 instead. This method may change the inclusive of
+use Scan.withStopRow(byte[])
 instead. This method may change the inclusive of
  the stop row to keep compatible with the old 
behavior.
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3469cbc0/apidocs/index-all.html
--
diff --git a/apidocs/index-all.html b/apidocs/index-all.html
index c388812..415bbef 100644
--- a/apidocs/index-all.html
+++ b/apidocs/index-all.html
@@ -17051,7 +17051,7 @@ Input/OutputFormats, a table indexing MapReduce job, 
and utility methods.
 setStopRow(byte[])
 - Method in class org.apache.hadoop.hbase.client.Scan
 
 Deprecated.
-use Scan.withStartRow(byte[])
 instead. This method may change the inclusive of
+use Scan.withStopRow(byte[])
 instead. This method may change the inclusive of
  the stop row to keep compatible with the old 
behavior.
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3469cbc0/apidocs/org/apache/hadoop/hbase/client/Scan.html
--
diff --git a/apidocs/org/apache/hadoop/hbase/client/Scan.html 
b/apidocs/org/apache/hadoop/hbase/client/Scan.html
index 5a02583..c424600 100644
--- a/apidocs/org/apache/hadoop/hbase/client/Scan.html
+++ b/apidocs/org/apache/hadoop/hbase/client/Scan.html
@@ -708,7 +708,7 @@ extends Scan
 setStopRow(byte[] stopRow)
 Deprecated. 
-use withStartRow(byte[])
 instead. This method may change the inclusive of
+use withStopRow(byte[])
 instead. This method may change the inclusive of
  the stop row to keep compatible with the old 
behavior.
 
 
@@ -1223,7 +1223,7 @@ public setStopRow
 https://docs.oracle.com/javase/8/docs/api/java/lang/Deprecated.html?is-external=true";
 title="class or interface in java.lang">@Deprecated
 public Scan setStopRow(byte[] stopRow)
-Deprecated. use withStartRow(byte[])
 instead. This method may change the inclusive of
+Deprecated. use withStopRow(byte[])
 instead. This method may change the inclusive of
  the stop row to keep compatible with the old 
behavior.
 Set the stop row of the scan.
  

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3469cbc0/apidocs/org/apache/hadoop/hbase/client/class-use/Scan.html
--
diff --git a/apidocs/org/apache/hadoop/hbase/client/class-use/Scan.html 
b/apidocs/org/apache/hadoop/hbase/client/class-use/Scan.html
index 14f5be1..3080d57 100644
--- a/apidocs/org/apache/hadoop/hbase/client/class-use/Scan.html
+++ b/apidocs/org/apache/hadoop/hbase/client/class-use/Scan.html
@@ -350,7 +350,7 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 Scan
 Scan.setStopRow(byte[] stopRow)
 Deprecated. 
-use withStartRow(byte[])
 instead. This method may change the inclusive of
+use withStopRow(byte[])
 instead. This method may change the inclusive of
  the stop row to keep compatible with the old 
behavior.
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3469cbc0/apidocs/src-html/org/apache/hadoop/hbase/client/Scan.ReadType.html
--
diff --git a/apidocs/src-html/org/apache/hadoop/hbase/client/Scan.ReadType.html 
b/apidocs/src-html/org/apache/hadoop/hbase/client/Scan.ReadType.html
index 070e349..efeb69a 100644
--- a/apidocs/src-html/org/apache/hadoop/hbase/client/Scan.ReadType.html
+++ b/apidocs/src-html/org/apache/hadoop/hbase/client/Scan.ReadType.html
@@ -491,7 +491,7 @@
 483   * @return this
 484   * @throws IllegalArgumentException if 
stopRow does not meet criteria for a row key (when length
 485   *   exceeds {@link 
HConstants#MAX_ROW_LENGTH})
-486   * @deprecated use {@link 
#withStartRow(byte[])} instead. This method may change the inclusive of
+486   * @deprecated use {@link 
#withStopRow(byte[])} instead. This method may change the inclusive of
 487   * the stop row to keep 
compatible with the old behavior.
 488   */
 489  @Deprecated

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3469cbc0/apidocs/src-html/org/apache/hadoop/hbase/client/Scan.html
--
diff --git a/apidocs/src-html/org/apache/hadoop/hbase/client/Scan.html 
b/apidocs/src-html/org/apache/hadoop/hbase/client/Scan.html
index 070e349..efeb69a 100644
--- a/apidocs/src-html/org/apache/hadoop/hbase/client/Scan.html
+++ b/apidocs/src-html/org/apache/hadoop

[19/51] [partial] hbase-site git commit: Published site at 7d3750bd9fc9747623549c242cc4171e224b3eaf.

2018-06-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3469cbc0/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.AssignmentProcedureEvent.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.AssignmentProcedureEvent.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.AssignmentProcedureEvent.html
index 5420d82..6ea3672 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.AssignmentProcedureEvent.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.AssignmentProcedureEvent.html
@@ -316,687 +316,728 @@
 308}
 309  }
 310
-311  public enum ServerState { ONLINE, 
SPLITTING, OFFLINE }
-312  public static class ServerStateNode 
implements Comparable {
-313private final ServerReportEvent 
reportEvent;
-314
-315private final 
Set regions;
-316private final ServerName 
serverName;
-317
-318private volatile ServerState state = 
ServerState.ONLINE;
-319private volatile int versionNumber = 
0;
-320
-321public ServerStateNode(final 
ServerName serverName) {
-322  this.serverName = serverName;
-323  this.regions = 
ConcurrentHashMap.newKeySet();
-324  this.reportEvent = new 
ServerReportEvent(serverName);
-325}
-326
-327public ServerName getServerName() {
-328  return serverName;
-329}
+311  /**
+312   * Server State.
+313   */
+314  public enum ServerState {
+315/**
+316 * Initial state. Available.
+317 */
+318ONLINE,
+319
+320/**
+321 * Server expired/crashed. Currently 
undergoing WAL splitting.
+322 */
+323SPLITTING,
+324
+325/**
+326 * WAL splitting done.
+327 */
+328OFFLINE
+329  }
 330
-331public ServerState getState() {
-332  return state;
-333}
-334
-335public int getVersionNumber() {
-336  return versionNumber;
-337}
-338
-339public ProcedureEvent 
getReportEvent() {
-340  return reportEvent;
-341}
+331  /**
+332   * State of Server; list of hosted 
regions, etc.
+333   */
+334  public static class ServerStateNode 
implements Comparable {
+335private final ServerReportEvent 
reportEvent;
+336
+337private final 
Set regions;
+338private final ServerName 
serverName;
+339
+340private volatile ServerState state = 
ServerState.ONLINE;
+341private volatile int versionNumber = 
0;
 342
-343public boolean isInState(final 
ServerState... expected) {
-344  boolean expectedState = false;
-345  if (expected != null) {
-346for (int i = 0; i < 
expected.length; ++i) {
-347  expectedState |= (state == 
expected[i]);
-348}
-349  }
-350  return expectedState;
+343public ServerStateNode(final 
ServerName serverName) {
+344  this.serverName = serverName;
+345  this.regions = 
ConcurrentHashMap.newKeySet();
+346  this.reportEvent = new 
ServerReportEvent(serverName);
+347}
+348
+349public ServerName getServerName() {
+350  return serverName;
 351}
 352
-353public void setState(final 
ServerState state) {
-354  this.state = state;
+353public ServerState getState() {
+354  return state;
 355}
 356
-357public void setVersionNumber(final 
int versionNumber) {
-358  this.versionNumber = 
versionNumber;
+357public int getVersionNumber() {
+358  return versionNumber;
 359}
 360
-361public Set 
getRegions() {
-362  return regions;
+361public ProcedureEvent 
getReportEvent() {
+362  return reportEvent;
 363}
 364
-365public int getRegionCount() {
-366  return regions.size();
+365public boolean isOffline() {
+366  return 
this.state.equals(ServerState.OFFLINE);
 367}
 368
-369public ArrayList 
getRegionInfoList() {
-370  ArrayList hris = 
new ArrayList(regions.size());
-371  for (RegionStateNode region: 
regions) {
-372
hris.add(region.getRegionInfo());
-373  }
-374  return hris;
-375}
-376
-377public void addRegion(final 
RegionStateNode regionNode) {
-378  this.regions.add(regionNode);
-379}
-380
-381public void removeRegion(final 
RegionStateNode regionNode) {
-382  this.regions.remove(regionNode);
-383}
-384
-385@Override
-386public int compareTo(final 
ServerStateNode other) {
-387  return 
getServerName().compareTo(other.getServerName());
-388}
-389
-390@Override
-391public int hashCode() {
-392  return 
getServerName().hashCode();
+369public boolean isInState(final 
ServerState... expected) {
+370  boolean expectedState = false;
+371  if (expected != null) {
+372for (int i = 0; i < 
expected.length; ++i) {
+373  expectedState |= (state == 
expected[i])

[48/51] [partial] hbase-site git commit: Published site at 7d3750bd9fc9747623549c242cc4171e224b3eaf.

2018-06-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3469cbc0/checkstyle-aggregate.html
--
diff --git a/checkstyle-aggregate.html b/checkstyle-aggregate.html
index 17459c2..5dd7b1b 100644
--- a/checkstyle-aggregate.html
+++ b/checkstyle-aggregate.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase – Checkstyle Results
 
@@ -281,10 +281,10 @@
  Warnings
  Errors
 
-3621
+3626
 0
 0
-16261
+16252
 
 Files
 
@@ -4987,7 +4987,7 @@
 org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.java
 0
 0
-16
+6
 
 org/apache/hadoop/hbase/master/assignment/TestAssignmentOnRSCrash.java
 0
@@ -5787,7 +5787,7 @@
 org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java
 0
 0
-4
+5
 
 org/apache/hadoop/hbase/procedure2/RemoteProcedureException.java
 0
@@ -10106,7 +10106,7 @@
 caseIndent: "2"
 basicOffset: "2"
 lineWrappingIndentation: "2"
-5236
+5227
  Error
 
 javadoc
@@ -64126,73 +64126,73 @@
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-2913
+2916
 
  Error
 blocks
 NeedBraces
 'if' construct must use '{}'s.
-3089
+3092
 
  Error
 blocks
 NeedBraces
 'if' construct must use '{}'s.
-3091
+3094
 
  Error
 sizes
 LineLength
 Line is longer than 100 characters (found 109).
-3161
+3164
 
  Error
 indentation
 Indentation
 'throws' has incorrect indentation level 2, expected level should be 
4.
-3182
+3185
 
  Error
 blocks
 NeedBraces
 'for' construct must use '{}'s.
-3212
+3215
 
  Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-3219
+3222
 
  Error
 indentation
 Indentation
 'throws' has incorrect indentation level 2, expected level should be 
4.
-3224
+3227
 
  Error
 blocks
 NeedBraces
 'if' construct must use '{}'s.
-3253
+3256
 
  Error
 sizes
 LineLength
 Line is longer than 100 characters (found 111).
-3351
+3354
 
  Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-3497
+3512
 
  Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-3547
+3562
 
 org/apache/hadoop/hbase/master/HMasterCommandLine.java
 
@@ -64648,13 +64648,13 @@
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-1597
+1601
 
  Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-1658
+1662
 
 org/apache/hadoop/hbase/master/MasterServices.java
 
@@ -64855,61 +64855,61 @@
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-363
+364
 
  Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-371
+372
 
  Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-378
+379
 
  Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-386
+387
 
  Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-394
+395
 
  Error
 javadoc
 JavadocTagContinuationIndentation
 Line continuation have incorrect indentation level, expected level should 
be 2.
-401
+402
 
  Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-402
+403
 
  Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-407
+408
 
  Error
 javadoc
 JavadocTagContinuationIndentation
 Line continuation have incorrect indentation level, expected level should 
be 2.
-409
+410
 
  Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-410
+411
 
 org/apache/hadoop/hbase/master/MasterStatusServlet.java
 
@@ -66028,73 +66028,73 @@
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-686
+692
 
  Error
 blocks
 NeedBraces
 'if' construct must use '{}'s.
-725
+731
 
  Error
 blocks
 NeedBraces
 'if' construct must use '{}'s.
-756
+762
 
  Error
 blocks
 NeedBraces
 'if' construct must use '{}'s.
-758
+764
 
  Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-770
+776
 
  Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-772
+778
 
  Error
 indentation
 Indentation
 'throws' has incorrect indentation level 2, expected level should be 
4.
-776
+782
 
  Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-823
+829
 
  Error
 sizes
 LineLength
 Line is longer than 100 characters (found 106).
-852
+858
 
  Error
 sizes
 LineLength
 Line is longer than 100 characters (found 106).
-865
+871
 
  Error
 sizes
 LineLength
 Line is longer than 100 characters (found 107).
-866
+872
 
  Error
 sizes
 LineLength
 Line is longer than 100 characters (found 103).
-888
+894
 
 org/apache/hadoop/hbase/master/SnapshotOfRegionAssignmentFromMeta.java
 
@@ -67588,85 +67588,85 @@
 blocks
 NeedBraces
 'if' construct must use '{}'s.
-1161

[33/51] [partial] hbase-site git commit: Published site at 7d3750bd9fc9747623549c242cc4171e224b3eaf.

2018-06-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3469cbc0/devapidocs/org/apache/hadoop/hbase/regionserver/SimpleRpcSchedulerFactory.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/SimpleRpcSchedulerFactory.html
 
b/devapidocs/org/apache/hadoop/hbase/regionserver/SimpleRpcSchedulerFactory.html
index 9328921..ad77e51 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/regionserver/SimpleRpcSchedulerFactory.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/regionserver/SimpleRpcSchedulerFactory.html
@@ -50,7 +50,7 @@ var activeTableTab = "activeTableTab";
 
 
 Prev Class
-Next Class
+Next Class
 
 
 Frames
@@ -270,7 +270,7 @@ public 
 
 Prev Class
-Next Class
+Next Class
 
 
 Frames

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3469cbc0/devapidocs/org/apache/hadoop/hbase/regionserver/SnapshotSegmentScanner.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/SnapshotSegmentScanner.html 
b/devapidocs/org/apache/hadoop/hbase/regionserver/SnapshotSegmentScanner.html
new file mode 100644
index 000..05a8970
--- /dev/null
+++ 
b/devapidocs/org/apache/hadoop/hbase/regionserver/SnapshotSegmentScanner.html
@@ -0,0 +1,508 @@
+http://www.w3.org/TR/html4/loose.dtd";>
+
+
+
+
+
+SnapshotSegmentScanner (Apache HBase 3.0.0-SNAPSHOT API)
+
+
+
+
+
+var methods = {"i0":10,"i1":9,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10};
+var tabs = {65535:["t0","All Methods"],1:["t1","Static 
Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]};
+var altColor = "altColor";
+var rowColor = "rowColor";
+var tableTab = "tableTab";
+var activeTableTab = "activeTableTab";
+
+
+JavaScript is disabled on your browser.
+
+
+
+
+
+Skip navigation links
+
+
+
+
+Overview
+Package
+Class
+Use
+Tree
+Deprecated
+Index
+Help
+
+
+
+
+Prev Class
+Next Class
+
+
+Frames
+No Frames
+
+
+All Classes
+
+
+
+
+
+
+
+Summary: 
+Nested | 
+Field | 
+Constr | 
+Method
+
+
+Detail: 
+Field | 
+Constr | 
+Method
+
+
+
+
+
+
+
+
+org.apache.hadoop.hbase.regionserver
+Class 
SnapshotSegmentScanner
+
+
+
+https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">java.lang.Object
+
+
+org.apache.hadoop.hbase.regionserver.NonLazyKeyValueScanner
+
+
+org.apache.hadoop.hbase.regionserver.NonReversedNonLazyKeyValueScanner
+
+
+org.apache.hadoop.hbase.regionserver.SnapshotSegmentScanner
+
+
+
+
+
+
+
+
+
+
+
+All Implemented Interfaces:
+https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.html?is-external=true";
 title="class or interface in java.io">Closeable, https://docs.oracle.com/javase/8/docs/api/java/lang/AutoCloseable.html?is-external=true";
 title="class or interface in java.lang">AutoCloseable, KeyValueScanner, 
Shipper
+
+
+
+@InterfaceAudience.Private
+public class SnapshotSegmentScanner
+extends NonReversedNonLazyKeyValueScanner
+A basic SegmentScanner used against an ImmutableScanner 
snapshot
+ Used flushing where we do a single pass, no reverse scanning or
+ inserts happening. Its a dumbed-down Scanner that can go fast.
+ Like CollectionBackedScanner
+ (but making it know about Segments was onerous).
+
+
+
+
+
+
+
+
+
+
+
+Field Summary
+
+Fields 
+
+Modifier and Type
+Field and Description
+
+
+private Cell
+current 
+
+
+private https://docs.oracle.com/javase/8/docs/api/java/util/Iterator.html?is-external=true";
 title="class or interface in java.util">Iterator
+iter 
+
+
+private ImmutableSegment
+segment 
+
+
+
+
+
+
+Fields inherited from 
interface org.apache.hadoop.hbase.regionserver.KeyValueScanner
+NO_NEXT_INDEXED_KEY
+
+
+
+
+
+
+
+
+Constructor Summary
+
+Constructors 
+
+Constructor and Description
+
+
+SnapshotSegmentScanner(ImmutableSegment segment) 
+
+
+
+
+
+
+
+
+
+Method Summary
+
+All Methods Static Methods Instance Methods Concrete Methods 
+
+Modifier and Type
+Method and Description
+
+
+void
+close()
+Close the KeyValue scanner.
+
+
+
+private static https://docs.oracle.com/javase/8/docs/api/java/util/Iterator.html?is-external=true";
 title="class or interface in java.util">Iterator
+createIterator(Segment segment) 
+
+
+long
+getScannerOrder()
+Get the order of this KeyValueScanner.
+
+
+
+Cell
+next()
+Return the next Cell in this scanner, iterating the 
scanner
+
+
+
+Cell
+peek()
+Look at the next Cell in this scanner, but do not iterate 
scanner.
+
+
+
+boolean
+reseek(Cell seekCell)
+Reseek the scanner at or after th

[35/51] [partial] hbase-site git commit: Published site at 7d3750bd9fc9747623549c242cc4171e224b3eaf.

2018-06-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3469cbc0/devapidocs/org/apache/hadoop/hbase/procedure2/NoServerDispatchException.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/procedure2/NoServerDispatchException.html 
b/devapidocs/org/apache/hadoop/hbase/procedure2/NoServerDispatchException.html
new file mode 100644
index 000..505e685
--- /dev/null
+++ 
b/devapidocs/org/apache/hadoop/hbase/procedure2/NoServerDispatchException.html
@@ -0,0 +1,282 @@
+http://www.w3.org/TR/html4/loose.dtd";>
+
+
+
+
+
+NoServerDispatchException (Apache HBase 3.0.0-SNAPSHOT API)
+
+
+
+
+
+
+
+JavaScript is disabled on your browser.
+
+
+
+
+
+Skip navigation links
+
+
+
+
+Overview
+Package
+Class
+Use
+Tree
+Deprecated
+Index
+Help
+
+
+
+
+Prev Class
+Next Class
+
+
+Frames
+No Frames
+
+
+All Classes
+
+
+
+
+
+
+
+Summary: 
+Nested | 
+Field | 
+Constr | 
+Method
+
+
+Detail: 
+Field | 
+Constr | 
+Method
+
+
+
+
+
+
+
+
+org.apache.hadoop.hbase.procedure2
+Class 
NoServerDispatchException
+
+
+
+https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">java.lang.Object
+
+
+https://docs.oracle.com/javase/8/docs/api/java/lang/Throwable.html?is-external=true";
 title="class or interface in java.lang">java.lang.Throwable
+
+
+https://docs.oracle.com/javase/8/docs/api/java/lang/Exception.html?is-external=true";
 title="class or interface in java.lang">java.lang.Exception
+
+
+https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true";
 title="class or interface in java.io">java.io.IOException
+
+
+org.apache.hadoop.hbase.HBaseIOException
+
+
+org.apache.hadoop.hbase.procedure2.FailedRemoteDispatchException
+
+
+org.apache.hadoop.hbase.procedure2.NoServerDispatchException
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+All Implemented Interfaces:
+https://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true";
 title="class or interface in java.io">Serializable
+
+
+
+@InterfaceAudience.Private
+public class NoServerDispatchException
+extends FailedRemoteDispatchException
+Used internally signaling failed queue of a remote 
procedure operation.
+ In particular, no dispatch Node was found for the passed server name
+ key.
+
+See Also:
+Serialized
 Form
+
+
+
+
+
+
+
+
+
+
+
+
+Constructor Summary
+
+Constructors 
+
+Constructor and Description
+
+
+NoServerDispatchException(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String msg) 
+
+
+
+
+
+
+
+
+
+Method Summary
+
+
+
+
+Methods inherited from class java.lang.https://docs.oracle.com/javase/8/docs/api/java/lang/Throwable.html?is-external=true";
 title="class or interface in java.lang">Throwable
+https://docs.oracle.com/javase/8/docs/api/java/lang/Throwable.html?is-external=true#addSuppressed-java.lang.Throwable-";
 title="class or interface in java.lang">addSuppressed, https://docs.oracle.com/javase/8/docs/api/java/lang/Throwable.html?is-external=true#fillInStackTrace--";
 title="class or interface in java.lang">fillInStackTrace, https://docs.oracle.com/javase/8/docs/api/java/lang/Throwable.html?is-external=true#getCause--";
 title="class or interface in java.lang">getCause, https://docs.oracle.com/javase/8/docs/api/java/lang/Throwable.html?is-external=true#getLocalizedMessage--";
 title="class or interface in java.lang">getLocalizedMessage, https://docs.oracle.com/javase/8/docs/api/java/lang/Throwable.html?is-external=true#getMessage--";
 title="class or interface in java.lang">getMessage, https://docs.oracle.com/javase/8/docs/api/java/lang/Throwable.html?is-external=true#getStackTrace--";
 title="class or
  interface in java.lang">getStackTrace, https://docs.oracle.com/javase/8/docs/api/java/lang/Throwable.html?is-external=true#getSuppressed--";
 title="class or interface in java.lang">getSuppressed, https://docs.oracle.com/javase/8/docs/api/java/lang/Throwable.html?is-external=true#initCause-java.lang.Throwable-";
 title="class or interface in java.lang">initCause, https://docs.oracle.com/javase/8/docs/api/java/lang/Throwable.html?is-external=true#printStackTrace--";
 title="class or interface in java.lang">printStackTrace, https://docs.oracle.com/javase/8/docs/api/java/lang/Throwable.html?is-external=true#printStackTrace-java.io.PrintStream-";
 title="class or interface in java.lang">printStackTrace, https://docs.oracle.com/javase/8/docs/api/java/lang/Throwable.html?is-exte

[46/51] [partial] hbase-site git commit: Published site at 7d3750bd9fc9747623549c242cc4171e224b3eaf.

2018-06-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3469cbc0/devapidocs/org/apache/hadoop/hbase/class-use/Cell.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/class-use/Cell.html 
b/devapidocs/org/apache/hadoop/hbase/class-use/Cell.html
index 0f40fe4..f4a02d0 100644
--- a/devapidocs/org/apache/hadoop/hbase/class-use/Cell.html
+++ b/devapidocs/org/apache/hadoop/hbase/class-use/Cell.html
@@ -5216,39 +5216,43 @@ service.
 
 
 private Cell
-StoreFileScanner.delayedSeekKV 
+SnapshotSegmentScanner.current 
 
 
+private Cell
+StoreFileScanner.delayedSeekKV 
+
+
 protected Cell
 HRegion.RegionScannerImpl.joinedContinuationRow
 If the joined heap data gathering is interrupted due to 
scan limits, this will
  contain the row for which we are populating the values.
 
 
-
+
 private Cell
 SegmentScanner.last 
 
-
+
 private Cell
 StripeMultiFileWriter.BoundaryMultiWriter.lastCell 
 
-
+
 private Cell
 StripeMultiFileWriter.SizeMultiWriter.lastCell 
 
-
+
 private Cell
 ScannerContext.lastPeekedCell 
 
-
+
 static Cell
 KeyValueScanner.NO_NEXT_INDEXED_KEY
 The byte array represents for NO_NEXT_INDEXED_KEY;
  The actual value is irrelevant because this is always compared by 
reference.
 
 
-
+
 private Cell
 StoreScanner.prevCell 
 
@@ -5282,18 +5286,22 @@ service.
 SegmentScanner.iter 
 
 
+private https://docs.oracle.com/javase/8/docs/api/java/util/Iterator.html?is-external=true";
 title="class or interface in java.util">Iterator
+SnapshotSegmentScanner.iter 
+
+
 private https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List
 MemStoreCompactorSegmentsIterator.kvs 
 
-
+
 private https://docs.oracle.com/javase/8/docs/api/java/util/Iterator.html?is-external=true";
 title="class or interface in java.util">Iterator
 MemStoreCompactorSegmentsIterator.kvsIterator 
 
-
+
 private https://docs.oracle.com/javase/8/docs/api/java/util/Optional.html?is-external=true";
 title="class or interface in java.util">Optional
 HStoreFile.lastKey 
 
-
+
 private static https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List
 HRegion.MOCKED_LIST
 A mocked list implementation - discards all updates.
@@ -5574,28 +5582,36 @@ service.
 
 
 Cell
-KeyValueHeap.peek() 
+SnapshotSegmentScanner.next() 
 
 
 Cell
+KeyValueHeap.peek() 
+
+
+Cell
 KeyValueScanner.peek()
 Look at the next Cell in this scanner, but do not iterate 
scanner.
 
 
-
+
 Cell
 SegmentScanner.peek()
 Look at the next Cell in this scanner, but do not iterate 
the scanner
 
 
-
+
 Cell
 StoreScanner.peek() 
 
-
+
 Cell
 StoreFileScanner.peek() 
 
+
+Cell
+SnapshotSegmentScanner.peek() 
+
 
 Cell
 CellSet.pollFirst() 
@@ -5686,36 +5702,36 @@ service.
 CellFlatMap.comparator() 
 
 
+private static https://docs.oracle.com/javase/8/docs/api/java/util/Iterator.html?is-external=true";
 title="class or interface in java.util">Iterator
+SnapshotSegmentScanner.createIterator(Segment segment) 
+
+
 https://docs.oracle.com/javase/8/docs/api/java/util/Iterator.html?is-external=true";
 title="class or interface in java.util">Iterator
 CellSet.descendingIterator() 
 
-
+
 https://docs.oracle.com/javase/8/docs/api/java/util/NavigableSet.html?is-external=true";
 title="class or interface in java.util">NavigableSet
 CellFlatMap.descendingKeySet() 
 
-
+
 https://docs.oracle.com/javase/8/docs/api/java/util/NavigableMap.html?is-external=true";
 title="class or interface in java.util">NavigableMap
 CellFlatMap.descendingMap() 
 
-
+
 https://docs.oracle.com/javase/8/docs/api/java/util/NavigableMap.html?is-external=true";
 title="class or interface in java.util">NavigableMap
 CellFlatMap.descendingMap() 
 
-
+
 https://docs.oracle.com/javase/8/docs/api/java/util/NavigableSet.html?is-external=true";
 title="class or interface in java.util">NavigableSet
 CellSet.descendingSet() 
 
-
-https://docs.oracle.com/javase/8/docs/api/java/util/Set.html?is-external=true";
 title="class or interface in java.util">SetMap.Entry>
-CellFlatMap.entrySet() 
-
 
 https://docs.oracle.com/javase/8/docs/api/java/util/Set.html?is-external=true";
 title="class or interface in java.util">SetMap.Entry>
 CellFlatMap.entrySet() 
 
 
-https://docs.oracle.com/javase/8/docs/api/java/util/Map.Entry.html?is-external=true";
 title="class or interface in java.util">Map.Entry
-CellFlatMap.firstEntry() 
+https://docs.oracle.com/javase/8/docs/api/java/util/Set.html?is-external=true";
 title="class or interface in java.util">Set

[40/51] [partial] hbase-site git commit: Published site at 7d3750bd9fc9747623549c242cc4171e224b3eaf.

2018-06-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3469cbc0/devapidocs/org/apache/hadoop/hbase/master/assignment/FailedRemoteDispatchException.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/assignment/FailedRemoteDispatchException.html
 
b/devapidocs/org/apache/hadoop/hbase/master/assignment/FailedRemoteDispatchException.html
deleted file mode 100644
index ce0d4e8..000
--- 
a/devapidocs/org/apache/hadoop/hbase/master/assignment/FailedRemoteDispatchException.html
+++ /dev/null
@@ -1,276 +0,0 @@
-http://www.w3.org/TR/html4/loose.dtd";>
-
-
-
-
-
-FailedRemoteDispatchException (Apache HBase 3.0.0-SNAPSHOT API)
-
-
-
-
-
-
-
-JavaScript is disabled on your browser.
-
-
-
-
-
-Skip navigation links
-
-
-
-
-Overview
-Package
-Class
-Use
-Tree
-Deprecated
-Index
-Help
-
-
-
-
-Prev Class
-Next Class
-
-
-Frames
-No Frames
-
-
-All Classes
-
-
-
-
-
-
-
-Summary: 
-Nested | 
-Field | 
-Constr | 
-Method
-
-
-Detail: 
-Field | 
-Constr | 
-Method
-
-
-
-
-
-
-
-
-org.apache.hadoop.hbase.master.assignment
-Class 
FailedRemoteDispatchException
-
-
-
-https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">java.lang.Object
-
-
-https://docs.oracle.com/javase/8/docs/api/java/lang/Throwable.html?is-external=true";
 title="class or interface in java.lang">java.lang.Throwable
-
-
-https://docs.oracle.com/javase/8/docs/api/java/lang/Exception.html?is-external=true";
 title="class or interface in java.lang">java.lang.Exception
-
-
-https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true";
 title="class or interface in java.io">java.io.IOException
-
-
-org.apache.hadoop.hbase.HBaseIOException
-
-
-org.apache.hadoop.hbase.master.assignment.FailedRemoteDispatchException
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-All Implemented Interfaces:
-https://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true";
 title="class or interface in java.io">Serializable
-
-
-
-@InterfaceAudience.Private
-public class FailedRemoteDispatchException
-extends HBaseIOException
-Used internally signaling failed queue of a remote procedure
- operation.
-
-See Also:
-Serialized
 Form
-
-
-
-
-
-
-
-
-
-
-
-
-Constructor Summary
-
-Constructors 
-
-Constructor and Description
-
-
-FailedRemoteDispatchException(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String msg) 
-
-
-
-
-
-
-
-
-
-Method Summary
-
-
-
-
-Methods inherited from class java.lang.https://docs.oracle.com/javase/8/docs/api/java/lang/Throwable.html?is-external=true";
 title="class or interface in java.lang">Throwable
-https://docs.oracle.com/javase/8/docs/api/java/lang/Throwable.html?is-external=true#addSuppressed-java.lang.Throwable-";
 title="class or interface in java.lang">addSuppressed, https://docs.oracle.com/javase/8/docs/api/java/lang/Throwable.html?is-external=true#fillInStackTrace--";
 title="class or interface in java.lang">fillInStackTrace, https://docs.oracle.com/javase/8/docs/api/java/lang/Throwable.html?is-external=true#getCause--";
 title="class or interface in java.lang">getCause, https://docs.oracle.com/javase/8/docs/api/java/lang/Throwable.html?is-external=true#getLocalizedMessage--";
 title="class or interface in java.lang">getLocalizedMessage, https://docs.oracle.com/javase/8/docs/api/java/lang/Throwable.html?is-external=true#getMessage--";
 title="class or interface in java.lang">getMessage, https://docs.oracle.com/javase/8/docs/api/java/lang/Throwable.html?is-external=true#getStackTrace--";
 title="class or
  interface in java.lang">getStackTrace, https://docs.oracle.com/javase/8/docs/api/java/lang/Throwable.html?is-external=true#getSuppressed--";
 title="class or interface in java.lang">getSuppressed, https://docs.oracle.com/javase/8/docs/api/java/lang/Throwable.html?is-external=true#initCause-java.lang.Throwable-";
 title="class or interface in java.lang">initCause, https://docs.oracle.com/javase/8/docs/api/java/lang/Throwable.html?is-external=true#printStackTrace--";
 title="class or interface in java.lang">printStackTrace, https://docs.oracle.com/javase/8/docs/api/java/lang/Throwable.html?is-external=true#printStackTrace-java.io.PrintStream-";
 title="class or interface in java.lang">printStackTrace, https://docs.oracle.com/javase/8/docs/api/java/lang/Throwable.html?is-external=true#printStackTrace-java.io.PrintWriter-";
 title="class or interface i

[47/51] [partial] hbase-site git commit: Published site at 7d3750bd9fc9747623549c242cc4171e224b3eaf.

2018-06-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3469cbc0/checkstyle.rss
--
diff --git a/checkstyle.rss b/checkstyle.rss
index ee0f491..8e289b6 100644
--- a/checkstyle.rss
+++ b/checkstyle.rss
@@ -25,8 +25,8 @@ under the License.
 en-us
 ©2007 - 2018 The Apache Software Foundation
 
-  File: 3621,
- Errors: 16261,
+  File: 3626,
+ Errors: 16252,
  Warnings: 0,
  Infos: 0
   
@@ -6444,6 +6444,20 @@ under the License.
   
   
 
+  http://hbase.apache.org/checkstyle.html#org.apache.hadoop.hbase.master.TestServerCrashProcedureStuck.java";>org/apache/hadoop/hbase/master/TestServerCrashProcedureStuck.java
+
+
+  0
+
+
+  0
+
+
+  0
+
+  
+  
+
   http://hbase.apache.org/checkstyle.html#org.apache.hadoop.hbase.client.TestAsyncSingleRequestRpcRetryingCaller.java";>org/apache/hadoop/hbase/client/TestAsyncSingleRequestRpcRetryingCaller.java
 
 
@@ -11022,6 +11036,20 @@ under the License.
   
   
 
+  http://hbase.apache.org/checkstyle.html#org.apache.hadoop.hbase.procedure2.NoNodeDispatchException.java";>org/apache/hadoop/hbase/procedure2/NoNodeDispatchException.java
+
+
+  0
+
+
+  0
+
+
+  0
+
+  
+  
+
   http://hbase.apache.org/checkstyle.html#org.apache.hadoop.hbase.procedure2.store.wal.WALProcedureStore.java";>org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java
 
 
@@ -17737,7 +17765,7 @@ under the License.
   0
 
 
-  16
+  6
 
   
   
@@ -18288,6 +18316,20 @@ under the License.
   
   
 
+  http://hbase.apache.org/checkstyle.html#org.apache.hadoop.hbase.procedure2.NoServerDispatchException.java";>org/apache/hadoop/hbase/procedure2/NoServerDispatchException.java
+
+
+  0
+
+
+  0
+
+
+  0
+
+  
+  
+
   http://hbase.apache.org/checkstyle.html#org.apache.hadoop.hbase.client.TestIllegalTableDescriptor.java";>org/apache/hadoop/hbase/client/TestIllegalTableDescriptor.java
 
 
@@ -34402,6 +3,20 @@ under the License.
   
   
 
+  http://hbase.apache.org/checkstyle.html#org.apache.hadoop.hbase.regionserver.SnapshotSegmentScanner.java";>org/apache/hadoop/hbase/regionserver/SnapshotSegmentScanner.java
+
+
+  0
+
+
+  0
+
+
+  0
+
+  
+  
+
   http://hbase.apache.org/checkstyle.html#org.apache.hadoop.hbase.regionserver.MetricsRegionWrapper.java";>org/apache/hadoop/hbase/regionserver/MetricsRegionWrapper.java
 
 
@@ -35209,7 +35265,7 @@ under the License.
   0
 
 
-  4
+  5
 
   
   
@@ -45014,20 +45070,6 @@ under the License.
   
   
 
-  http://hbase.apache.org/checkstyle.html#org.apache.hadoop.hbase.master.assignment.FailedRemoteDispatchException.java";>org/apache/hadoop/hbase/master/assignment/FailedRemoteDispatchException.java
-
-
-  0
-
-
-  0
-
-
-  0
-
-  
-  
-
   http://hbase.apache.org/checkstyle.html#org.apache.hadoop.hbase.util.FSRegionScanner.java";>org/apache/hadoop/hbase/util/FSRegionScanner.java
 
 
@@ -49900,6 +49942,20 @@ under the License.
   
   
 
+  http://hbase.apache.org/checkstyle.html#org.apache.h

[43/51] [partial] hbase-site git commit: Published site at 7d3750bd9fc9747623549c242cc4171e224b3eaf.

2018-06-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3469cbc0/devapidocs/org/apache/hadoop/hbase/master/MasterRpcServices.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/master/MasterRpcServices.html 
b/devapidocs/org/apache/hadoop/hbase/master/MasterRpcServices.html
index a7531a8..4e7ec73 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/MasterRpcServices.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/MasterRpcServices.html
@@ -1963,7 +1963,7 @@ implements 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.Master
 
 
 unassignRegion
-public org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionResponse unassignRegion(org.apache.hbase.thirdparty.com.google.protobuf.RpcController controller,
+public org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionResponse unassignRegion(org.apache.hbase.thirdparty.com.google.protobuf.RpcController controller,

 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionRequest req)

  throws 
org.apache.hbase.thirdparty.com.google.protobuf.ServiceException
 
@@ -1980,7 +1980,7 @@ implements 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.Master
 
 
 reportRegionStateTransition
-public org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionResponse reportRegionStateTransition(org.apache.hbase.thirdparty.com.google.protobuf.RpcController c,
+public org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionResponse reportRegionStateTransition(org.apache.hbase.thirdparty.com.google.protobuf.RpcController c,

   
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionRequest req)

throws 
org.apache.hbase.thirdparty.com.google.protobuf.ServiceException
 
@@ -1997,7 +1997,7 @@ implements 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.Master
 
 
 setQuota
-public org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetQuotaResponse setQuota(org.apache.hbase.thirdparty.com.google.protobuf.RpcController c,
+public org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetQuotaResponse setQuota(org.apache.hbase.thirdparty.com.google.protobuf.RpcController c,

 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetQuotaRequest req)

  throws 
org.apache.hbase.thirdparty.com.google.protobuf.ServiceException
 
@@ -2014,7 +2014,7 @@ implements 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.Master
 
 
 getLastMajorCompactionTimestamp
-public org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse getLastMajorCompactionTimestamp(org.apache.hbase.thirdparty.com.google.protobuf.RpcController controller,
+public org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse getLastMajorCompactionTimestamp(org.apache.hbase.thirdparty.com.google.protobuf.RpcController controller,


org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest request)

 throws 
org.apache.hbase.thirdparty.com.google.protobuf.ServiceException
 
@@ -2031,7 +2031,7 @@ implements 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.Master
 
 
 getLastMajorCompactionTimestampForRegion
-public org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse getLastMajorCompactionTimestampForRegion(org.apache.hbase.thirdparty.com.google.protobuf.RpcController controller,
+public org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse getLastMajorCompactionTimestampForRegion(org.apache.hbase.thirdparty.com.google.protobuf.RpcController controller,

 
org.apache.hadoop.hbase.shaded.protob

[51/51] [partial] hbase-site git commit: Published site at 7d3750bd9fc9747623549c242cc4171e224b3eaf.

2018-06-05 Thread git-site-role
Published site at 7d3750bd9fc9747623549c242cc4171e224b3eaf.


Project: http://git-wip-us.apache.org/repos/asf/hbase-site/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase-site/commit/3469cbc0
Tree: http://git-wip-us.apache.org/repos/asf/hbase-site/tree/3469cbc0
Diff: http://git-wip-us.apache.org/repos/asf/hbase-site/diff/3469cbc0

Branch: refs/heads/asf-site
Commit: 3469cbc0b98955debbab4b54bd02446a7b892ab9
Parents: cddd306
Author: jenkins 
Authored: Tue Jun 5 14:48:27 2018 +
Committer: jenkins 
Committed: Tue Jun 5 14:48:27 2018 +

--
 acid-semantics.html | 4 +-
 apache_hbase_reference_guide.pdf|  8459 -
 apidocs/deprecated-list.html| 2 +-
 apidocs/index-all.html  | 2 +-
 .../org/apache/hadoop/hbase/client/Scan.html| 4 +-
 .../hadoop/hbase/client/class-use/Scan.html | 2 +-
 .../hadoop/hbase/client/Scan.ReadType.html  | 2 +-
 .../org/apache/hadoop/hbase/client/Scan.html| 2 +-
 book.html   | 5 +-
 bulk-loads.html | 4 +-
 checkstyle-aggregate.html   | 15498 -
 checkstyle.rss  |   106 +-
 coc.html| 4 +-
 dependencies.html   | 4 +-
 dependency-convergence.html | 4 +-
 dependency-info.html| 4 +-
 dependency-management.html  | 4 +-
 devapidocs/allclasses-frame.html| 6 +-
 devapidocs/allclasses-noframe.html  | 6 +-
 devapidocs/constant-values.html | 6 +-
 devapidocs/deprecated-list.html | 2 +-
 devapidocs/index-all.html   |91 +-
 .../apache/hadoop/hbase/HBaseIOException.html   | 2 +-
 .../hadoop/hbase/backup/package-tree.html   | 4 +-
 .../org/apache/hadoop/hbase/class-use/Cell.html |   212 +-
 .../hbase/class-use/HBaseIOException.html   |41 +-
 .../hadoop/hbase/class-use/ServerName.html  |24 +-
 .../org/apache/hadoop/hbase/client/Scan.html| 4 +-
 .../hadoop/hbase/client/class-use/Scan.html | 2 +-
 .../hadoop/hbase/client/package-tree.html   |24 +-
 .../hadoop/hbase/filter/package-tree.html   | 6 +-
 .../hadoop/hbase/io/hfile/package-tree.html | 6 +-
 .../apache/hadoop/hbase/ipc/package-tree.html   | 2 +-
 .../hadoop/hbase/mapreduce/package-tree.html| 4 +-
 .../org/apache/hadoop/hbase/master/HMaster.html |   135 +-
 .../hadoop/hbase/master/MasterRpcServices.html  |76 +-
 .../hadoop/hbase/master/MasterServices.html |53 +-
 .../hadoop/hbase/master/ServerManager.html  |60 +-
 .../AssignProcedure.CompareAssignProcedure.html | 4 +-
 ...signmentManager.RegionInTransitionChore.html | 6 +-
 ...ssignmentManager.RegionInTransitionStat.html |40 +-
 .../master/assignment/AssignmentManager.html|   264 +-
 .../FailedRemoteDispatchException.html  |   276 -
 .../assignment/GCMergedRegionsProcedure.html| 4 +-
 .../master/assignment/MoveRegionProcedure.html  |20 +-
 .../RegionStates.RegionFailedOpen.html  |22 +-
 .../assignment/RegionStates.ServerState.html|24 +-
 .../RegionStates.ServerStateNode.html   |68 +-
 .../hbase/master/assignment/RegionStates.html   |   182 +-
 .../assignment/RegionTransitionProcedure.html   |90 +-
 .../master/assignment/UnassignProcedure.html|77 +-
 .../FailedRemoteDispatchException.html  |   125 -
 .../class-use/RegionStates.RegionStateNode.html |24 +-
 .../class-use/RegionStates.ServerStateNode.html | 6 +-
 .../hbase/master/assignment/package-frame.html  | 4 -
 .../master/assignment/package-summary.html  |22 +-
 .../hbase/master/assignment/package-tree.html   |17 -
 .../hbase/master/assignment/package-use.html| 8 +-
 .../hbase/master/balancer/package-tree.html | 2 +-
 .../hadoop/hbase/master/package-tree.html   | 4 +-
 .../master/procedure/ServerCrashProcedure.html  |42 +-
 .../procedure/class-use/MasterProcedureEnv.html |48 +-
 .../hbase/master/procedure/package-tree.html| 2 +-
 .../replication/RefreshPeerProcedure.html   |48 +-
 .../hadoop/hbase/monitoring/package-tree.html   | 2 +-
 .../org/apache/hadoop/hbase/package-tree.html   |16 +-
 .../hbase/procedure2/DelayedProcedure.html  | 4 +-
 .../FailedRemoteDispatchException.html  |   280 +
 .../hadoop/hbase/procedure2/InlineChore.html| 4 +-
 .../hadoop/hbase/procedure2/LockType.html   | 4 +-
 .../procedure2/NoNodeDispatchException.html |   282 +
 .../procedure2/NoServerDispatchException.html   |   282 +
 .../NullTargetServerDispatchException.

[45/51] [partial] hbase-site git commit: Published site at 7d3750bd9fc9747623549c242cc4171e224b3eaf.

2018-06-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3469cbc0/devapidocs/org/apache/hadoop/hbase/class-use/ServerName.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/class-use/ServerName.html 
b/devapidocs/org/apache/hadoop/hbase/class-use/ServerName.html
index a6c9234..89f25b0 100644
--- a/devapidocs/org/apache/hadoop/hbase/class-use/ServerName.html
+++ b/devapidocs/org/apache/hadoop/hbase/class-use/ServerName.html
@@ -3506,7 +3506,7 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
  ServerName right) 
 
 
-void
+boolean
 ServerManager.expireServer(ServerName serverName) 
 
 
@@ -4162,7 +4162,9 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 
 RegionStates.ServerStateNode
-RegionStates.getOrCreateServer(ServerName serverName) 
+RegionStates.getOrCreateServer(ServerName serverName)
+Be judicious calling this method.
+
 
 
 (package private) static 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse
@@ -4213,17 +4215,31 @@ Input/OutputFormats, a table indexing MapReduce job, 
and utility methods.
 RegionInfo regionInfo) 
 
 
+(package private) boolean
+AssignmentManager.isDeadServerProcessed(ServerName serverName)
+This is a very particular check.
+
+
+
 protected boolean
 RegionTransitionProcedure.isServerOnline(MasterProcedureEnv env,
   ServerName serverName) 
 
+
+private void
+AssignmentManager.killRegionServer(ServerName serverName) 
+
 
 void
-AssignmentManager.killRegionServer(ServerName serverName) 
+RegionStates.logSplit(ServerName serverName)
+Called after we've split all logs on a crashed Server.
+
 
 
 void
-RegionStates.logSplit(ServerName serverName) 
+RegionStates.logSplitting(ServerName serverName)
+Call this when we start log splitting a crashed 
Server.
+
 
 
 void

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3469cbc0/devapidocs/org/apache/hadoop/hbase/client/Scan.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/Scan.html 
b/devapidocs/org/apache/hadoop/hbase/client/Scan.html
index 595e718..9662819 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/Scan.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/Scan.html
@@ -846,7 +846,7 @@ extends Scan
 setStopRow(byte[] stopRow)
 Deprecated. 
-use withStartRow(byte[])
 instead. This method may change the inclusive of
+use withStopRow(byte[])
 instead. This method may change the inclusive of
  the stop row to keep compatible with the old 
behavior.
 
 
@@ -1595,7 +1595,7 @@ public setStopRow
 https://docs.oracle.com/javase/8/docs/api/java/lang/Deprecated.html?is-external=true";
 title="class or interface in java.lang">@Deprecated
 public Scan setStopRow(byte[] stopRow)
-Deprecated. use withStartRow(byte[])
 instead. This method may change the inclusive of
+Deprecated. use withStopRow(byte[])
 instead. This method may change the inclusive of
  the stop row to keep compatible with the old 
behavior.
 Set the stop row of the scan.
  

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3469cbc0/devapidocs/org/apache/hadoop/hbase/client/class-use/Scan.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/class-use/Scan.html 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/Scan.html
index 4add23c..8b0d75a 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/class-use/Scan.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/Scan.html
@@ -570,7 +570,7 @@ service.
 Scan
 Scan.setStopRow(byte[] stopRow)
 Deprecated. 
-use withStartRow(byte[])
 instead. This method may change the inclusive of
+use withStopRow(byte[])
 instead. This method may change the inclusive of
  the stop row to keep compatible with the old 
behavior.
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3469cbc0/devapidocs/org/apache/hadoop/hbase/client/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/package-tree.html 
b/devapidocs/org/apache/hadoop/hbase/client/package-tree.html
index ba2cbd2..44b9e57 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/package-tree.html
@@ -552,24 +552,24 @@
 
 java.lang.https://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true";
 title="class or interface in java.lang">Enum (implements java.lang.https://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true";
 title="class or interface in java.lang">Comparable, java.io.https://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true";
 title="class or interface in java.io">Serializable)
 
-org.apache.hadoop.hbase.client

[37/51] [partial] hbase-site git commit: Published site at 7d3750bd9fc9747623549c242cc4171e224b3eaf.

2018-06-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3469cbc0/devapidocs/org/apache/hadoop/hbase/master/assignment/class-use/RegionStates.RegionStateNode.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/assignment/class-use/RegionStates.RegionStateNode.html
 
b/devapidocs/org/apache/hadoop/hbase/master/assignment/class-use/RegionStates.RegionStateNode.html
index cab0e78..f8ead79 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/master/assignment/class-use/RegionStates.RegionStateNode.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/master/assignment/class-use/RegionStates.RegionStateNode.html
@@ -279,26 +279,42 @@
 RegionStates.RegionStateNode regionNode) 
 
 
+private boolean
+UnassignProcedure.isSafeToProceed(MasterProcedureEnv env,
+   RegionStates.RegionStateNode regionNode,
+   https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true";
 title="class or interface in java.io">IOException exception)
+Our remote call failed but there are a few states where it 
is safe to proceed with the
+ unassign; e.g.
+
+
+
 protected boolean
 RegionTransitionProcedure.isServerOnline(MasterProcedureEnv env,
   RegionStates.RegionStateNode regionNode) 
 
-
+
 void
 AssignmentManager.markRegionAsClosed(RegionStates.RegionStateNode regionNode) 
 
-
+
 void
 AssignmentManager.markRegionAsClosing(RegionStates.RegionStateNode regionNode) 
 
-
+
 void
 AssignmentManager.markRegionAsOpened(RegionStates.RegionStateNode regionNode) 
 
-
+
 void
 AssignmentManager.markRegionAsOpening(RegionStates.RegionStateNode regionNode) 
 
+
+protected void
+UnassignProcedure.proceed(MasterProcedureEnv env,
+   RegionStates.RegionStateNode regionNode)
+Set it up so when procedure is unsuspended, we'll move to 
the procedure finish.
+
+
 
 protected void
 AssignmentManager.queueAssign(RegionStates.RegionStateNode regionNode)

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3469cbc0/devapidocs/org/apache/hadoop/hbase/master/assignment/class-use/RegionStates.ServerStateNode.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/assignment/class-use/RegionStates.ServerStateNode.html
 
b/devapidocs/org/apache/hadoop/hbase/master/assignment/class-use/RegionStates.ServerStateNode.html
index 4c185ad..46ea3a4 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/master/assignment/class-use/RegionStates.ServerStateNode.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/master/assignment/class-use/RegionStates.ServerStateNode.html
@@ -121,7 +121,9 @@
 
 
 RegionStates.ServerStateNode
-RegionStates.getOrCreateServer(ServerName serverName) 
+RegionStates.getOrCreateServer(ServerName serverName)
+Be judicious calling this method.
+
 
 
 protected RegionStates.ServerStateNode
@@ -156,7 +158,7 @@
 RegionStates.ServerStateNode.compareTo(RegionStates.ServerStateNode other) 
 
 
-void
+private void
 AssignmentManager.killRegionServer(RegionStates.ServerStateNode serverNode) 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3469cbc0/devapidocs/org/apache/hadoop/hbase/master/assignment/package-frame.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/assignment/package-frame.html 
b/devapidocs/org/apache/hadoop/hbase/master/assignment/package-frame.html
index f7aedd4..5ab0399 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/assignment/package-frame.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/assignment/package-frame.html
@@ -43,10 +43,6 @@
 
 RegionStates.ServerState
 
-Exceptions
-
-FailedRemoteDispatchException
-
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3469cbc0/devapidocs/org/apache/hadoop/hbase/master/assignment/package-summary.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/assignment/package-summary.html 
b/devapidocs/org/apache/hadoop/hbase/master/assignment/package-summary.html
index c6bba43..3accf1c 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/assignment/package-summary.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/assignment/package-summary.html
@@ -179,7 +179,9 @@
 
 
 RegionStates.ServerStateNode
- 
+
+State of Server; list of hosted regions, etc.
+
 
 
 RegionStateStore
@@ -224,24 +226,8 @@
 
 
 RegionStates.ServerState
- 
-
-
-
-
-
-
-Exception Summary 
-
-Exception
-Description
-
-
-
-FailedRemoteDispatchException
 
-Used internally signaling failed queue of a remote procedure
- operation.
+Server State.
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3469cbc0/devapidocs/org/apache/hadoop/hbase/master/assignment/package-tree.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/assignmen

hbase-site git commit: INFRA-10751 Empty commit

2018-06-05 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 3469cbc0b -> 8952bbd23


INFRA-10751 Empty commit


Project: http://git-wip-us.apache.org/repos/asf/hbase-site/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase-site/commit/8952bbd2
Tree: http://git-wip-us.apache.org/repos/asf/hbase-site/tree/8952bbd2
Diff: http://git-wip-us.apache.org/repos/asf/hbase-site/diff/8952bbd2

Branch: refs/heads/asf-site
Commit: 8952bbd23181c75f2f7c12c98f83463039e28354
Parents: 3469cbc
Author: jenkins 
Authored: Tue Jun 5 14:48:51 2018 +
Committer: jenkins 
Committed: Tue Jun 5 14:48:51 2018 +

--

--




[44/51] [partial] hbase-site git commit: Published site at 7d3750bd9fc9747623549c242cc4171e224b3eaf.

2018-06-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3469cbc0/devapidocs/org/apache/hadoop/hbase/master/HMaster.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/master/HMaster.html 
b/devapidocs/org/apache/hadoop/hbase/master/HMaster.html
index 9e6fbef..b8d9f62 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/HMaster.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/HMaster.html
@@ -3799,13 +3799,16 @@ implements 
 
 isInMaintenanceMode
-public boolean isInMaintenanceMode()
+public boolean isInMaintenanceMode()
+throws https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true";
 title="class or interface in java.io">IOException
 Report whether this master is in maintenance mode.
 
 Specified by:
 isInMaintenanceMode in
 interface MasterServices
 Returns:
 true if master is in maintenanceMode
+Throws:
+https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true";
 title="class or interface in java.io">IOException - if the inquiry 
failed due to an IO problem
 
 
 
@@ -3815,7 +3818,7 @@ implements 
 
 setInitialized
-public void setInitialized(boolean isInitialized)
+public void setInitialized(boolean isInitialized)
 
 
 
@@ -3824,7 +3827,7 @@ implements 
 
 getInitializedEvent
-public ProcedureEvent getInitializedEvent()
+public ProcedureEvent getInitializedEvent()
 
 Specified by:
 getInitializedEvent in
 interface MasterServices
@@ -3839,7 +3842,7 @@ implements 
 
 isServerCrashProcessingEnabled
-public boolean isServerCrashProcessingEnabled()
+public boolean isServerCrashProcessingEnabled()
 ServerCrashProcessingEnabled is set false before completing 
assignMeta to prevent processing
  of crashed servers.
 
@@ -3856,7 +3859,7 @@ implements 
 
 setServerCrashProcessingEnabled
-public void setServerCrashProcessingEnabled(boolean b)
+public void setServerCrashProcessingEnabled(boolean b)
 
 
 
@@ -3865,7 +3868,7 @@ implements 
 
 getServerCrashProcessingEnabledEvent
-public ProcedureEvent getServerCrashProcessingEnabledEvent()
+public ProcedureEvent getServerCrashProcessingEnabledEvent()
 
 
 
@@ -3874,7 +3877,7 @@ implements 
 
 getAverageLoad
-public double getAverageLoad()
+public double getAverageLoad()
 Compute the average load across all region servers.
  Currently, this uses a very naive computation - just uses the number of
  regions being served, ignoring stats about number of requests.
@@ -3890,7 +3893,7 @@ implements 
 
 getSplitPlanCount
-public long getSplitPlanCount()
+public long getSplitPlanCount()
 
 
 
@@ -3899,7 +3902,7 @@ implements 
 
 getMergePlanCount
-public long getMergePlanCount()
+public long getMergePlanCount()
 
 
 
@@ -3908,7 +3911,7 @@ implements 
 
 registerService
-public boolean registerService(com.google.protobuf.Service instance)
+public boolean registerService(com.google.protobuf.Service instance)
 Description copied from 
interface: RegionServerServices
 Registers a new protocol buffer Service 
subclass as a coprocessor endpoint to be
  available for handling
@@ -3932,7 +3935,7 @@ implements 
 
 constructMaster
-public static HMaster constructMaster(https://docs.oracle.com/javase/8/docs/api/java/lang/Class.html?is-external=true";
 title="class or interface in java.lang">Class masterClass,
+public static HMaster constructMaster(https://docs.oracle.com/javase/8/docs/api/java/lang/Class.html?is-external=true";
 title="class or interface in java.lang">Class masterClass,
   
org.apache.hadoop.conf.Configuration conf)
 Utility for constructing an instance of the passed HMaster 
class.
 
@@ -3949,7 +3952,7 @@ implements 
 
 main
-public static void main(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String[] args)
+public static void main(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String[] args)
 
 See Also:
 HMasterCommandLine
@@ -3962,7 +3965,7 @@ implements 
 
 getHFileCleaner
-public HFileCleaner getHFileCleaner()
+public HFileCleaner getHFileCleaner()
 
 
 
@@ -3971,7 +3974,7 @@ implements 
 
 getLogCleaner
-public LogCleaner getLogCleaner()
+public LogCleaner getLogCleaner()
 
 
 
@@ -3980,7 +3983,7 @@ implements 
 
 getSnapshotManager
-public SnapshotManager getSnapshotManager()
+public SnapshotManager getSnapshotManager()
 
 Specified by:
 getSnapshotManager in
 interface MasterServices
@@ -3995,7 +3998,7 @@ implements 
 
 getMasterProcedureManagerHost
-public MasterProcedureManagerHost getMasterProcedureManagerHost()
+public MasterProcedureManagerHost getMasterProcedureManagerHost()
 
 Specified by:
 getMasterProcedureManagerHost in
 interface MasterServices
@@ -4010,7 +4013,7 @@ implements 
 
 getClusterSchema
-public ClusterSchema getClusterSchema()
+public ClusterS

[34/51] [partial] hbase-site git commit: Published site at 7d3750bd9fc9747623549c242cc4171e224b3eaf.

2018-06-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3469cbc0/devapidocs/org/apache/hadoop/hbase/procedure2/class-use/FailedRemoteDispatchException.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/procedure2/class-use/FailedRemoteDispatchException.html
 
b/devapidocs/org/apache/hadoop/hbase/procedure2/class-use/FailedRemoteDispatchException.html
new file mode 100644
index 000..53eb717
--- /dev/null
+++ 
b/devapidocs/org/apache/hadoop/hbase/procedure2/class-use/FailedRemoteDispatchException.html
@@ -0,0 +1,179 @@
+http://www.w3.org/TR/html4/loose.dtd";>
+
+
+
+
+
+Uses of Class 
org.apache.hadoop.hbase.procedure2.FailedRemoteDispatchException (Apache HBase 
3.0.0-SNAPSHOT API)
+
+
+
+
+
+
+
+JavaScript is disabled on your browser.
+
+
+
+
+
+Skip navigation links
+
+
+
+
+Overview
+Package
+Class
+Use
+Tree
+Deprecated
+Index
+Help
+
+
+
+
+Prev
+Next
+
+
+Frames
+No Frames
+
+
+All Classes
+
+
+
+
+
+
+
+
+
+
+Uses of 
Classorg.apache.hadoop.hbase.procedure2.FailedRemoteDispatchException
+
+
+
+
+
+Packages that use FailedRemoteDispatchException 
+
+Package
+Description
+
+
+
+org.apache.hadoop.hbase.procedure2
+ 
+
+
+
+
+
+
+
+
+
+Uses of FailedRemoteDispatchException in org.apache.hadoop.hbase.procedure2
+
+Subclasses of FailedRemoteDispatchException in org.apache.hadoop.hbase.procedure2 
+
+Modifier and Type
+Class and Description
+
+
+
+class 
+NoNodeDispatchException
+Used internally signaling failed queue of a remote 
procedure operation.
+
+
+
+class 
+NoServerDispatchException
+Used internally signaling failed queue of a remote 
procedure operation.
+
+
+
+class 
+NullTargetServerDispatchException
+Used internally signaling failed queue of a remote 
procedure operation.
+
+
+
+
+
+
+
+
+
+
+
+
+
+Skip navigation links
+
+
+
+
+Overview
+Package
+Class
+Use
+Tree
+Deprecated
+Index
+Help
+
+
+
+
+Prev
+Next
+
+
+Frames
+No Frames
+
+
+All Classes
+
+
+
+
+
+
+
+
+
+Copyright © 2007–2018 https://www.apache.org/";>The Apache Software Foundation. All rights 
reserved.
+
+

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3469cbc0/devapidocs/org/apache/hadoop/hbase/procedure2/class-use/NoNodeDispatchException.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/procedure2/class-use/NoNodeDispatchException.html
 
b/devapidocs/org/apache/hadoop/hbase/procedure2/class-use/NoNodeDispatchException.html
new file mode 100644
index 000..7952270
--- /dev/null
+++ 
b/devapidocs/org/apache/hadoop/hbase/procedure2/class-use/NoNodeDispatchException.html
@@ -0,0 +1,168 @@
+http://www.w3.org/TR/html4/loose.dtd";>
+
+
+
+
+
+Uses of Class 
org.apache.hadoop.hbase.procedure2.NoNodeDispatchException (Apache HBase 
3.0.0-SNAPSHOT API)
+
+
+
+
+
+
+
+JavaScript is disabled on your browser.
+
+
+
+
+
+Skip navigation links
+
+
+
+
+Overview
+Package
+Class
+Use
+Tree
+Deprecated
+Index
+Help
+
+
+
+
+Prev
+Next
+
+
+Frames
+No Frames
+
+
+All Classes
+
+
+
+
+
+
+
+
+
+
+Uses 
of Classorg.apache.hadoop.hbase.procedure2.NoNodeDispatchException
+
+
+
+
+
+Packages that use NoNodeDispatchException 
+
+Package
+Description
+
+
+
+org.apache.hadoop.hbase.procedure2
+ 
+
+
+
+
+
+
+
+
+
+Uses of NoNodeDispatchException in org.apache.hadoop.hbase.procedure2
+
+Methods in org.apache.hadoop.hbase.procedure2
 that throw NoNodeDispatchException 
+
+Modifier and Type
+Method and Description
+
+
+
+void
+RemoteProcedureDispatcher.addOperationToNode(TRemote key,
+  RemoteProcedureDispatcher.RemoteProcedure rp)
+Add a remote rpc.
+
+
+
+
+
+
+
+
+
+
+
+
+
+Skip navigation links
+
+
+
+
+Overview
+Package
+Class
+Use
+Tree
+Deprecated
+Index
+Help
+
+
+
+
+Prev
+Next
+
+
+Frames
+No Fr

[42/51] [partial] hbase-site git commit: Published site at 7d3750bd9fc9747623549c242cc4171e224b3eaf.

2018-06-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3469cbc0/devapidocs/org/apache/hadoop/hbase/master/ServerManager.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/master/ServerManager.html 
b/devapidocs/org/apache/hadoop/hbase/master/ServerManager.html
index 49b399a..af6c596 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/ServerManager.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/ServerManager.html
@@ -353,7 +353,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 
-void
+boolean
 expireServer(ServerName serverName) 
 
 
@@ -1126,7 +1126,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 expireServer
-public void expireServer(ServerName serverName)
+public boolean expireServer(ServerName serverName)
 
 
 
@@ -1135,7 +1135,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 moveFromOnlineToDeadServers
-public void moveFromOnlineToDeadServers(ServerName sn)
+public void moveFromOnlineToDeadServers(ServerName sn)
 
 
 
@@ -1144,7 +1144,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 processDeadServer
-public void processDeadServer(ServerName serverName,
+public void processDeadServer(ServerName serverName,
   boolean shouldSplitWal)
 
 
@@ -1154,7 +1154,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 processQueuedDeadServers
-void processQueuedDeadServers()
+void processQueuedDeadServers()
 Process the servers which died during master's 
initialization. It will be
  called after HMaster#assignMeta and AssignmentManager#joinCluster.
 
@@ -1165,7 +1165,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 removeServerFromDrainList
-public boolean removeServerFromDrainList(ServerName sn)
+public boolean removeServerFromDrainList(ServerName sn)
 
 
 
@@ -1174,7 +1174,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 addServerToDrainList
-public boolean addServerToDrainList(ServerName sn)
+public boolean addServerToDrainList(ServerName sn)
 Add the server to the drain list.
 
 Parameters:
@@ -1190,7 +1190,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 newRpcController
-private HBaseRpcController newRpcController()
+private HBaseRpcController newRpcController()
 
 
 
@@ -1199,7 +1199,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 sendRegionWarmup
-public void sendRegionWarmup(ServerName server,
+public void sendRegionWarmup(ServerName server,
  RegionInfo region)
 Sends a WARMUP RPC to the specified server to warmup the 
specified region.
  
@@ -1218,7 +1218,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 closeRegionSilentlyAndWait
-public static void closeRegionSilentlyAndWait(ClusterConnection connection,
+public static void closeRegionSilentlyAndWait(ClusterConnection connection,
   ServerName server,
   RegionInfo region,
   long timeout)
@@ -1239,7 +1239,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 getRsAdmin
-public org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface getRsAdmin(ServerName sn)
+public org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface getRsAdmin(ServerName sn)

 throws https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true";
 title="class or interface in java.io">IOException
 
 Parameters:
@@ -1258,7 +1258,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 getMinToStart
-private int getMinToStart()
+private int getMinToStart()
 Calculate min necessary to start. This is not an absolute. 
It is just
  a friction that will cause us hang around a bit longer waiting on
  RegionServers to check-in.
@@ -1270,7 +1270,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 waitForRegionServers
-public void waitForRegionServers(MonitoredTask status)
+public void waitForRegionServers(MonitoredTask status)
   throws https://docs.oracle.com/javase/8/docs/api/java/lang/InterruptedException.html?is-external=true";
 title="class or interface in java.lang">InterruptedException
 Wait for the region servers to report in.
  We will wait until one of this condition is met:
@@ -1293,7 +1293,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 getStrForMax
-private https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or i

[50/51] [partial] hbase-site git commit: Published site at 7d3750bd9fc9747623549c242cc4171e224b3eaf.

2018-06-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3469cbc0/apache_hbase_reference_guide.pdf
--
diff --git a/apache_hbase_reference_guide.pdf b/apache_hbase_reference_guide.pdf
index 9b527d9..488a1b7 100644
--- a/apache_hbase_reference_guide.pdf
+++ b/apache_hbase_reference_guide.pdf
@@ -5,8 +5,8 @@
 /Author (Apache HBase Team)
 /Creator (Asciidoctor PDF 1.5.0.alpha.15, based on Prawn 2.2.2)
 /Producer (Apache HBase Team)
-/ModDate (D:20180604142953+00'00')
-/CreationDate (D:20180604144614+00'00')
+/ModDate (D:20180605142953+00'00')
+/CreationDate (D:20180605144523+00'00')
 >>
 endobj
 2 0 obj
@@ -24,7 +24,7 @@ endobj
 3 0 obj
 << /Type /Pages
 /Count 727
-/Kids [7 0 R 12 0 R 14 0 R 16 0 R 18 0 R 20 0 R 22 0 R 24 0 R 44 0 R 47 0 R 50 
0 R 54 0 R 61 0 R 63 0 R 67 0 R 69 0 R 71 0 R 78 0 R 81 0 R 83 0 R 89 0 R 92 0 
R 94 0 R 96 0 R 103 0 R 110 0 R 115 0 R 117 0 R 133 0 R 138 0 R 146 0 R 155 0 R 
163 0 R 172 0 R 183 0 R 187 0 R 189 0 R 193 0 R 202 0 R 211 0 R 219 0 R 228 0 R 
233 0 R 242 0 R 250 0 R 259 0 R 272 0 R 279 0 R 289 0 R 297 0 R 305 0 R 312 0 R 
320 0 R 327 0 R 333 0 R 340 0 R 348 0 R 357 0 R 366 0 R 380 0 R 387 0 R 395 0 R 
402 0 R 410 0 R 419 0 R 429 0 R 437 0 R 444 0 R 453 0 R 465 0 R 475 0 R 482 0 R 
489 0 R 497 0 R 506 0 R 514 0 R 519 0 R 523 0 R 528 0 R 532 0 R 548 0 R 559 0 R 
563 0 R 578 0 R 583 0 R 588 0 R 590 0 R 592 0 R 595 0 R 597 0 R 599 0 R 607 0 R 
613 0 R 616 0 R 620 0 R 629 0 R 640 0 R 648 0 R 652 0 R 656 0 R 658 0 R 668 0 R 
683 0 R 690 0 R 701 0 R 711 0 R 722 0 R 734 0 R 754 0 R 763 0 R 767 0 R 773 0 R 
776 0 R 780 0 R 784 0 R 787 0 R 790 0 R 792 0 R 795 0 R 799 0 R 801 0 R 805 0 R 
811 0 R 816 0 R 820 0 R 823 0 R 829 0 R
  831 0 R 835 0 R 843 0 R 845 0 R 848 0 R 851 0 R 854 0 R 857 0 R 871 0 R 879 0 
R 890 0 R 901 0 R 907 0 R 917 0 R 928 0 R 931 0 R 935 0 R 938 0 R 943 0 R 952 0 
R 960 0 R 964 0 R 968 0 R 973 0 R 977 0 R 979 0 R 995 0 R 1006 0 R 1011 0 R 
1018 0 R 1021 0 R 1029 0 R 1037 0 R 1042 0 R 1047 0 R 1052 0 R 1054 0 R 1056 0 
R 1058 0 R 1068 0 R 1076 0 R 1080 0 R 1087 0 R 1094 0 R 1102 0 R 1106 0 R 1112 
0 R 1117 0 R 1125 0 R 1129 0 R 1134 0 R 1136 0 R 1142 0 R 1150 0 R 1156 0 R 
1163 0 R 1174 0 R 1178 0 R 1180 0 R 1182 0 R 1186 0 R 1189 0 R 1194 0 R 1197 0 
R 1209 0 R 1213 0 R 1219 0 R 1227 0 R 1232 0 R 1236 0 R 1240 0 R 1242 0 R 1245 
0 R 1248 0 R 1251 0 R 1255 0 R 1259 0 R 1263 0 R 1268 0 R 1272 0 R 1275 0 R 
1277 0 R 1287 0 R 1290 0 R 1298 0 R 1307 0 R 1313 0 R 1317 0 R 1319 0 R 1330 0 
R 1333 0 R 1339 0 R 1347 0 R 1350 0 R 1357 0 R 1365 0 R 1367 0 R 1369 0 R 1378 
0 R 1380 0 R 1382 0 R 1385 0 R 1387 0 R 1389 0 R 1391 0 R 1393 0 R 1396 0 R 
1400 0 R 1405 0 R 1407 0 R 1409 0 R 1411 0 R 1416 0 R 1423 0
  R 1429 0 R 1432 0 R 1434 0 R 1437 0 R 1441 0 R 1445 0 R 1448 0 R 1450 0 R 
1452 0 R 1455 0 R 1460 0 R 1466 0 R 1474 0 R 1488 0 R 1502 0 R 1505 0 R 1510 0 
R 1523 0 R 1528 0 R 1543 0 R 1551 0 R 1555 0 R 1564 0 R 1579 0 R 1593 0 R 1601 
0 R 1606 0 R 1617 0 R 1622 0 R 1628 0 R 1634 0 R 1646 0 R 1649 0 R 1658 0 R 
1661 0 R 1670 0 R 1676 0 R 1680 0 R 1692 0 R 1697 0 R 1703 0 R 1705 0 R 1712 0 
R 1720 0 R 1728 0 R 1732 0 R 1734 0 R 1736 0 R 1748 0 R 1754 0 R 1763 0 R 1769 
0 R 1782 0 R 1788 0 R 1794 0 R 1805 0 R 1811 0 R 1816 0 R 1821 0 R 1824 0 R 
1827 0 R 1832 0 R 1837 0 R 1844 0 R 1848 0 R 1853 0 R 1862 0 R 1867 0 R 1872 0 
R 1874 0 R 1883 0 R 1890 0 R 1896 0 R 1901 0 R 1905 0 R 1909 0 R 1914 0 R 1919 
0 R 1925 0 R 1927 0 R 1929 0 R 1932 0 R 1943 0 R 1946 0 R 1953 0 R 1961 0 R 
1966 0 R 1970 0 R 1975 0 R 1977 0 R 1980 0 R 1985 0 R 1988 0 R 1990 0 R 1993 0 
R 1996 0 R 1999 0 R 2009 0 R 2014 0 R 2019 0 R 2021 0 R 2029 0 R 2036 0 R 2043 
0 R 2049 0 R 2054 0 R 2056 0 R 2065 0 R 2075 0 R 2085 0 R 2091
  0 R 2098 0 R 2100 0 R 2105 0 R 2107 0 R 2109 0 R 2113 0 R 2116 0 R 2119 0 R 
2124 0 R 2128 0 R 2139 0 R 2142 0 R 2147 0 R 2150 0 R 2152 0 R 2157 0 R 2167 0 
R 2169 0 R 2171 0 R 2173 0 R 2175 0 R 2178 0 R 2180 0 R 2182 0 R 2185 0 R 2187 
0 R 2189 0 R 2193 0 R 2198 0 R 2207 0 R 2209 0 R 2211 0 R 2217 0 R 2219 0 R 
2224 0 R 2226 0 R 2228 0 R 2235 0 R 2240 0 R 2244 0 R 2249 0 R 2253 0 R 2255 0 
R 2257 0 R 2261 0 R 2264 0 R 2266 0 R 2268 0 R 2272 0 R 2274 0 R 2277 0 R 2279 
0 R 2281 0 R 2283 0 R 2290 0 R 2293 0 R 2298 0 R 2300 0 R 2302 0 R 2304 0 R 
2306 0 R 2314 0 R 2325 0 R 2339 0 R 2350 0 R 2354 0 R 2359 0 R 2363 0 R 2366 0 
R 2371 0 R 2377 0 R 2379 0 R 2382 0 R 2384 0 R 2386 0 R 2388 0 R 2393 0 R 2395 
0 R 2408 0 R 2411 0 R 2419 0 R 2425 0 R 2437 0 R 2451 0 R 2464 0 R 2483 0 R 
2485 0 R 2487 0 R 2491 0 R 2509 0 R 2515 0 R 2527 0 R 2531 0 R 2535 0 R 2544 0 
R 2556 0 R 2561 0 R 2571 0 R 2584 0 R 2603 0 R 2612 0 R 2615 0 R 2624 0 R 2641 
0 R 2648 0 R 2651 0 R 2656 0 R 2660 0 R 2663 0 R 2672 0 R 26
 81 0 R 2684 0 R 2686 0 R 2690 0 R 2704 0 R 2713 0 R 2718 0 R 2722 0 R 2725 0 R 
2727 0 R 2729 0 R 2731 0 R 2736 0 R 2749 0 R 2759 0 R 2767 0 R 2773 0 R 2778 0 
R 2789 0 R 2796 0 R 2802 0 R 2804 0 R 2813 0 R 2821 0 R 28

[39/51] [partial] hbase-site git commit: Published site at 7d3750bd9fc9747623549c242cc4171e224b3eaf.

2018-06-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3469cbc0/devapidocs/org/apache/hadoop/hbase/master/assignment/RegionStates.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/assignment/RegionStates.html 
b/devapidocs/org/apache/hadoop/hbase/master/assignment/RegionStates.html
index 87df2e7..55e47d6 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/assignment/RegionStates.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/assignment/RegionStates.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":9,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10};
+var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":9,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10};
 var tabs = {65535:["t0","All Methods"],1:["t1","Static 
Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -157,11 +157,15 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 static class 
-RegionStates.ServerState 
+RegionStates.ServerState
+Server State.
+
 
 
 static class 
-RegionStates.ServerStateNode 
+RegionStates.ServerStateNode
+State of Server; list of hosted regions, etc.
+
 
 
 
@@ -307,7 +311,9 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 RegionStates.ServerStateNode
-getOrCreateServer(ServerName serverName) 
+getOrCreateServer(ServerName serverName)
+Be judicious calling this method.
+
 
 
 https://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true";
 title="class or interface in java.util">Map
@@ -452,35 +458,43 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 void
-logSplit(ServerName serverName) 
+logSplit(ServerName serverName)
+Called after we've split all logs on a crashed Server.
+
 
 
+void
+logSplitting(ServerName serverName)
+Call this when we start log splitting a crashed 
Server.
+
+
+
 static https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String
 regionNamesToString(https://docs.oracle.com/javase/8/docs/api/java/util/Collection.html?is-external=true";
 title="class or interface in 
java.util">Collection regions) 
 
-
+
 void
 removeFromFailedOpen(RegionInfo regionInfo) 
 
-
+
 void
 removeFromOfflineRegions(RegionInfo regionInfo) 
 
-
+
 RegionStates.ServerStateNode
 removeRegionFromServer(ServerName serverName,
   RegionStates.RegionStateNode regionNode) 
 
-
+
 protected void
 removeRegionInTransition(RegionStates.RegionStateNode regionNode,
 RegionTransitionProcedure procedure) 
 
-
+
 void
 removeServer(ServerName serverName) 
 
-
+
 void
 updateRegionState(RegionInfo regionInfo,
  RegionState.State state) 
@@ -540,7 +554,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 REGION_STATE_STAMP_COMPARATOR
-public static final RegionStates.RegionStateStampComparator
 REGION_STATE_STAMP_COMPARATOR
+public static final RegionStates.RegionStateStampComparator
 REGION_STATE_STAMP_COMPARATOR
 
 
 
@@ -549,7 +563,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 regionsMap
-private final https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ConcurrentSkipListMap.html?is-external=true";
 title="class or interface in 
java.util.concurrent">ConcurrentSkipListMap 
regionsMap
+private final https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ConcurrentSkipListMap.html?is-external=true";
 title="class or interface in 
java.util.concurrent">ConcurrentSkipListMap 
regionsMap
 RegionName -- i.e. RegionInfo.getRegionName() -- as bytes 
to RegionStates.RegionStateNode
 
 
@@ -559,7 +573,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 regionInTransition
-private final https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ConcurrentSkipListMap.html?is-external=true";
 t

[25/51] [partial] hbase-site git commit: Published site at 7d3750bd9fc9747623549c242cc4171e224b3eaf.

2018-06-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3469cbc0/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterServices.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterServices.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterServices.html
index d1c6123..23d8d73 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterServices.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterServices.html
@@ -360,161 +360,162 @@
 352
 353  /**
 354   * @return true if master is in 
maintanceMode
-355   */
-356  boolean isInMaintenanceMode();
-357
-358  /**
-359   * Abort a procedure.
-360   * @param procId ID of the procedure
-361   * @param mayInterruptIfRunning if the 
proc completed at least one step, should it be aborted?
-362   * @return true if aborted, false if 
procedure already completed or does not exist
-363   * @throws IOException
-364   */
-365  public boolean abortProcedure(final 
long procId, final boolean mayInterruptIfRunning)
-366  throws IOException;
-367
-368  /**
-369   * Get procedures
-370   * @return procedure list
-371   * @throws IOException
-372   */
-373  public List> 
getProcedures() throws IOException;
-374
-375  /**
-376   * Get locks
-377   * @return lock list
-378   * @throws IOException
-379   */
-380  public List 
getLocks() throws IOException;
-381
-382  /**
-383   * Get list of table descriptors by 
namespace
-384   * @param name namespace name
-385   * @return descriptors
-386   * @throws IOException
-387   */
-388  public List 
listTableDescriptorsByNamespace(String name) throws IOException;
-389
-390  /**
-391   * Get list of table names by 
namespace
-392   * @param name namespace name
-393   * @return table names
-394   * @throws IOException
-395   */
-396  public List 
listTableNamesByNamespace(String name) throws IOException;
-397
-398  /**
-399   * @param table the table for which 
last successful major compaction time is queried
-400   * @return the timestamp of the last 
successful major compaction for the passed table,
-401   * or 0 if no HFile resulting from a 
major compaction exists
-402   * @throws IOException
-403   */
-404  public long 
getLastMajorCompactionTimestamp(TableName table) throws IOException;
-405
-406  /**
-407   * @param regionName
-408   * @return the timestamp of the last 
successful major compaction for the passed region
-409   * or 0 if no HFile resulting from a 
major compaction exists
-410   * @throws IOException
-411   */
-412  public long 
getLastMajorCompactionTimestampForRegion(byte[] regionName) throws 
IOException;
-413
-414  /**
-415   * @return load balancer
-416   */
-417  public LoadBalancer 
getLoadBalancer();
-418
-419  boolean 
isSplitOrMergeEnabled(MasterSwitchType switchType);
-420
-421  /**
-422   * @return Favored Nodes Manager
-423   */
-424  public FavoredNodesManager 
getFavoredNodesManager();
-425
-426  /**
-427   * Add a new replication peer for 
replicating data to slave cluster
-428   * @param peerId a short name that 
identifies the peer
-429   * @param peerConfig configuration for 
the replication slave cluster
-430   * @param enabled peer state, true if 
ENABLED and false if DISABLED
-431   */
-432  long addReplicationPeer(String peerId, 
ReplicationPeerConfig peerConfig, boolean enabled)
-433  throws ReplicationException, 
IOException;
-434
-435  /**
-436   * Removes a peer and stops the 
replication
-437   * @param peerId a short name that 
identifies the peer
-438   */
-439  long removeReplicationPeer(String 
peerId) throws ReplicationException, IOException;
-440
-441  /**
-442   * Restart the replication stream to 
the specified peer
-443   * @param peerId a short name that 
identifies the peer
-444   */
-445  long enableReplicationPeer(String 
peerId) throws ReplicationException, IOException;
-446
-447  /**
-448   * Stop the replication stream to the 
specified peer
-449   * @param peerId a short name that 
identifies the peer
-450   */
-451  long disableReplicationPeer(String 
peerId) throws ReplicationException, IOException;
-452
-453  /**
-454   * Returns the configured 
ReplicationPeerConfig for the specified peer
-455   * @param peerId a short name that 
identifies the peer
-456   * @return ReplicationPeerConfig for 
the peer
-457   */
-458  ReplicationPeerConfig 
getReplicationPeerConfig(String peerId) throws ReplicationException,
-459  IOException;
-460
-461  /**
-462   * Returns the {@link 
ReplicationPeerManager}.
-463   */
-464  ReplicationPeerManager 
getReplicationPeerManager();
-465
-466  /**
-467   * Update the peerConfig for the 
specified peer
-468   * @param peerId a short name that 
identifies the peer
-469   * @param peerConfig new config for the 
peer
-470   */
-471  long updateReplicationPeerConfig(String 
peerId, ReplicationPeerConfig peerConfig)
-472  throws ReplicationE

[31/51] [partial] hbase-site git commit: Published site at 7d3750bd9fc9747623549c242cc4171e224b3eaf.

2018-06-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3469cbc0/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html
index aa48364..9549aa5 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html
@@ -2830,843 +2830,858 @@
 2822   * @return true if master is in 
maintenanceMode
 2823   */
 2824  @Override
-2825  public boolean isInMaintenanceMode() 
{
-2826return 
maintenanceModeTracker.isInMaintenanceMode();
-2827  }
-2828
-2829  @VisibleForTesting
-2830  public void setInitialized(boolean 
isInitialized) {
-2831
procedureExecutor.getEnvironment().setEventReady(initialized, isInitialized);
-2832  }
-2833
-2834  @Override
-2835  public ProcedureEvent 
getInitializedEvent() {
-2836return initialized;
-2837  }
-2838
-2839  /**
-2840   * ServerCrashProcessingEnabled is set 
false before completing assignMeta to prevent processing
-2841   * of crashed servers.
-2842   * @return true if assignMeta has 
completed;
-2843   */
-2844  @Override
-2845  public boolean 
isServerCrashProcessingEnabled() {
-2846return 
serverCrashProcessingEnabled.isReady();
-2847  }
-2848
-2849  @VisibleForTesting
-2850  public void 
setServerCrashProcessingEnabled(final boolean b) {
-2851
procedureExecutor.getEnvironment().setEventReady(serverCrashProcessingEnabled, 
b);
-2852  }
-2853
-2854  public ProcedureEvent 
getServerCrashProcessingEnabledEvent() {
-2855return 
serverCrashProcessingEnabled;
-2856  }
-2857
-2858  /**
-2859   * Compute the average load across all 
region servers.
-2860   * Currently, this uses a very naive 
computation - just uses the number of
-2861   * regions being served, ignoring 
stats about number of requests.
-2862   * @return the average load
-2863   */
-2864  public double getAverageLoad() {
-2865if (this.assignmentManager == null) 
{
-2866  return 0;
-2867}
-2868
-2869RegionStates regionStates = 
this.assignmentManager.getRegionStates();
-2870if (regionStates == null) {
-2871  return 0;
-2872}
-2873return 
regionStates.getAverageLoad();
-2874  }
-2875
-2876  /*
-2877   * @return the count of region split 
plans executed
-2878   */
-2879  public long getSplitPlanCount() {
-2880return splitPlanCount;
-2881  }
-2882
-2883  /*
-2884   * @return the count of region merge 
plans executed
-2885   */
-2886  public long getMergePlanCount() {
-2887return mergePlanCount;
-2888  }
-2889
-2890  @Override
-2891  public boolean registerService(Service 
instance) {
-2892/*
-2893 * No stacking of instances is 
allowed for a single service name
-2894 */
-2895Descriptors.ServiceDescriptor 
serviceDesc = instance.getDescriptorForType();
-2896String serviceName = 
CoprocessorRpcUtils.getServiceName(serviceDesc);
-2897if 
(coprocessorServiceHandlers.containsKey(serviceName)) {
-2898  LOG.error("Coprocessor service 
"+serviceName+
-2899  " already registered, 
rejecting request from "+instance
-2900  );
-2901  return false;
-2902}
-2903
-2904
coprocessorServiceHandlers.put(serviceName, instance);
-2905if (LOG.isDebugEnabled()) {
-2906  LOG.debug("Registered master 
coprocessor service: service="+serviceName);
-2907}
-2908return true;
-2909  }
-2910
-2911  /**
-2912   * Utility for constructing an 
instance of the passed HMaster class.
-2913   * @param masterClass
-2914   * @return HMaster instance.
-2915   */
-2916  public static HMaster 
constructMaster(Class masterClass,
-2917  final Configuration conf)  {
-2918try {
-2919  Constructor c = masterClass.getConstructor(Configuration.class);
-2920  return c.newInstance(conf);
-2921} catch(Exception e) {
-2922  Throwable error = e;
-2923  if (e instanceof 
InvocationTargetException &&
-2924  
((InvocationTargetException)e).getTargetException() != null) {
-2925error = 
((InvocationTargetException)e).getTargetException();
-2926  }
-2927  throw new RuntimeException("Failed 
construction of Master: " + masterClass.toString() + ". "
-2928, error);
-2929}
-2930  }
-2931
-2932  /**
-2933   * @see 
org.apache.hadoop.hbase.master.HMasterCommandLine
-2934   */
-2935  public static void main(String [] 
args) {
-2936LOG.info("STARTING service " + 
HMaster.class.getSimpleName());
-2937VersionInfo.logVersion();
-2938new 
HMasterCommandLine(HMaster.class).doMain(args);
-2939  }
-2940
-2941  public HFileCleaner getHFileCleaner() 
{
-2942return this.hfileCleaner;
-2943  }
-2944
-2945  public LogCleaner getLogCleaner() {

[04/51] [partial] hbase-site git commit: Published site at 7d3750bd9fc9747623549c242cc4171e224b3eaf.

2018-06-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3469cbc0/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.RemoteProcedure.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.RemoteProcedure.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.RemoteProcedure.html
index 594ef24..17d5c40 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.RemoteProcedure.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.RemoteProcedure.html
@@ -170,241 +170,242 @@
 162  }
 163
 164  /**
-165   * Add a remote rpc. Be sure to check 
result for successful add.
+165   * Add a remote rpc.
 166   * @param key the node identifier
-167   * @return True if we successfully 
added the operation.
-168   */
-169  public boolean addOperationToNode(final 
TRemote key, RemoteProcedure rp) {
+167   */
+168  public void addOperationToNode(final 
TRemote key, RemoteProcedure rp)
+169  throws 
NullTargetServerDispatchException, NoServerDispatchException, 
NoNodeDispatchException {
 170if (key == null) {
-171  // Key is remote server name. Be 
careful. It could have been nulled by a concurrent
-172  // ServerCrashProcedure shutting 
down outstanding RPC requests. See remoteCallFailed.
-173  return false;
-174}
-175assert key != null : "found null key 
for node";
-176BufferNode node = nodeMap.get(key);
-177if (node == null) {
-178  return false;
-179}
-180node.add(rp);
-181// Check our node still in the map; 
could have been removed by #removeNode.
-182return nodeMap.containsValue(node);
-183  }
-184
-185  /**
-186   * Remove a remote node
-187   * @param key the node identifier
-188   */
-189  public boolean removeNode(final TRemote 
key) {
-190final BufferNode node = 
nodeMap.remove(key);
-191if (node == null) return false;
-192node.abortOperationsInQueue();
-193return true;
-194  }
-195
-196  // 

-197  //  Task Helpers
-198  // 

-199  protected Future 
submitTask(Callable task) {
-200return threadPool.submit(task);
-201  }
-202
-203  protected Future 
submitTask(Callable task, long delay, TimeUnit unit) {
-204final FutureTask 
futureTask = new FutureTask(task);
-205timeoutExecutor.add(new 
DelayedTask(futureTask, delay, unit));
-206return futureTask;
-207  }
-208
-209  protected abstract void 
remoteDispatch(TRemote key, Set operations);
-210  protected abstract void 
abortPendingOperations(TRemote key, Set operations);
-211
-212  /**
-213   * Data structure with reference to 
remote operation.
-214   */
-215  public static abstract class 
RemoteOperation {
-216private final RemoteProcedure 
remoteProcedure;
-217
-218protected RemoteOperation(final 
RemoteProcedure remoteProcedure) {
-219  this.remoteProcedure = 
remoteProcedure;
-220}
-221
-222public RemoteProcedure 
getRemoteProcedure() {
-223  return remoteProcedure;
-224}
-225  }
-226
-227  /**
-228   * Remote procedure reference.
-229   */
-230  public interface 
RemoteProcedure {
-231/**
-232 * For building the remote 
operation.
-233 */
-234RemoteOperation remoteCallBuild(TEnv 
env, TRemote remote);
-235
-236/**
-237 * Called when the executeProcedure 
call is failed.
-238 */
-239void remoteCallFailed(TEnv env, 
TRemote remote, IOException exception);
-240
-241/**
-242 * Called when RS tells the remote 
procedure is succeeded through the
-243 * {@code reportProcedureDone} 
method.
-244 */
-245void remoteOperationCompleted(TEnv 
env);
-246
-247/**
-248 * Called when RS tells the remote 
procedure is failed through the {@code reportProcedureDone}
-249 * method.
-250 */
-251void remoteOperationFailed(TEnv env, 
RemoteProcedureException error);
-252  }
-253
-254  /**
-255   * Account of what procedures are 
running on remote node.
-256   * @param 
-257   * @param 
-258   */
-259  public interface RemoteNode {
-260TRemote getKey();
-261void add(RemoteProcedure operation);
-262void dispatch();
-263  }
-264
-265  protected 
ArrayListMultimap, RemoteOperation> 
buildAndGroupRequestByType(final TEnv env,
-266  final TRemote remote, final 
Set remoteProcedures) {
-267final 
ArrayListMultimap, RemoteOperation> requestByType = 
ArrayListMultimap.create();
-268for (RemoteProcedure proc: 
remoteProcedures) {
-269  RemoteOperation operation = 
proc.remoteCallBuild(env, remote);

[30/51] [partial] hbase-site git commit: Published site at 7d3750bd9fc9747623549c242cc4171e224b3eaf.

2018-06-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3469cbc0/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.MasterStoppedException.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.MasterStoppedException.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.MasterStoppedException.html
index aa48364..9549aa5 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.MasterStoppedException.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.MasterStoppedException.html
@@ -2830,843 +2830,858 @@
 2822   * @return true if master is in 
maintenanceMode
 2823   */
 2824  @Override
-2825  public boolean isInMaintenanceMode() 
{
-2826return 
maintenanceModeTracker.isInMaintenanceMode();
-2827  }
-2828
-2829  @VisibleForTesting
-2830  public void setInitialized(boolean 
isInitialized) {
-2831
procedureExecutor.getEnvironment().setEventReady(initialized, isInitialized);
-2832  }
-2833
-2834  @Override
-2835  public ProcedureEvent 
getInitializedEvent() {
-2836return initialized;
-2837  }
-2838
-2839  /**
-2840   * ServerCrashProcessingEnabled is set 
false before completing assignMeta to prevent processing
-2841   * of crashed servers.
-2842   * @return true if assignMeta has 
completed;
-2843   */
-2844  @Override
-2845  public boolean 
isServerCrashProcessingEnabled() {
-2846return 
serverCrashProcessingEnabled.isReady();
-2847  }
-2848
-2849  @VisibleForTesting
-2850  public void 
setServerCrashProcessingEnabled(final boolean b) {
-2851
procedureExecutor.getEnvironment().setEventReady(serverCrashProcessingEnabled, 
b);
-2852  }
-2853
-2854  public ProcedureEvent 
getServerCrashProcessingEnabledEvent() {
-2855return 
serverCrashProcessingEnabled;
-2856  }
-2857
-2858  /**
-2859   * Compute the average load across all 
region servers.
-2860   * Currently, this uses a very naive 
computation - just uses the number of
-2861   * regions being served, ignoring 
stats about number of requests.
-2862   * @return the average load
-2863   */
-2864  public double getAverageLoad() {
-2865if (this.assignmentManager == null) 
{
-2866  return 0;
-2867}
-2868
-2869RegionStates regionStates = 
this.assignmentManager.getRegionStates();
-2870if (regionStates == null) {
-2871  return 0;
-2872}
-2873return 
regionStates.getAverageLoad();
-2874  }
-2875
-2876  /*
-2877   * @return the count of region split 
plans executed
-2878   */
-2879  public long getSplitPlanCount() {
-2880return splitPlanCount;
-2881  }
-2882
-2883  /*
-2884   * @return the count of region merge 
plans executed
-2885   */
-2886  public long getMergePlanCount() {
-2887return mergePlanCount;
-2888  }
-2889
-2890  @Override
-2891  public boolean registerService(Service 
instance) {
-2892/*
-2893 * No stacking of instances is 
allowed for a single service name
-2894 */
-2895Descriptors.ServiceDescriptor 
serviceDesc = instance.getDescriptorForType();
-2896String serviceName = 
CoprocessorRpcUtils.getServiceName(serviceDesc);
-2897if 
(coprocessorServiceHandlers.containsKey(serviceName)) {
-2898  LOG.error("Coprocessor service 
"+serviceName+
-2899  " already registered, 
rejecting request from "+instance
-2900  );
-2901  return false;
-2902}
-2903
-2904
coprocessorServiceHandlers.put(serviceName, instance);
-2905if (LOG.isDebugEnabled()) {
-2906  LOG.debug("Registered master 
coprocessor service: service="+serviceName);
-2907}
-2908return true;
-2909  }
-2910
-2911  /**
-2912   * Utility for constructing an 
instance of the passed HMaster class.
-2913   * @param masterClass
-2914   * @return HMaster instance.
-2915   */
-2916  public static HMaster 
constructMaster(Class masterClass,
-2917  final Configuration conf)  {
-2918try {
-2919  Constructor c = masterClass.getConstructor(Configuration.class);
-2920  return c.newInstance(conf);
-2921} catch(Exception e) {
-2922  Throwable error = e;
-2923  if (e instanceof 
InvocationTargetException &&
-2924  
((InvocationTargetException)e).getTargetException() != null) {
-2925error = 
((InvocationTargetException)e).getTargetException();
-2926  }
-2927  throw new RuntimeException("Failed 
construction of Master: " + masterClass.toString() + ". "
-2928, error);
-2929}
-2930  }
-2931
-2932  /**
-2933   * @see 
org.apache.hadoop.hbase.master.HMasterCommandLine
-2934   */
-2935  public static void main(String [] 
args) {
-2936LOG.info("STARTING service " + 
HMaster.class.getSimpleName());
-2937VersionInfo.logVersion();
-2938new 
HMasterCommandLine(HMaster.class).doMain(args);
-2939  }
-2940
-2941  public HFileCleaner getHFileCleaner() 
{
-2942return this.hfileCleaner;
-2943  }
-2944
-2945  public LogCleaner getLogCleaner

[23/51] [partial] hbase-site git commit: Published site at 7d3750bd9fc9747623549c242cc4171e224b3eaf.

2018-06-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3469cbc0/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionChore.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionChore.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionChore.html
index fe1e077..90c31f4 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionChore.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionChore.html
@@ -1072,894 +1072,913 @@
 1064
 1065  protected boolean 
waitServerReportEvent(final ServerName serverName, final Procedure proc) {
 1066final ServerStateNode serverNode = 
regionStates.getOrCreateServer(serverName);
-1067return 
serverNode.getReportEvent().suspendIfNotReady(proc);
-1068  }
-1069
-1070  protected void 
wakeServerReportEvent(final ServerStateNode serverNode) {
-1071
serverNode.getReportEvent().wake(getProcedureScheduler());
-1072  }
-1073
-1074  // 

-1075  //  RIT chore
-1076  // 

-1077  private static class 
RegionInTransitionChore extends 
ProcedureInMemoryChore {
-1078public RegionInTransitionChore(final 
int timeoutMsec) {
-1079  super(timeoutMsec);
-1080}
-1081
-1082@Override
-1083protected void periodicExecute(final 
MasterProcedureEnv env) {
-1084  final AssignmentManager am = 
env.getAssignmentManager();
-1085
-1086  final RegionInTransitionStat 
ritStat = am.computeRegionInTransitionStat();
-1087  if 
(ritStat.hasRegionsOverThreshold()) {
-1088for (RegionState hri: 
ritStat.getRegionOverThreshold()) {
-1089  
am.handleRegionOverStuckWarningThreshold(hri.getRegion());
-1090}
-1091  }
-1092
-1093  // update metrics
-1094  
am.updateRegionsInTransitionMetrics(ritStat);
-1095}
-1096  }
-1097
-1098  public RegionInTransitionStat 
computeRegionInTransitionStat() {
-1099final RegionInTransitionStat rit = 
new RegionInTransitionStat(getConfiguration());
-1100rit.update(this);
-1101return rit;
-1102  }
-1103
-1104  public static class 
RegionInTransitionStat {
-1105private final int ritThreshold;
+1067if (serverNode == null) {
+1068  LOG.warn("serverName=null; {}", 
proc);
+1069}
+1070return 
serverNode.getReportEvent().suspendIfNotReady(proc);
+1071  }
+1072
+1073  protected void 
wakeServerReportEvent(final ServerStateNode serverNode) {
+1074
serverNode.getReportEvent().wake(getProcedureScheduler());
+1075  }
+1076
+1077  // 

+1078  //  RIT chore
+1079  // 

+1080  private static class 
RegionInTransitionChore extends 
ProcedureInMemoryChore {
+1081public RegionInTransitionChore(final 
int timeoutMsec) {
+1082  super(timeoutMsec);
+1083}
+1084
+1085@Override
+1086protected void periodicExecute(final 
MasterProcedureEnv env) {
+1087  final AssignmentManager am = 
env.getAssignmentManager();
+1088
+1089  final RegionInTransitionStat 
ritStat = am.computeRegionInTransitionStat();
+1090  if 
(ritStat.hasRegionsOverThreshold()) {
+1091for (RegionState hri: 
ritStat.getRegionOverThreshold()) {
+1092  
am.handleRegionOverStuckWarningThreshold(hri.getRegion());
+1093}
+1094  }
+1095
+1096  // update metrics
+1097  
am.updateRegionsInTransitionMetrics(ritStat);
+1098}
+1099  }
+1100
+1101  public RegionInTransitionStat 
computeRegionInTransitionStat() {
+1102final RegionInTransitionStat rit = 
new RegionInTransitionStat(getConfiguration());
+1103rit.update(this);
+1104return rit;
+1105  }
 1106
-1107private HashMap ritsOverThreshold = null;
-1108private long statTimestamp;
-1109private long oldestRITTime = 0;
-1110private int totalRITsTwiceThreshold 
= 0;
-private int totalRITs = 0;
-1112
-1113@VisibleForTesting
-1114public RegionInTransitionStat(final 
Configuration conf) {
-1115  this.ritThreshold =
-1116
conf.getInt(METRICS_RIT_STUCK_WARNING_THRESHOLD, 
DEFAULT_RIT_STUCK_WARNING_THRESHOLD);
-1117}
-1118
-1119public int getRITThreshold() {
-1120  return ritThreshold;
-1121}
-1122
-1123public long getTimestamp() {
-1124  return statTimestamp;
-1125}
-1126
-1127public int getTotalRITs() {
-1128  return totalRITs;
-1129}
-1130
-1131public long getOldestRITTim

[12/51] [partial] hbase-site git commit: Published site at 7d3750bd9fc9747623549c242cc4171e224b3eaf.

2018-06-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3469cbc0/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.html
index 5420d82..6ea3672 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.html
@@ -316,687 +316,728 @@
 308}
 309  }
 310
-311  public enum ServerState { ONLINE, 
SPLITTING, OFFLINE }
-312  public static class ServerStateNode 
implements Comparable {
-313private final ServerReportEvent 
reportEvent;
-314
-315private final 
Set regions;
-316private final ServerName 
serverName;
-317
-318private volatile ServerState state = 
ServerState.ONLINE;
-319private volatile int versionNumber = 
0;
-320
-321public ServerStateNode(final 
ServerName serverName) {
-322  this.serverName = serverName;
-323  this.regions = 
ConcurrentHashMap.newKeySet();
-324  this.reportEvent = new 
ServerReportEvent(serverName);
-325}
-326
-327public ServerName getServerName() {
-328  return serverName;
-329}
+311  /**
+312   * Server State.
+313   */
+314  public enum ServerState {
+315/**
+316 * Initial state. Available.
+317 */
+318ONLINE,
+319
+320/**
+321 * Server expired/crashed. Currently 
undergoing WAL splitting.
+322 */
+323SPLITTING,
+324
+325/**
+326 * WAL splitting done.
+327 */
+328OFFLINE
+329  }
 330
-331public ServerState getState() {
-332  return state;
-333}
-334
-335public int getVersionNumber() {
-336  return versionNumber;
-337}
-338
-339public ProcedureEvent 
getReportEvent() {
-340  return reportEvent;
-341}
+331  /**
+332   * State of Server; list of hosted 
regions, etc.
+333   */
+334  public static class ServerStateNode 
implements Comparable {
+335private final ServerReportEvent 
reportEvent;
+336
+337private final 
Set regions;
+338private final ServerName 
serverName;
+339
+340private volatile ServerState state = 
ServerState.ONLINE;
+341private volatile int versionNumber = 
0;
 342
-343public boolean isInState(final 
ServerState... expected) {
-344  boolean expectedState = false;
-345  if (expected != null) {
-346for (int i = 0; i < 
expected.length; ++i) {
-347  expectedState |= (state == 
expected[i]);
-348}
-349  }
-350  return expectedState;
+343public ServerStateNode(final 
ServerName serverName) {
+344  this.serverName = serverName;
+345  this.regions = 
ConcurrentHashMap.newKeySet();
+346  this.reportEvent = new 
ServerReportEvent(serverName);
+347}
+348
+349public ServerName getServerName() {
+350  return serverName;
 351}
 352
-353public void setState(final 
ServerState state) {
-354  this.state = state;
+353public ServerState getState() {
+354  return state;
 355}
 356
-357public void setVersionNumber(final 
int versionNumber) {
-358  this.versionNumber = 
versionNumber;
+357public int getVersionNumber() {
+358  return versionNumber;
 359}
 360
-361public Set 
getRegions() {
-362  return regions;
+361public ProcedureEvent 
getReportEvent() {
+362  return reportEvent;
 363}
 364
-365public int getRegionCount() {
-366  return regions.size();
+365public boolean isOffline() {
+366  return 
this.state.equals(ServerState.OFFLINE);
 367}
 368
-369public ArrayList 
getRegionInfoList() {
-370  ArrayList hris = 
new ArrayList(regions.size());
-371  for (RegionStateNode region: 
regions) {
-372
hris.add(region.getRegionInfo());
-373  }
-374  return hris;
-375}
-376
-377public void addRegion(final 
RegionStateNode regionNode) {
-378  this.regions.add(regionNode);
-379}
-380
-381public void removeRegion(final 
RegionStateNode regionNode) {
-382  this.regions.remove(regionNode);
-383}
-384
-385@Override
-386public int compareTo(final 
ServerStateNode other) {
-387  return 
getServerName().compareTo(other.getServerName());
-388}
-389
-390@Override
-391public int hashCode() {
-392  return 
getServerName().hashCode();
+369public boolean isInState(final 
ServerState... expected) {
+370  boolean expectedState = false;
+371  if (expected != null) {
+372for (int i = 0; i < 
expected.length; ++i) {
+373  expectedState |= (state == 
expected[i]);
+374}
+375  }
+376  return expectedState;
+377}
+378
+379public void setState(final 
ServerState st

[10/51] [partial] hbase-site git commit: Published site at 7d3750bd9fc9747623549c242cc4171e224b3eaf.

2018-06-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3469cbc0/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/UnassignProcedure.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/UnassignProcedure.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/UnassignProcedure.html
index ca94fbc..269fcc2 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/UnassignProcedure.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/UnassignProcedure.html
@@ -207,114 +207,154 @@
 199  return false;
 200}
 201
-202// Mark the region as CLOSING.
-203
env.getAssignmentManager().markRegionAsClosing(regionNode);
-204
-205// Add the close region operation the 
the server dispatch queue.
-206if (!addToRemoteDispatcher(env, 
regionNode.getRegionLocation())) {
-207  // If addToRemoteDispatcher fails, 
it calls the callback #remoteCallFailed.
-208}
-209
-210// Return true to keep the procedure 
running.
-211return true;
-212  }
-213
-214  @Override
-215  protected void finishTransition(final 
MasterProcedureEnv env, final RegionStateNode regionNode)
-216  throws IOException {
-217AssignmentManager am = 
env.getAssignmentManager();
-218RegionInfo regionInfo = 
getRegionInfo();
-219
-220if (!removeAfterUnassigning) {
-221  
am.markRegionAsClosed(regionNode);
-222} else {
-223  // Remove from in-memory states
-224  
am.getRegionStates().deleteRegion(regionInfo);
-225  
env.getMasterServices().getServerManager().removeRegion(regionInfo);
-226  FavoredNodesManager fnm = 
env.getMasterServices().getFavoredNodesManager();
-227  if (fnm != null) {
-228
fnm.deleteFavoredNodesForRegions(Lists.newArrayList(regionInfo));
-229  }
-230}
-231  }
-232
-233  @Override
-234  public RemoteOperation 
remoteCallBuild(final MasterProcedureEnv env, final ServerName serverName) {
-235assert 
serverName.equals(getRegionState(env).getRegionLocation());
-236return new RegionCloseOperation(this, 
getRegionInfo(), this.destinationServer);
-237  }
-238
-239  @Override
-240  protected void reportTransition(final 
MasterProcedureEnv env, final RegionStateNode regionNode,
-241  final TransitionCode code, final 
long seqId) throws UnexpectedStateException {
-242switch (code) {
-243  case CLOSED:
-244
setTransitionState(RegionTransitionState.REGION_TRANSITION_FINISH);
-245break;
-246  default:
-247throw new 
UnexpectedStateException(String.format(
-248  "Received report unexpected 
transition state=%s for region=%s server=%s, expected CLOSED.",
-249  code, 
regionNode.getRegionInfo(), regionNode.getRegionLocation()));
-250}
-251  }
-252
-253  @Override
-254  protected boolean 
remoteCallFailed(final MasterProcedureEnv env, final RegionStateNode 
regionNode,
-255  final IOException exception) {
-256// TODO: Is there on-going rpc to 
cleanup?
-257if (exception instanceof 
ServerCrashException) {
-258  // This exception comes from 
ServerCrashProcedure AFTER log splitting.
-259  // SCP found this region as a RIT. 
Its call into here says it is ok to let this procedure go
-260  // complete. This complete will 
release lock on this region so subsequent action on region
-261  // can succeed; e.g. the assign 
that follows this unassign when a move (w/o wait on SCP
-262  // the assign could run w/o logs 
being split so data loss).
-263  try {
-264reportTransition(env, regionNode, 
TransitionCode.CLOSED, HConstants.NO_SEQNUM);
-265  } catch (UnexpectedStateException 
e) {
-266// Should never happen.
-267throw new RuntimeException(e);
-268  }
-269} else if (exception instanceof 
RegionServerAbortedException ||
-270exception instanceof 
RegionServerStoppedException ||
-271exception instanceof 
ServerNotRunningYetException) {
-272  // RS is aborting, we cannot 
offline the region since the region may need to do WAL
-273  // recovery. Until we see the RS 
expiration, we should retry.
-274  // TODO: This should be suspend 
like the below where we call expire on server?
-275  LOG.info("Ignoring; waiting on 
ServerCrashProcedure", exception);
-276} else if (exception instanceof 
NotServingRegionException) {
-277  LOG.info("IS THIS OK? ANY LOGS TO 
REPLAY; ACTING AS THOUGH ALL GOOD " + regionNode,
-278exception);
-279  
setTransitionState(RegionTransitionState.REGION_TRANSITION_FINISH);
-280} else {
-281  LOG.warn("Expiring server " + this 
+ "; " + regionNode.toShortString() +
-282", exception=" + exception);
-283  
env.getMasterServices().getServerManager().expireServer(regionNode.getRegionLocation());
-284  // Return false so this procedure 
stays in suspended state. It will be woken up by the
-

[17/51] [partial] hbase-site git commit: Published site at 7d3750bd9fc9747623549c242cc4171e224b3eaf.

2018-06-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3469cbc0/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.RegionStateNode.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.RegionStateNode.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.RegionStateNode.html
index 5420d82..6ea3672 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.RegionStateNode.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.RegionStateNode.html
@@ -316,687 +316,728 @@
 308}
 309  }
 310
-311  public enum ServerState { ONLINE, 
SPLITTING, OFFLINE }
-312  public static class ServerStateNode 
implements Comparable {
-313private final ServerReportEvent 
reportEvent;
-314
-315private final 
Set regions;
-316private final ServerName 
serverName;
-317
-318private volatile ServerState state = 
ServerState.ONLINE;
-319private volatile int versionNumber = 
0;
-320
-321public ServerStateNode(final 
ServerName serverName) {
-322  this.serverName = serverName;
-323  this.regions = 
ConcurrentHashMap.newKeySet();
-324  this.reportEvent = new 
ServerReportEvent(serverName);
-325}
-326
-327public ServerName getServerName() {
-328  return serverName;
-329}
+311  /**
+312   * Server State.
+313   */
+314  public enum ServerState {
+315/**
+316 * Initial state. Available.
+317 */
+318ONLINE,
+319
+320/**
+321 * Server expired/crashed. Currently 
undergoing WAL splitting.
+322 */
+323SPLITTING,
+324
+325/**
+326 * WAL splitting done.
+327 */
+328OFFLINE
+329  }
 330
-331public ServerState getState() {
-332  return state;
-333}
-334
-335public int getVersionNumber() {
-336  return versionNumber;
-337}
-338
-339public ProcedureEvent 
getReportEvent() {
-340  return reportEvent;
-341}
+331  /**
+332   * State of Server; list of hosted 
regions, etc.
+333   */
+334  public static class ServerStateNode 
implements Comparable {
+335private final ServerReportEvent 
reportEvent;
+336
+337private final 
Set regions;
+338private final ServerName 
serverName;
+339
+340private volatile ServerState state = 
ServerState.ONLINE;
+341private volatile int versionNumber = 
0;
 342
-343public boolean isInState(final 
ServerState... expected) {
-344  boolean expectedState = false;
-345  if (expected != null) {
-346for (int i = 0; i < 
expected.length; ++i) {
-347  expectedState |= (state == 
expected[i]);
-348}
-349  }
-350  return expectedState;
+343public ServerStateNode(final 
ServerName serverName) {
+344  this.serverName = serverName;
+345  this.regions = 
ConcurrentHashMap.newKeySet();
+346  this.reportEvent = new 
ServerReportEvent(serverName);
+347}
+348
+349public ServerName getServerName() {
+350  return serverName;
 351}
 352
-353public void setState(final 
ServerState state) {
-354  this.state = state;
+353public ServerState getState() {
+354  return state;
 355}
 356
-357public void setVersionNumber(final 
int versionNumber) {
-358  this.versionNumber = 
versionNumber;
+357public int getVersionNumber() {
+358  return versionNumber;
 359}
 360
-361public Set 
getRegions() {
-362  return regions;
+361public ProcedureEvent 
getReportEvent() {
+362  return reportEvent;
 363}
 364
-365public int getRegionCount() {
-366  return regions.size();
+365public boolean isOffline() {
+366  return 
this.state.equals(ServerState.OFFLINE);
 367}
 368
-369public ArrayList 
getRegionInfoList() {
-370  ArrayList hris = 
new ArrayList(regions.size());
-371  for (RegionStateNode region: 
regions) {
-372
hris.add(region.getRegionInfo());
-373  }
-374  return hris;
-375}
-376
-377public void addRegion(final 
RegionStateNode regionNode) {
-378  this.regions.add(regionNode);
-379}
-380
-381public void removeRegion(final 
RegionStateNode regionNode) {
-382  this.regions.remove(regionNode);
-383}
-384
-385@Override
-386public int compareTo(final 
ServerStateNode other) {
-387  return 
getServerName().compareTo(other.getServerName());
-388}
-389
-390@Override
-391public int hashCode() {
-392  return 
getServerName().hashCode();
+369public boolean isInState(final 
ServerState... expected) {
+370  boolean expectedState = false;
+371  if (expected != null) {
+372for (int i = 0; i < 
expected.length; ++i) {
+373  expectedState |= (state == 
expected[i]);
+374}
+375  }
+376  return 

[26/51] [partial] hbase-site git commit: Published site at 7d3750bd9fc9747623549c242cc4171e224b3eaf.

2018-06-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3469cbc0/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterRpcServices.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterRpcServices.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterRpcServices.html
index 275ca84..b9a30c4 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterRpcServices.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterRpcServices.html
@@ -1499,782 +1499,786 @@
 1491  final RpcController controller,
 1492  final IsInMaintenanceModeRequest 
request) throws ServiceException {
 1493IsInMaintenanceModeResponse.Builder 
response = IsInMaintenanceModeResponse.newBuilder();
-1494
response.setInMaintenanceMode(master.isInMaintenanceMode());
-1495return response.build();
-1496  }
-1497
-1498  @Override
-1499  public UnassignRegionResponse 
unassignRegion(RpcController controller,
-1500  UnassignRegionRequest req) throws 
ServiceException {
-1501try {
-1502  final byte [] regionName = 
req.getRegion().getValue().toByteArray();
-1503  RegionSpecifierType type = 
req.getRegion().getType();
-1504  final boolean force = 
req.getForce();
-1505  UnassignRegionResponse urr = 
UnassignRegionResponse.newBuilder().build();
-1506
-1507  master.checkInitialized();
-1508  if (type != 
RegionSpecifierType.REGION_NAME) {
-1509LOG.warn("unassignRegion 
specifier type: expected: " + RegionSpecifierType.REGION_NAME
-1510  + " actual: " + type);
-1511  }
-1512  Pair 
pair =
-1513
MetaTableAccessor.getRegion(master.getConnection(), regionName);
-1514  if 
(Bytes.equals(RegionInfoBuilder.FIRST_META_REGIONINFO.getRegionName(),regionName))
 {
-1515pair = new 
Pair<>(RegionInfoBuilder.FIRST_META_REGIONINFO,
-1516
master.getMetaTableLocator().getMetaRegionLocation(master.getZooKeeper()));
-1517  }
-1518  if (pair == null) {
-1519throw new 
UnknownRegionException(Bytes.toString(regionName));
-1520  }
-1521
-1522  RegionInfo hri = 
pair.getFirst();
-1523  if (master.cpHost != null) {
-1524master.cpHost.preUnassign(hri, 
force);
-1525  }
-1526  
LOG.debug(master.getClientIdAuditPrefix() + " unassign " + 
hri.getRegionNameAsString()
-1527  + " in current location if it 
is online and reassign.force=" + force);
-1528  
master.getAssignmentManager().unassign(hri);
-1529  if (master.cpHost != null) {
-1530master.cpHost.postUnassign(hri, 
force);
-1531  }
-1532
-1533  return urr;
-1534} catch (IOException ioe) {
-1535  throw new ServiceException(ioe);
-1536}
-1537  }
-1538
-1539  @Override
-1540  public 
ReportRegionStateTransitionResponse reportRegionStateTransition(RpcController 
c,
-1541  ReportRegionStateTransitionRequest 
req) throws ServiceException {
-1542try {
-1543  master.checkServiceStarted();
-1544  return 
master.getAssignmentManager().reportRegionStateTransition(req);
-1545} catch (IOException ioe) {
-1546  throw new ServiceException(ioe);
-1547}
-1548  }
-1549
-1550  @Override
-1551  public SetQuotaResponse 
setQuota(RpcController c, SetQuotaRequest req)
-1552  throws ServiceException {
-1553try {
-1554  master.checkInitialized();
-1555  return 
master.getMasterQuotaManager().setQuota(req);
-1556} catch (Exception e) {
-1557  throw new ServiceException(e);
-1558}
-1559  }
-1560
-1561  @Override
-1562  public 
MajorCompactionTimestampResponse getLastMajorCompactionTimestamp(RpcController 
controller,
-1563  MajorCompactionTimestampRequest 
request) throws ServiceException {
-1564
MajorCompactionTimestampResponse.Builder response =
-1565
MajorCompactionTimestampResponse.newBuilder();
-1566try {
-1567  master.checkInitialized();
-1568  
response.setCompactionTimestamp(master.getLastMajorCompactionTimestamp(ProtobufUtil
-1569  
.toTableName(request.getTableName(;
-1570} catch (IOException e) {
-1571  throw new ServiceException(e);
-1572}
-1573return response.build();
-1574  }
-1575
-1576  @Override
-1577  public 
MajorCompactionTimestampResponse getLastMajorCompactionTimestampForRegion(
-1578  RpcController controller, 
MajorCompactionTimestampForRegionRequest request)
-1579  throws ServiceException {
-1580
MajorCompactionTimestampResponse.Builder response =
-1581
MajorCompactionTimestampResponse.newBuilder();
-1582try {
-1583  master.checkInitialized();
-1584  
response.setCompactionTimestamp(master.getLastMajorCompactionTimestampForRegion(request
-1585  
.getRegion().getValue().toByteArray()));
-1586} catch (IOException e) {
-1587  throw new ServiceException(e);
-1588}
-1589return response.build();
-1590  }
-1591
-1592  /**
-1593   *

[24/51] [partial] hbase-site git commit: Published site at 7d3750bd9fc9747623549c242cc4171e224b3eaf.

2018-06-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3469cbc0/devapidocs/src-html/org/apache/hadoop/hbase/master/ServerManager.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/ServerManager.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/ServerManager.html
index a88ff57..6458b43 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/ServerManager.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/ServerManager.html
@@ -565,502 +565,508 @@
 557  /*
 558   * Expire the passed server.  Add it to 
list of dead servers and queue a
 559   * shutdown processing.
-560   */
-561  public synchronized void 
expireServer(final ServerName serverName) {
-562if 
(serverName.equals(master.getServerName())) {
-563  if (!(master.isAborted() || 
master.isStopped())) {
-564master.stop("We lost our 
znode?");
-565  }
-566  return;
-567}
-568if 
(!master.isServerCrashProcessingEnabled()) {
-569  LOG.info("Master doesn't enable 
ServerShutdownHandler during initialization, "
-570  + "delay expiring server " + 
serverName);
-571  // Even we delay expire this 
server, we still need to handle Meta's RIT
-572  // that are against the crashed 
server; since when we do RecoverMetaProcedure,
-573  // the SCP is not enable yet and 
Meta's RIT may be suspend forever. See HBase-19287
-574  
master.getAssignmentManager().handleMetaRITOnCrashedServer(serverName);
-575  
this.queuedDeadServers.add(serverName);
-576  return;
-577}
-578if 
(this.deadservers.isDeadServer(serverName)) {
-579  // TODO: Can this happen?  It 
shouldn't be online in this case?
-580  LOG.warn("Expiration of " + 
serverName +
-581  " but server shutdown already 
in progress");
-582  return;
-583}
-584
moveFromOnlineToDeadServers(serverName);
-585
-586// If cluster is going down, yes, 
servers are going to be expiring; don't
-587// process as a dead server
-588if (isClusterShutdown()) {
-589  LOG.info("Cluster shutdown set; " + 
serverName +
-590" expired; onlineServers=" + 
this.onlineServers.size());
-591  if (this.onlineServers.isEmpty()) 
{
-592master.stop("Cluster shutdown 
set; onlineServer=0");
-593  }
-594  return;
-595}
-596LOG.info("Processing expiration of " 
+ serverName + " on " + this.master.getServerName());
-597
master.getAssignmentManager().submitServerCrash(serverName, true);
-598
-599// Tell our listeners that a server 
was removed
-600if (!this.listeners.isEmpty()) {
-601  for (ServerListener listener : 
this.listeners) {
-602
listener.serverRemoved(serverName);
-603  }
-604}
-605  }
-606
-607  @VisibleForTesting
-608  public void 
moveFromOnlineToDeadServers(final ServerName sn) {
-609synchronized (onlineServers) {
-610  if 
(!this.onlineServers.containsKey(sn)) {
-611LOG.warn("Expiration of " + sn + 
" but server not online");
-612  }
-613  // Remove the server from the known 
servers lists and update load info BUT
-614  // add to deadservers first; do 
this so it'll show in dead servers list if
-615  // not in online servers list.
-616  this.deadservers.add(sn);
-617  this.onlineServers.remove(sn);
-618  onlineServers.notifyAll();
-619}
-620this.rsAdmins.remove(sn);
-621  }
-622
-623  public synchronized void 
processDeadServer(final ServerName serverName, boolean shouldSplitWal) {
-624// When assignment manager is 
cleaning up the zookeeper nodes and rebuilding the
-625// in-memory region states, region 
servers could be down. Meta table can and
-626// should be re-assigned, log 
splitting can be done too. However, it is better to
-627// wait till the cleanup is done 
before re-assigning user regions.
-628//
-629// We should not wait in the server 
shutdown handler thread since it can clog
-630// the handler threads and meta table 
could not be re-assigned in case
-631// the corresponding server is down. 
So we queue them up here instead.
-632if 
(!master.getAssignmentManager().isFailoverCleanupDone()) {
-633  requeuedDeadServers.put(serverName, 
shouldSplitWal);
-634  return;
-635}
-636
-637this.deadservers.add(serverName);
-638
master.getAssignmentManager().submitServerCrash(serverName, shouldSplitWal);
-639  }
-640
-641  /**
-642   * Process the servers which died 
during master's initialization. It will be
-643   * called after HMaster#assignMeta and 
AssignmentManager#joinCluster.
-644   * */
-645  synchronized void 
processQueuedDeadServers() {
-646if 
(!master.isServerCrashProcessingEnabled()) {
-647  LOG.info("Master hasn't enabled 
ServerShutdownHandler");
-648}
-649Iterator 
serverIterator = queuedDeadServers.iterator();
-650while (serverIterator.hasNext()) {
-651  ServerName tmpServerNa

[02/51] [partial] hbase-site git commit: Published site at 7d3750bd9fc9747623549c242cc4171e224b3eaf.

2018-06-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3469cbc0/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.html
index 594ef24..17d5c40 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.html
@@ -170,241 +170,242 @@
 162  }
 163
 164  /**
-165   * Add a remote rpc. Be sure to check 
result for successful add.
+165   * Add a remote rpc.
 166   * @param key the node identifier
-167   * @return True if we successfully 
added the operation.
-168   */
-169  public boolean addOperationToNode(final 
TRemote key, RemoteProcedure rp) {
+167   */
+168  public void addOperationToNode(final 
TRemote key, RemoteProcedure rp)
+169  throws 
NullTargetServerDispatchException, NoServerDispatchException, 
NoNodeDispatchException {
 170if (key == null) {
-171  // Key is remote server name. Be 
careful. It could have been nulled by a concurrent
-172  // ServerCrashProcedure shutting 
down outstanding RPC requests. See remoteCallFailed.
-173  return false;
-174}
-175assert key != null : "found null key 
for node";
-176BufferNode node = nodeMap.get(key);
-177if (node == null) {
-178  return false;
-179}
-180node.add(rp);
-181// Check our node still in the map; 
could have been removed by #removeNode.
-182return nodeMap.containsValue(node);
-183  }
-184
-185  /**
-186   * Remove a remote node
-187   * @param key the node identifier
-188   */
-189  public boolean removeNode(final TRemote 
key) {
-190final BufferNode node = 
nodeMap.remove(key);
-191if (node == null) return false;
-192node.abortOperationsInQueue();
-193return true;
-194  }
-195
-196  // 

-197  //  Task Helpers
-198  // 

-199  protected Future 
submitTask(Callable task) {
-200return threadPool.submit(task);
-201  }
-202
-203  protected Future 
submitTask(Callable task, long delay, TimeUnit unit) {
-204final FutureTask 
futureTask = new FutureTask(task);
-205timeoutExecutor.add(new 
DelayedTask(futureTask, delay, unit));
-206return futureTask;
-207  }
-208
-209  protected abstract void 
remoteDispatch(TRemote key, Set operations);
-210  protected abstract void 
abortPendingOperations(TRemote key, Set operations);
-211
-212  /**
-213   * Data structure with reference to 
remote operation.
-214   */
-215  public static abstract class 
RemoteOperation {
-216private final RemoteProcedure 
remoteProcedure;
-217
-218protected RemoteOperation(final 
RemoteProcedure remoteProcedure) {
-219  this.remoteProcedure = 
remoteProcedure;
-220}
-221
-222public RemoteProcedure 
getRemoteProcedure() {
-223  return remoteProcedure;
-224}
-225  }
-226
-227  /**
-228   * Remote procedure reference.
-229   */
-230  public interface 
RemoteProcedure {
-231/**
-232 * For building the remote 
operation.
-233 */
-234RemoteOperation remoteCallBuild(TEnv 
env, TRemote remote);
-235
-236/**
-237 * Called when the executeProcedure 
call is failed.
-238 */
-239void remoteCallFailed(TEnv env, 
TRemote remote, IOException exception);
-240
-241/**
-242 * Called when RS tells the remote 
procedure is succeeded through the
-243 * {@code reportProcedureDone} 
method.
-244 */
-245void remoteOperationCompleted(TEnv 
env);
-246
-247/**
-248 * Called when RS tells the remote 
procedure is failed through the {@code reportProcedureDone}
-249 * method.
-250 */
-251void remoteOperationFailed(TEnv env, 
RemoteProcedureException error);
-252  }
-253
-254  /**
-255   * Account of what procedures are 
running on remote node.
-256   * @param 
-257   * @param 
-258   */
-259  public interface RemoteNode {
-260TRemote getKey();
-261void add(RemoteProcedure operation);
-262void dispatch();
-263  }
-264
-265  protected 
ArrayListMultimap, RemoteOperation> 
buildAndGroupRequestByType(final TEnv env,
-266  final TRemote remote, final 
Set remoteProcedures) {
-267final 
ArrayListMultimap, RemoteOperation> requestByType = 
ArrayListMultimap.create();
-268for (RemoteProcedure proc: 
remoteProcedures) {
-269  RemoteOperation operation = 
proc.remoteCallBuild(env, remote);
-270  
requestByType.put(operation.getClass(), operation);
-271}
-272  

[07/51] [partial] hbase-site git commit: Published site at 7d3750bd9fc9747623549c242cc4171e224b3eaf.

2018-06-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3469cbc0/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.DelayedTask.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.DelayedTask.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.DelayedTask.html
index 594ef24..17d5c40 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.DelayedTask.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.DelayedTask.html
@@ -170,241 +170,242 @@
 162  }
 163
 164  /**
-165   * Add a remote rpc. Be sure to check 
result for successful add.
+165   * Add a remote rpc.
 166   * @param key the node identifier
-167   * @return True if we successfully 
added the operation.
-168   */
-169  public boolean addOperationToNode(final 
TRemote key, RemoteProcedure rp) {
+167   */
+168  public void addOperationToNode(final 
TRemote key, RemoteProcedure rp)
+169  throws 
NullTargetServerDispatchException, NoServerDispatchException, 
NoNodeDispatchException {
 170if (key == null) {
-171  // Key is remote server name. Be 
careful. It could have been nulled by a concurrent
-172  // ServerCrashProcedure shutting 
down outstanding RPC requests. See remoteCallFailed.
-173  return false;
-174}
-175assert key != null : "found null key 
for node";
-176BufferNode node = nodeMap.get(key);
-177if (node == null) {
-178  return false;
-179}
-180node.add(rp);
-181// Check our node still in the map; 
could have been removed by #removeNode.
-182return nodeMap.containsValue(node);
-183  }
-184
-185  /**
-186   * Remove a remote node
-187   * @param key the node identifier
-188   */
-189  public boolean removeNode(final TRemote 
key) {
-190final BufferNode node = 
nodeMap.remove(key);
-191if (node == null) return false;
-192node.abortOperationsInQueue();
-193return true;
-194  }
-195
-196  // 

-197  //  Task Helpers
-198  // 

-199  protected Future 
submitTask(Callable task) {
-200return threadPool.submit(task);
-201  }
-202
-203  protected Future 
submitTask(Callable task, long delay, TimeUnit unit) {
-204final FutureTask 
futureTask = new FutureTask(task);
-205timeoutExecutor.add(new 
DelayedTask(futureTask, delay, unit));
-206return futureTask;
-207  }
-208
-209  protected abstract void 
remoteDispatch(TRemote key, Set operations);
-210  protected abstract void 
abortPendingOperations(TRemote key, Set operations);
-211
-212  /**
-213   * Data structure with reference to 
remote operation.
-214   */
-215  public static abstract class 
RemoteOperation {
-216private final RemoteProcedure 
remoteProcedure;
-217
-218protected RemoteOperation(final 
RemoteProcedure remoteProcedure) {
-219  this.remoteProcedure = 
remoteProcedure;
-220}
-221
-222public RemoteProcedure 
getRemoteProcedure() {
-223  return remoteProcedure;
-224}
-225  }
-226
-227  /**
-228   * Remote procedure reference.
-229   */
-230  public interface 
RemoteProcedure {
-231/**
-232 * For building the remote 
operation.
-233 */
-234RemoteOperation remoteCallBuild(TEnv 
env, TRemote remote);
-235
-236/**
-237 * Called when the executeProcedure 
call is failed.
-238 */
-239void remoteCallFailed(TEnv env, 
TRemote remote, IOException exception);
-240
-241/**
-242 * Called when RS tells the remote 
procedure is succeeded through the
-243 * {@code reportProcedureDone} 
method.
-244 */
-245void remoteOperationCompleted(TEnv 
env);
-246
-247/**
-248 * Called when RS tells the remote 
procedure is failed through the {@code reportProcedureDone}
-249 * method.
-250 */
-251void remoteOperationFailed(TEnv env, 
RemoteProcedureException error);
-252  }
-253
-254  /**
-255   * Account of what procedures are 
running on remote node.
-256   * @param 
-257   * @param 
-258   */
-259  public interface RemoteNode {
-260TRemote getKey();
-261void add(RemoteProcedure operation);
-262void dispatch();
-263  }
-264
-265  protected 
ArrayListMultimap, RemoteOperation> 
buildAndGroupRequestByType(final TEnv env,
-266  final TRemote remote, final 
Set remoteProcedures) {
-267final 
ArrayListMultimap, RemoteOperation> requestByType = 
ArrayListMultimap.create();
-268for (RemoteProcedure proc: 
remoteProcedures) {
-269  RemoteOperation operation = 
proc.remoteCallBuild(env, remote);
-270  
requestB

[11/51] [partial] hbase-site git commit: Published site at 7d3750bd9fc9747623549c242cc4171e224b3eaf.

2018-06-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3469cbc0/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionTransitionProcedure.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionTransitionProcedure.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionTransitionProcedure.html
index 8b5905e..98e88a4 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionTransitionProcedure.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionTransitionProcedure.html
@@ -35,465 +35,461 @@
 027import 
org.apache.hadoop.hbase.master.assignment.RegionStates.RegionStateNode;
 028import 
org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
 029import 
org.apache.hadoop.hbase.master.procedure.TableProcedureInterface;
-030import 
org.apache.hadoop.hbase.procedure2.Procedure;
-031import 
org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer;
-032import 
org.apache.hadoop.hbase.procedure2.ProcedureSuspendedException;
-033import 
org.apache.hadoop.hbase.procedure2.RemoteProcedureDispatcher.RemoteOperation;
-034import 
org.apache.hadoop.hbase.procedure2.RemoteProcedureDispatcher.RemoteProcedure;
-035import 
org.apache.hadoop.hbase.procedure2.RemoteProcedureException;
-036import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos;
-037import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RegionTransitionState;
-038import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode;
-039import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-040import 
org.apache.yetus.audience.InterfaceAudience;
-041import org.slf4j.Logger;
-042import org.slf4j.LoggerFactory;
-043
-044/**
-045 * Base class for the Assign and Unassign 
Procedure.
-046 *
-047 * Locking:
-048 * Takes exclusive lock on the region 
being assigned/unassigned. Thus, there can only be one
-049 * RegionTransitionProcedure per region 
running at a time (see MasterProcedureScheduler).
-050 *
-051 * 

This procedure is asynchronous and responds to external events. -052 * The AssignmentManager will notify this procedure when the RS completes -053 * the operation and reports the transitioned state -054 * (see the Assign and Unassign class for more detail).

-055 * -056 *

Procedures move from the REGION_TRANSITION_QUEUE state when they are -057 * first submitted, to the REGION_TRANSITION_DISPATCH state when the request -058 * to remote server is sent and the Procedure is suspended waiting on external -059 * event to be woken again. Once the external event is triggered, Procedure -060 * moves to the REGION_TRANSITION_FINISH state.

-061 * -062 *

NOTE: {@link AssignProcedure} and {@link UnassignProcedure} should not be thought of -063 * as being asymmetric, at least currently. -064 *

    -065 *
  • {@link AssignProcedure} moves through all the above described states and implements methods -066 * associated with each while {@link UnassignProcedure} starts at state -067 * REGION_TRANSITION_DISPATCH and state REGION_TRANSITION_QUEUE is not supported.
  • -068 * -069 *
  • When any step in {@link AssignProcedure} fails, failure handler -070 * AssignProcedure#handleFailure(MasterProcedureEnv, RegionStateNode) re-attempts the -071 * assignment by setting the procedure state to REGION_TRANSITION_QUEUE and forces -072 * assignment to a different target server by setting {@link AssignProcedure#forceNewPlan}. When -073 * the number of attempts reaches threshold configuration 'hbase.assignment.maximum.attempts', -074 * the procedure is aborted. For {@link UnassignProcedure}, similar re-attempts are -075 * intentionally not implemented. It is a 'one shot' procedure. See its class doc for how it -076 * handles failure. -077 *
  • -078 *
  • If we find a region in an 'unexpected' state, we'll complain and retry with backoff forever. -079 * The 'unexpected' state needs to be fixed either by another running Procedure or by operator -080 * intervention (Regions in 'unexpected' state indicates bug or unexpected transition type). -081 * For this to work, subclasses need to persist the 'attempt' counter kept in this class when -082 * they do serializeStateData and restore it inside their deserializeStateData, just as they do -083 * for {@link #regionInfo}. -084 *
  • -085 *
-086 *

-087 * -088 *

TODO: Considering it is a priority doing all we can to get make a region available as soon as -089 * possible, re-attempting with any target makes sense if specified target fails in case of -090 * {@link AssignProcedure}. For {@link UnassignProcedure}, our concern is preventing data loss -091 * on failed unassign. See class doc for explanation. -092 */ -093@InterfaceAudience.Private -094public abstract class Regi


[36/51] [partial] hbase-site git commit: Published site at 7d3750bd9fc9747623549c242cc4171e224b3eaf.

2018-06-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3469cbc0/devapidocs/org/apache/hadoop/hbase/master/replication/RefreshPeerProcedure.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/replication/RefreshPeerProcedure.html
 
b/devapidocs/org/apache/hadoop/hbase/master/replication/RefreshPeerProcedure.html
index e73b7da..46943f3 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/master/replication/RefreshPeerProcedure.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/master/replication/RefreshPeerProcedure.html
@@ -119,7 +119,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Private
-public class RefreshPeerProcedure
+public class RefreshPeerProcedure
 extends Procedure
 implements PeerProcedureInterface, RemoteProcedureDispatcher.RemoteProcedure
 
@@ -353,7 +353,7 @@ implements 
 
 LOG
-private static final org.slf4j.Logger LOG
+private static final org.slf4j.Logger LOG
 
 
 
@@ -362,7 +362,7 @@ implements 
 
 peerId
-private https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String peerId
+private https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String peerId
 
 
 
@@ -371,7 +371,7 @@ implements 
 
 type
-private PeerProcedureInterface.PeerOperationType
 type
+private PeerProcedureInterface.PeerOperationType
 type
 
 
 
@@ -380,7 +380,7 @@ implements 
 
 targetServer
-private ServerName targetServer
+private ServerName targetServer
 
 
 
@@ -389,7 +389,7 @@ implements 
 
 dispatched
-private boolean dispatched
+private boolean dispatched
 
 
 
@@ -398,7 +398,7 @@ implements 
 
 event
-private ProcedureEvent 
event
+private ProcedureEvent 
event
 
 
 
@@ -407,7 +407,7 @@ implements 
 
 succ
-private boolean succ
+private boolean succ
 
 
 
@@ -424,7 +424,7 @@ implements 
 
 RefreshPeerProcedure
-public RefreshPeerProcedure()
+public RefreshPeerProcedure()
 
 
 
@@ -433,7 +433,7 @@ implements 
 
 RefreshPeerProcedure
-public RefreshPeerProcedure(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String peerId,
+public RefreshPeerProcedure(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String peerId,
 PeerProcedureInterface.PeerOperationType type,
 ServerName targetServer)
 
@@ -452,7 +452,7 @@ implements 
 
 getPeerId
-public https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String getPeerId()
+public https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String getPeerId()
 
 Specified by:
 getPeerId in
 interface PeerProcedureInterface
@@ -465,7 +465,7 @@ implements 
 
 getPeerOperationType
-public PeerProcedureInterface.PeerOperationType getPeerOperationType()
+public PeerProcedureInterface.PeerOperationType getPeerOperationType()
 
 Specified by:
 getPeerOperationType in
 interface PeerProcedureInterface
@@ -478,7 +478,7 @@ implements 
 
 toPeerModificationType
-private 
static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.PeerModificationType toPeerModificationType(PeerProcedureInterface.PeerOperationType type)
+private 
static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.PeerModificationType toPeerModificationType(PeerProcedureInterface.PeerOperationType type)
 
 
 
@@ -487,7 +487,7 @@ implements 
 
 toPeerOperationType
-private static PeerProcedureInterface.PeerOperationType toPeerOperationType(org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.PeerModificationType type)
+private static PeerProcedureInterface.PeerOperationType toPeerOperationType(org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.PeerModificationType type)
 
 
 
@@ -496,7 +496,7 @@ implements 
 
 remoteCallBuild
-public RemoteProcedureDispatcher.RemoteOperation remoteCallBuild(MasterProcedureEnv env,
+public RemoteProcedureDispatcher.RemoteOperation remoteCallBuild(MasterProcedureEnv env,
  ServerName remote)
 Description copied from 
interface: RemoteProcedureDispatcher.RemoteProcedure
 For building the remote operation.
@@ -512,7 +512,7 @@ implements 
 
 complete
-private void complete(MasterProcedureEnv env,
+private void complete(MasterProcedureEnv env,
   https://docs.oracle.com/javase/8/docs/api/java/lang/Throwable.html?is-external=true";
 title="class or interface in java.lang">Throwable error)
 
 
@@ -522,7 +522,7 @@ implements 
 
 remoteCallFailed
-public void remoteCallFailed(MasterProcedureEnv env,
+public 

[13/51] [partial] hbase-site git commit: Published site at 7d3750bd9fc9747623549c242cc4171e224b3eaf.

2018-06-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3469cbc0/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.ServerStateNode.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.ServerStateNode.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.ServerStateNode.html
index 5420d82..6ea3672 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.ServerStateNode.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.ServerStateNode.html
@@ -316,687 +316,728 @@
 308}
 309  }
 310
-311  public enum ServerState { ONLINE, 
SPLITTING, OFFLINE }
-312  public static class ServerStateNode 
implements Comparable {
-313private final ServerReportEvent 
reportEvent;
-314
-315private final 
Set regions;
-316private final ServerName 
serverName;
-317
-318private volatile ServerState state = 
ServerState.ONLINE;
-319private volatile int versionNumber = 
0;
-320
-321public ServerStateNode(final 
ServerName serverName) {
-322  this.serverName = serverName;
-323  this.regions = 
ConcurrentHashMap.newKeySet();
-324  this.reportEvent = new 
ServerReportEvent(serverName);
-325}
-326
-327public ServerName getServerName() {
-328  return serverName;
-329}
+311  /**
+312   * Server State.
+313   */
+314  public enum ServerState {
+315/**
+316 * Initial state. Available.
+317 */
+318ONLINE,
+319
+320/**
+321 * Server expired/crashed. Currently 
undergoing WAL splitting.
+322 */
+323SPLITTING,
+324
+325/**
+326 * WAL splitting done.
+327 */
+328OFFLINE
+329  }
 330
-331public ServerState getState() {
-332  return state;
-333}
-334
-335public int getVersionNumber() {
-336  return versionNumber;
-337}
-338
-339public ProcedureEvent 
getReportEvent() {
-340  return reportEvent;
-341}
+331  /**
+332   * State of Server; list of hosted 
regions, etc.
+333   */
+334  public static class ServerStateNode 
implements Comparable {
+335private final ServerReportEvent 
reportEvent;
+336
+337private final 
Set regions;
+338private final ServerName 
serverName;
+339
+340private volatile ServerState state = 
ServerState.ONLINE;
+341private volatile int versionNumber = 
0;
 342
-343public boolean isInState(final 
ServerState... expected) {
-344  boolean expectedState = false;
-345  if (expected != null) {
-346for (int i = 0; i < 
expected.length; ++i) {
-347  expectedState |= (state == 
expected[i]);
-348}
-349  }
-350  return expectedState;
+343public ServerStateNode(final 
ServerName serverName) {
+344  this.serverName = serverName;
+345  this.regions = 
ConcurrentHashMap.newKeySet();
+346  this.reportEvent = new 
ServerReportEvent(serverName);
+347}
+348
+349public ServerName getServerName() {
+350  return serverName;
 351}
 352
-353public void setState(final 
ServerState state) {
-354  this.state = state;
+353public ServerState getState() {
+354  return state;
 355}
 356
-357public void setVersionNumber(final 
int versionNumber) {
-358  this.versionNumber = 
versionNumber;
+357public int getVersionNumber() {
+358  return versionNumber;
 359}
 360
-361public Set 
getRegions() {
-362  return regions;
+361public ProcedureEvent 
getReportEvent() {
+362  return reportEvent;
 363}
 364
-365public int getRegionCount() {
-366  return regions.size();
+365public boolean isOffline() {
+366  return 
this.state.equals(ServerState.OFFLINE);
 367}
 368
-369public ArrayList 
getRegionInfoList() {
-370  ArrayList hris = 
new ArrayList(regions.size());
-371  for (RegionStateNode region: 
regions) {
-372
hris.add(region.getRegionInfo());
-373  }
-374  return hris;
-375}
-376
-377public void addRegion(final 
RegionStateNode regionNode) {
-378  this.regions.add(regionNode);
-379}
-380
-381public void removeRegion(final 
RegionStateNode regionNode) {
-382  this.regions.remove(regionNode);
-383}
-384
-385@Override
-386public int compareTo(final 
ServerStateNode other) {
-387  return 
getServerName().compareTo(other.getServerName());
-388}
-389
-390@Override
-391public int hashCode() {
-392  return 
getServerName().hashCode();
+369public boolean isInState(final 
ServerState... expected) {
+370  boolean expectedState = false;
+371  if (expected != null) {
+372for (int i = 0; i < 
expected.length; ++i) {
+373  expectedState |= (state == 
expected[i]);
+374}
+375  }
+376  return 

[16/51] [partial] hbase-site git commit: Published site at 7d3750bd9fc9747623549c242cc4171e224b3eaf.

2018-06-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3469cbc0/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.RegionStateStampComparator.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.RegionStateStampComparator.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.RegionStateStampComparator.html
index 5420d82..6ea3672 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.RegionStateStampComparator.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.RegionStateStampComparator.html
@@ -316,687 +316,728 @@
 308}
 309  }
 310
-311  public enum ServerState { ONLINE, 
SPLITTING, OFFLINE }
-312  public static class ServerStateNode 
implements Comparable {
-313private final ServerReportEvent 
reportEvent;
-314
-315private final 
Set regions;
-316private final ServerName 
serverName;
-317
-318private volatile ServerState state = 
ServerState.ONLINE;
-319private volatile int versionNumber = 
0;
-320
-321public ServerStateNode(final 
ServerName serverName) {
-322  this.serverName = serverName;
-323  this.regions = 
ConcurrentHashMap.newKeySet();
-324  this.reportEvent = new 
ServerReportEvent(serverName);
-325}
-326
-327public ServerName getServerName() {
-328  return serverName;
-329}
+311  /**
+312   * Server State.
+313   */
+314  public enum ServerState {
+315/**
+316 * Initial state. Available.
+317 */
+318ONLINE,
+319
+320/**
+321 * Server expired/crashed. Currently 
undergoing WAL splitting.
+322 */
+323SPLITTING,
+324
+325/**
+326 * WAL splitting done.
+327 */
+328OFFLINE
+329  }
 330
-331public ServerState getState() {
-332  return state;
-333}
-334
-335public int getVersionNumber() {
-336  return versionNumber;
-337}
-338
-339public ProcedureEvent 
getReportEvent() {
-340  return reportEvent;
-341}
+331  /**
+332   * State of Server; list of hosted 
regions, etc.
+333   */
+334  public static class ServerStateNode 
implements Comparable {
+335private final ServerReportEvent 
reportEvent;
+336
+337private final 
Set regions;
+338private final ServerName 
serverName;
+339
+340private volatile ServerState state = 
ServerState.ONLINE;
+341private volatile int versionNumber = 
0;
 342
-343public boolean isInState(final 
ServerState... expected) {
-344  boolean expectedState = false;
-345  if (expected != null) {
-346for (int i = 0; i < 
expected.length; ++i) {
-347  expectedState |= (state == 
expected[i]);
-348}
-349  }
-350  return expectedState;
+343public ServerStateNode(final 
ServerName serverName) {
+344  this.serverName = serverName;
+345  this.regions = 
ConcurrentHashMap.newKeySet();
+346  this.reportEvent = new 
ServerReportEvent(serverName);
+347}
+348
+349public ServerName getServerName() {
+350  return serverName;
 351}
 352
-353public void setState(final 
ServerState state) {
-354  this.state = state;
+353public ServerState getState() {
+354  return state;
 355}
 356
-357public void setVersionNumber(final 
int versionNumber) {
-358  this.versionNumber = 
versionNumber;
+357public int getVersionNumber() {
+358  return versionNumber;
 359}
 360
-361public Set 
getRegions() {
-362  return regions;
+361public ProcedureEvent 
getReportEvent() {
+362  return reportEvent;
 363}
 364
-365public int getRegionCount() {
-366  return regions.size();
+365public boolean isOffline() {
+366  return 
this.state.equals(ServerState.OFFLINE);
 367}
 368
-369public ArrayList 
getRegionInfoList() {
-370  ArrayList hris = 
new ArrayList(regions.size());
-371  for (RegionStateNode region: 
regions) {
-372
hris.add(region.getRegionInfo());
-373  }
-374  return hris;
-375}
-376
-377public void addRegion(final 
RegionStateNode regionNode) {
-378  this.regions.add(regionNode);
-379}
-380
-381public void removeRegion(final 
RegionStateNode regionNode) {
-382  this.regions.remove(regionNode);
-383}
-384
-385@Override
-386public int compareTo(final 
ServerStateNode other) {
-387  return 
getServerName().compareTo(other.getServerName());
-388}
-389
-390@Override
-391public int hashCode() {
-392  return 
getServerName().hashCode();
+369public boolean isInState(final 
ServerState... expected) {
+370  boolean expectedState = false;
+371  if (expected != null) {
+372for (int i = 0; i < 
expected.length; ++i) {
+373  expectedState |= (state == 
ex

[32/51] [partial] hbase-site git commit: Published site at 7d3750bd9fc9747623549c242cc4171e224b3eaf.

2018-06-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3469cbc0/devapidocs/org/apache/hadoop/hbase/replication/ZKReplicationQueueStorage.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/replication/ZKReplicationQueueStorage.html 
b/devapidocs/org/apache/hadoop/hbase/replication/ZKReplicationQueueStorage.html
index 90fd656..85c6bf7 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/replication/ZKReplicationQueueStorage.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/replication/ZKReplicationQueueStorage.html
@@ -972,7 +972,7 @@ implements 
 
 removeReplicatorIfQueueIsEmpty
-public void removeReplicatorIfQueueIsEmpty(ServerName serverName)
+public void removeReplicatorIfQueueIsEmpty(ServerName serverName)
 throws ReplicationException
 Description copied from 
interface: ReplicationQueueStorage
 Remove the record of region server if the queue is 
empty.
@@ -990,7 +990,7 @@ implements 
 
 getListOfReplicators0
-private https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List getListOfReplicators0()
+private https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List getListOfReplicators0()
 throws 
org.apache.zookeeper.KeeperException
 
 Throws:
@@ -1004,7 +1004,7 @@ implements 
 
 getListOfReplicators
-public https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List getListOfReplicators()
+public https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List getListOfReplicators()
   throws ReplicationException
 Description copied from 
interface: ReplicationQueueStorage
 Get a list of all region servers that have outstanding 
replication queues. These servers could
@@ -1025,7 +1025,7 @@ implements 
 
 getWALsInQueue0
-private https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">ListString> getWALsInQueue0(ServerName serverName,
+private https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">ListString> getWALsInQueue0(ServerName serverName,
  https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String queueId)
   throws org.apache.zookeeper.KeeperException
 
@@ -1040,7 +1040,7 @@ implements 
 
 getWALsInQueue
-public https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">ListString> getWALsInQueue(ServerName serverName,
+public https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">ListString> getWALsInQueue(ServerName serverName,
https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String queueId)
 throws ReplicationException
 Description copied from 
interface: ReplicationQueueStorage
@@ -1064,7 +1064,7 @@ implements 
 
 getAllQueues0
-private https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">ListString> getAllQueues0(ServerName serverName)
+private https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">ListString> getAllQueues0(ServerName serverName)
 throws org.apache.zookeeper.KeeperException
 
 Throws:
@@ -1078,7 +1078,7 @@ implements 
 
 getAllQueues
-public https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List[14/51] [partial] hbase-site git commit: Published site at 7d3750bd9fc9747623549c242cc4171e224b3eaf.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3469cbc0/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.ServerState.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.ServerState.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.ServerState.html
index 5420d82..6ea3672 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.ServerState.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.ServerState.html
@@ -316,687 +316,728 @@
 308}
 309  }
 310
-311  public enum ServerState { ONLINE, 
SPLITTING, OFFLINE }
-312  public static class ServerStateNode 
implements Comparable {
-313private final ServerReportEvent 
reportEvent;
-314
-315private final 
Set regions;
-316private final ServerName 
serverName;
-317
-318private volatile ServerState state = 
ServerState.ONLINE;
-319private volatile int versionNumber = 
0;
-320
-321public ServerStateNode(final 
ServerName serverName) {
-322  this.serverName = serverName;
-323  this.regions = 
ConcurrentHashMap.newKeySet();
-324  this.reportEvent = new 
ServerReportEvent(serverName);
-325}
-326
-327public ServerName getServerName() {
-328  return serverName;
-329}
+311  /**
+312   * Server State.
+313   */
+314  public enum ServerState {
+315/**
+316 * Initial state. Available.
+317 */
+318ONLINE,
+319
+320/**
+321 * Server expired/crashed. Currently 
undergoing WAL splitting.
+322 */
+323SPLITTING,
+324
+325/**
+326 * WAL splitting done.
+327 */
+328OFFLINE
+329  }
 330
-331public ServerState getState() {
-332  return state;
-333}
-334
-335public int getVersionNumber() {
-336  return versionNumber;
-337}
-338
-339public ProcedureEvent 
getReportEvent() {
-340  return reportEvent;
-341}
+331  /**
+332   * State of Server; list of hosted 
regions, etc.
+333   */
+334  public static class ServerStateNode 
implements Comparable {
+335private final ServerReportEvent 
reportEvent;
+336
+337private final 
Set regions;
+338private final ServerName 
serverName;
+339
+340private volatile ServerState state = 
ServerState.ONLINE;
+341private volatile int versionNumber = 
0;
 342
-343public boolean isInState(final 
ServerState... expected) {
-344  boolean expectedState = false;
-345  if (expected != null) {
-346for (int i = 0; i < 
expected.length; ++i) {
-347  expectedState |= (state == 
expected[i]);
-348}
-349  }
-350  return expectedState;
+343public ServerStateNode(final 
ServerName serverName) {
+344  this.serverName = serverName;
+345  this.regions = 
ConcurrentHashMap.newKeySet();
+346  this.reportEvent = new 
ServerReportEvent(serverName);
+347}
+348
+349public ServerName getServerName() {
+350  return serverName;
 351}
 352
-353public void setState(final 
ServerState state) {
-354  this.state = state;
+353public ServerState getState() {
+354  return state;
 355}
 356
-357public void setVersionNumber(final 
int versionNumber) {
-358  this.versionNumber = 
versionNumber;
+357public int getVersionNumber() {
+358  return versionNumber;
 359}
 360
-361public Set 
getRegions() {
-362  return regions;
+361public ProcedureEvent 
getReportEvent() {
+362  return reportEvent;
 363}
 364
-365public int getRegionCount() {
-366  return regions.size();
+365public boolean isOffline() {
+366  return 
this.state.equals(ServerState.OFFLINE);
 367}
 368
-369public ArrayList 
getRegionInfoList() {
-370  ArrayList hris = 
new ArrayList(regions.size());
-371  for (RegionStateNode region: 
regions) {
-372
hris.add(region.getRegionInfo());
-373  }
-374  return hris;
-375}
-376
-377public void addRegion(final 
RegionStateNode regionNode) {
-378  this.regions.add(regionNode);
-379}
-380
-381public void removeRegion(final 
RegionStateNode regionNode) {
-382  this.regions.remove(regionNode);
-383}
-384
-385@Override
-386public int compareTo(final 
ServerStateNode other) {
-387  return 
getServerName().compareTo(other.getServerName());
-388}
-389
-390@Override
-391public int hashCode() {
-392  return 
getServerName().hashCode();
+369public boolean isInState(final 
ServerState... expected) {
+370  boolean expectedState = false;
+371  if (expected != null) {
+372for (int i = 0; i < 
expected.length; ++i) {
+373  expectedState |= (state == 
expected[i]);
+374}
+375  }
+376  return expectedState;
+377 

[21/51] [partial] hbase-site git commit: Published site at 7d3750bd9fc9747623549c242cc4171e224b3eaf.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3469cbc0/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.html
index fe1e077..90c31f4 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.html
@@ -1072,894 +1072,913 @@
 1064
 1065  protected boolean 
waitServerReportEvent(final ServerName serverName, final Procedure proc) {
 1066final ServerStateNode serverNode = 
regionStates.getOrCreateServer(serverName);
-1067return 
serverNode.getReportEvent().suspendIfNotReady(proc);
-1068  }
-1069
-1070  protected void 
wakeServerReportEvent(final ServerStateNode serverNode) {
-1071
serverNode.getReportEvent().wake(getProcedureScheduler());
-1072  }
-1073
-1074  // 

-1075  //  RIT chore
-1076  // 

-1077  private static class 
RegionInTransitionChore extends 
ProcedureInMemoryChore {
-1078public RegionInTransitionChore(final 
int timeoutMsec) {
-1079  super(timeoutMsec);
-1080}
-1081
-1082@Override
-1083protected void periodicExecute(final 
MasterProcedureEnv env) {
-1084  final AssignmentManager am = 
env.getAssignmentManager();
-1085
-1086  final RegionInTransitionStat 
ritStat = am.computeRegionInTransitionStat();
-1087  if 
(ritStat.hasRegionsOverThreshold()) {
-1088for (RegionState hri: 
ritStat.getRegionOverThreshold()) {
-1089  
am.handleRegionOverStuckWarningThreshold(hri.getRegion());
-1090}
-1091  }
-1092
-1093  // update metrics
-1094  
am.updateRegionsInTransitionMetrics(ritStat);
-1095}
-1096  }
-1097
-1098  public RegionInTransitionStat 
computeRegionInTransitionStat() {
-1099final RegionInTransitionStat rit = 
new RegionInTransitionStat(getConfiguration());
-1100rit.update(this);
-1101return rit;
-1102  }
-1103
-1104  public static class 
RegionInTransitionStat {
-1105private final int ritThreshold;
+1067if (serverNode == null) {
+1068  LOG.warn("serverName=null; {}", 
proc);
+1069}
+1070return 
serverNode.getReportEvent().suspendIfNotReady(proc);
+1071  }
+1072
+1073  protected void 
wakeServerReportEvent(final ServerStateNode serverNode) {
+1074
serverNode.getReportEvent().wake(getProcedureScheduler());
+1075  }
+1076
+1077  // 

+1078  //  RIT chore
+1079  // 

+1080  private static class 
RegionInTransitionChore extends 
ProcedureInMemoryChore {
+1081public RegionInTransitionChore(final 
int timeoutMsec) {
+1082  super(timeoutMsec);
+1083}
+1084
+1085@Override
+1086protected void periodicExecute(final 
MasterProcedureEnv env) {
+1087  final AssignmentManager am = 
env.getAssignmentManager();
+1088
+1089  final RegionInTransitionStat 
ritStat = am.computeRegionInTransitionStat();
+1090  if 
(ritStat.hasRegionsOverThreshold()) {
+1091for (RegionState hri: 
ritStat.getRegionOverThreshold()) {
+1092  
am.handleRegionOverStuckWarningThreshold(hri.getRegion());
+1093}
+1094  }
+1095
+1096  // update metrics
+1097  
am.updateRegionsInTransitionMetrics(ritStat);
+1098}
+1099  }
+1100
+1101  public RegionInTransitionStat 
computeRegionInTransitionStat() {
+1102final RegionInTransitionStat rit = 
new RegionInTransitionStat(getConfiguration());
+1103rit.update(this);
+1104return rit;
+1105  }
 1106
-1107private HashMap ritsOverThreshold = null;
-1108private long statTimestamp;
-1109private long oldestRITTime = 0;
-1110private int totalRITsTwiceThreshold 
= 0;
-private int totalRITs = 0;
-1112
-1113@VisibleForTesting
-1114public RegionInTransitionStat(final 
Configuration conf) {
-1115  this.ritThreshold =
-1116
conf.getInt(METRICS_RIT_STUCK_WARNING_THRESHOLD, 
DEFAULT_RIT_STUCK_WARNING_THRESHOLD);
-1117}
-1118
-1119public int getRITThreshold() {
-1120  return ritThreshold;
-1121}
-1122
-1123public long getTimestamp() {
-1124  return statTimestamp;
-1125}
-1126
-1127public int getTotalRITs() {
-1128  return totalRITs;
-1129}
-1130
-1131public long getOldestRITTime() {
-1132  return oldestRITTime;
-1133}
-1134
-1135public int 
getTotalRITsOverThreshold() {
-1136  Ma

[20/51] [partial] hbase-site git commit: Published site at 7d3750bd9fc9747623549c242cc4171e224b3eaf.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3469cbc0/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/FailedRemoteDispatchException.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/FailedRemoteDispatchException.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/FailedRemoteDispatchException.html
deleted file mode 100644
index 58257aa..000
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/FailedRemoteDispatchException.html
+++ /dev/null
@@ -1,105 +0,0 @@
-http://www.w3.org/TR/html4/loose.dtd";>
-
-
-Source code
-
-
-
-
-001/**
-002 * Licensed to the Apache Software 
Foundation (ASF) under one
-003 * or more contributor license 
agreements.  See the NOTICE file
-004 * distributed with this work for 
additional information
-005 * regarding copyright ownership.  The 
ASF licenses this file
-006 * to you under the Apache License, 
Version 2.0 (the
-007 * "License"); you may not use this file 
except in compliance
-008 * with the License.  You may obtain a 
copy of the License at
-009 *
-010 * 
http://www.apache.org/licenses/LICENSE-2.0
-011 *
-012 * Unless required by applicable law or 
agreed to in writing, software
-013 * distributed under the License is 
distributed on an "AS IS" BASIS,
-014 * WITHOUT WARRANTIES OR CONDITIONS OF 
ANY KIND, either express or implied.
-015 * See the License for the specific 
language governing permissions and
-016 * limitations under the License.
-017 */
-018package 
org.apache.hadoop.hbase.master.assignment;
-019
-020import 
org.apache.hadoop.hbase.HBaseIOException;
-021import 
org.apache.yetus.audience.InterfaceAudience;
-022
-023/**
-024 * Used internally signaling failed queue 
of a remote procedure
-025 * operation.
-026 */
-027@SuppressWarnings("serial")
-028@InterfaceAudience.Private
-029public class 
FailedRemoteDispatchException extends HBaseIOException {
-030  public 
FailedRemoteDispatchException(String msg) {
-031super(msg);
-032  }
-033}
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3469cbc0/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/MoveRegionProcedure.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/MoveRegionProcedure.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/MoveRegionProcedure.html
index 6b7e383..09fe96e 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/MoveRegionProcedure.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/MoveRegionProcedure.html
@@ -87,99 +87,102 @@
 079try {
 080  preflightChecks(env, true);
 081  checkOnline(env, 
this.plan.getRegionInfo());
-082} catch (HBaseIOException e) {
-083  LOG.warn(this.toString() + " 
FAILED because " + e.toString());
-084  return Flow.NO_MORE_STATE;
-085}
-086break;
-087  case MOVE_REGION_UNASSIGN:
-088addChildProcedure(new 
UnassignProcedure(plan.getRegionInfo(), plan.getSource(),
-089plan.getDestination(), 
true));
-090
setNextState(MoveRegionState.MOVE_REGION_ASSIGN);
-091break;
-092  case MOVE_REGION_ASSIGN:
-093AssignProcedure assignProcedure = 
plan.getDestination() == null ?
-094new 
AssignProcedure(plan.getRegionInfo()):
-095new 
AssignProcedure(plan.getRegionInfo(), plan.getDestination());
-096
addChildProcedure(assignProcedure);
-097return Flow.NO_MORE_STATE;
-098  default:
-099throw new 
UnsupportedOperationException("unhandled state=" + state);
-100}
-101return Flow.HAS_MORE_STATE;
-102  }
-103
-104  @Override
-105  protected void rollbackState(final 
MasterProcedureEnv env, final MoveRegionState state)
-106  throws IOException {
-107// no-op
-108  }
-109
-110  @Override
-111  public boolean abort(final 
MasterProcedureEnv env) {
-112return false;
-113  }
-114
-115  @Override
-116  public void toStringClassDetails(final 
StringBuilder sb) {
-117
sb.append(getClass().getSimpleName());
-118sb.append(" ");
-119sb.append(plan);
-120  }
-121
-122  @Override
-123  protected MoveRegionState 
getInitialState() {
-124return 
MoveRegionState.MOVE_REGION_UNASSIGN;
-125  }
-126
-127  @Override
-128  protected int getStateId(final 
MoveRegionState state) {
-129return state.getNumber();
-130  }
-131
-132  @Override
-133  protected MoveRegionState 
getState(final int stateId) {
-134return 
MoveRegionState.valueOf(stateId);
-135  }
-136
-137  @Override
-138  public TableName getTableName() {
-139return 
plan.getRegionInfo().getTable();
-140  }
-141
-142  @Override
-143  public TableOperationType 

[15/51] [partial] hbase-site git commit: Published site at 7d3750bd9fc9747623549c242cc4171e224b3eaf.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3469cbc0/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.ServerReportEvent.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.ServerReportEvent.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.ServerReportEvent.html
index 5420d82..6ea3672 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.ServerReportEvent.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.ServerReportEvent.html
@@ -316,687 +316,728 @@
 308}
 309  }
 310
-311  public enum ServerState { ONLINE, 
SPLITTING, OFFLINE }
-312  public static class ServerStateNode 
implements Comparable {
-313private final ServerReportEvent 
reportEvent;
-314
-315private final 
Set regions;
-316private final ServerName 
serverName;
-317
-318private volatile ServerState state = 
ServerState.ONLINE;
-319private volatile int versionNumber = 
0;
-320
-321public ServerStateNode(final 
ServerName serverName) {
-322  this.serverName = serverName;
-323  this.regions = 
ConcurrentHashMap.newKeySet();
-324  this.reportEvent = new 
ServerReportEvent(serverName);
-325}
-326
-327public ServerName getServerName() {
-328  return serverName;
-329}
+311  /**
+312   * Server State.
+313   */
+314  public enum ServerState {
+315/**
+316 * Initial state. Available.
+317 */
+318ONLINE,
+319
+320/**
+321 * Server expired/crashed. Currently 
undergoing WAL splitting.
+322 */
+323SPLITTING,
+324
+325/**
+326 * WAL splitting done.
+327 */
+328OFFLINE
+329  }
 330
-331public ServerState getState() {
-332  return state;
-333}
-334
-335public int getVersionNumber() {
-336  return versionNumber;
-337}
-338
-339public ProcedureEvent 
getReportEvent() {
-340  return reportEvent;
-341}
+331  /**
+332   * State of Server; list of hosted 
regions, etc.
+333   */
+334  public static class ServerStateNode 
implements Comparable {
+335private final ServerReportEvent 
reportEvent;
+336
+337private final 
Set regions;
+338private final ServerName 
serverName;
+339
+340private volatile ServerState state = 
ServerState.ONLINE;
+341private volatile int versionNumber = 
0;
 342
-343public boolean isInState(final 
ServerState... expected) {
-344  boolean expectedState = false;
-345  if (expected != null) {
-346for (int i = 0; i < 
expected.length; ++i) {
-347  expectedState |= (state == 
expected[i]);
-348}
-349  }
-350  return expectedState;
+343public ServerStateNode(final 
ServerName serverName) {
+344  this.serverName = serverName;
+345  this.regions = 
ConcurrentHashMap.newKeySet();
+346  this.reportEvent = new 
ServerReportEvent(serverName);
+347}
+348
+349public ServerName getServerName() {
+350  return serverName;
 351}
 352
-353public void setState(final 
ServerState state) {
-354  this.state = state;
+353public ServerState getState() {
+354  return state;
 355}
 356
-357public void setVersionNumber(final 
int versionNumber) {
-358  this.versionNumber = 
versionNumber;
+357public int getVersionNumber() {
+358  return versionNumber;
 359}
 360
-361public Set 
getRegions() {
-362  return regions;
+361public ProcedureEvent 
getReportEvent() {
+362  return reportEvent;
 363}
 364
-365public int getRegionCount() {
-366  return regions.size();
+365public boolean isOffline() {
+366  return 
this.state.equals(ServerState.OFFLINE);
 367}
 368
-369public ArrayList 
getRegionInfoList() {
-370  ArrayList hris = 
new ArrayList(regions.size());
-371  for (RegionStateNode region: 
regions) {
-372
hris.add(region.getRegionInfo());
-373  }
-374  return hris;
-375}
-376
-377public void addRegion(final 
RegionStateNode regionNode) {
-378  this.regions.add(regionNode);
-379}
-380
-381public void removeRegion(final 
RegionStateNode regionNode) {
-382  this.regions.remove(regionNode);
-383}
-384
-385@Override
-386public int compareTo(final 
ServerStateNode other) {
-387  return 
getServerName().compareTo(other.getServerName());
-388}
-389
-390@Override
-391public int hashCode() {
-392  return 
getServerName().hashCode();
+369public boolean isInState(final 
ServerState... expected) {
+370  boolean expectedState = false;
+371  if (expected != null) {
+372for (int i = 0; i < 
expected.length; ++i) {
+373  expectedState |= (state == 
expected[i]);
+374}
+375  }
+376   

[22/51] [partial] hbase-site git commit: Published site at 7d3750bd9fc9747623549c242cc4171e224b3eaf.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3469cbc0/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionStat.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionStat.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionStat.html
index fe1e077..90c31f4 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionStat.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionStat.html
@@ -1072,894 +1072,913 @@
 1064
 1065  protected boolean 
waitServerReportEvent(final ServerName serverName, final Procedure proc) {
 1066final ServerStateNode serverNode = 
regionStates.getOrCreateServer(serverName);
-1067return 
serverNode.getReportEvent().suspendIfNotReady(proc);
-1068  }
-1069
-1070  protected void 
wakeServerReportEvent(final ServerStateNode serverNode) {
-1071
serverNode.getReportEvent().wake(getProcedureScheduler());
-1072  }
-1073
-1074  // 

-1075  //  RIT chore
-1076  // 

-1077  private static class 
RegionInTransitionChore extends 
ProcedureInMemoryChore {
-1078public RegionInTransitionChore(final 
int timeoutMsec) {
-1079  super(timeoutMsec);
-1080}
-1081
-1082@Override
-1083protected void periodicExecute(final 
MasterProcedureEnv env) {
-1084  final AssignmentManager am = 
env.getAssignmentManager();
-1085
-1086  final RegionInTransitionStat 
ritStat = am.computeRegionInTransitionStat();
-1087  if 
(ritStat.hasRegionsOverThreshold()) {
-1088for (RegionState hri: 
ritStat.getRegionOverThreshold()) {
-1089  
am.handleRegionOverStuckWarningThreshold(hri.getRegion());
-1090}
-1091  }
-1092
-1093  // update metrics
-1094  
am.updateRegionsInTransitionMetrics(ritStat);
-1095}
-1096  }
-1097
-1098  public RegionInTransitionStat 
computeRegionInTransitionStat() {
-1099final RegionInTransitionStat rit = 
new RegionInTransitionStat(getConfiguration());
-1100rit.update(this);
-1101return rit;
-1102  }
-1103
-1104  public static class 
RegionInTransitionStat {
-1105private final int ritThreshold;
+1067if (serverNode == null) {
+1068  LOG.warn("serverName=null; {}", 
proc);
+1069}
+1070return 
serverNode.getReportEvent().suspendIfNotReady(proc);
+1071  }
+1072
+1073  protected void 
wakeServerReportEvent(final ServerStateNode serverNode) {
+1074
serverNode.getReportEvent().wake(getProcedureScheduler());
+1075  }
+1076
+1077  // 

+1078  //  RIT chore
+1079  // 

+1080  private static class 
RegionInTransitionChore extends 
ProcedureInMemoryChore {
+1081public RegionInTransitionChore(final 
int timeoutMsec) {
+1082  super(timeoutMsec);
+1083}
+1084
+1085@Override
+1086protected void periodicExecute(final 
MasterProcedureEnv env) {
+1087  final AssignmentManager am = 
env.getAssignmentManager();
+1088
+1089  final RegionInTransitionStat 
ritStat = am.computeRegionInTransitionStat();
+1090  if 
(ritStat.hasRegionsOverThreshold()) {
+1091for (RegionState hri: 
ritStat.getRegionOverThreshold()) {
+1092  
am.handleRegionOverStuckWarningThreshold(hri.getRegion());
+1093}
+1094  }
+1095
+1096  // update metrics
+1097  
am.updateRegionsInTransitionMetrics(ritStat);
+1098}
+1099  }
+1100
+1101  public RegionInTransitionStat 
computeRegionInTransitionStat() {
+1102final RegionInTransitionStat rit = 
new RegionInTransitionStat(getConfiguration());
+1103rit.update(this);
+1104return rit;
+1105  }
 1106
-1107private HashMap ritsOverThreshold = null;
-1108private long statTimestamp;
-1109private long oldestRITTime = 0;
-1110private int totalRITsTwiceThreshold 
= 0;
-private int totalRITs = 0;
-1112
-1113@VisibleForTesting
-1114public RegionInTransitionStat(final 
Configuration conf) {
-1115  this.ritThreshold =
-1116
conf.getInt(METRICS_RIT_STUCK_WARNING_THRESHOLD, 
DEFAULT_RIT_STUCK_WARNING_THRESHOLD);
-1117}
-1118
-1119public int getRITThreshold() {
-1120  return ritThreshold;
-1121}
-1122
-1123public long getTimestamp() {
-1124  return statTimestamp;
-1125}
-1126
-1127public int getTotalRITs() {
-1128  return totalRITs;
-1129}
-1130
-1131public long getOldestRITTime() {

[27/51] [partial] hbase-site git commit: Published site at 7d3750bd9fc9747623549c242cc4171e224b3eaf.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3469cbc0/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterRpcServices.BalanceSwitchMode.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterRpcServices.BalanceSwitchMode.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterRpcServices.BalanceSwitchMode.html
index 275ca84..b9a30c4 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterRpcServices.BalanceSwitchMode.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterRpcServices.BalanceSwitchMode.html
@@ -1499,782 +1499,786 @@
 1491  final RpcController controller,
 1492  final IsInMaintenanceModeRequest 
request) throws ServiceException {
 1493IsInMaintenanceModeResponse.Builder 
response = IsInMaintenanceModeResponse.newBuilder();
-1494
response.setInMaintenanceMode(master.isInMaintenanceMode());
-1495return response.build();
-1496  }
-1497
-1498  @Override
-1499  public UnassignRegionResponse 
unassignRegion(RpcController controller,
-1500  UnassignRegionRequest req) throws 
ServiceException {
-1501try {
-1502  final byte [] regionName = 
req.getRegion().getValue().toByteArray();
-1503  RegionSpecifierType type = 
req.getRegion().getType();
-1504  final boolean force = 
req.getForce();
-1505  UnassignRegionResponse urr = 
UnassignRegionResponse.newBuilder().build();
-1506
-1507  master.checkInitialized();
-1508  if (type != 
RegionSpecifierType.REGION_NAME) {
-1509LOG.warn("unassignRegion 
specifier type: expected: " + RegionSpecifierType.REGION_NAME
-1510  + " actual: " + type);
-1511  }
-1512  Pair 
pair =
-1513
MetaTableAccessor.getRegion(master.getConnection(), regionName);
-1514  if 
(Bytes.equals(RegionInfoBuilder.FIRST_META_REGIONINFO.getRegionName(),regionName))
 {
-1515pair = new 
Pair<>(RegionInfoBuilder.FIRST_META_REGIONINFO,
-1516
master.getMetaTableLocator().getMetaRegionLocation(master.getZooKeeper()));
-1517  }
-1518  if (pair == null) {
-1519throw new 
UnknownRegionException(Bytes.toString(regionName));
-1520  }
-1521
-1522  RegionInfo hri = 
pair.getFirst();
-1523  if (master.cpHost != null) {
-1524master.cpHost.preUnassign(hri, 
force);
-1525  }
-1526  
LOG.debug(master.getClientIdAuditPrefix() + " unassign " + 
hri.getRegionNameAsString()
-1527  + " in current location if it 
is online and reassign.force=" + force);
-1528  
master.getAssignmentManager().unassign(hri);
-1529  if (master.cpHost != null) {
-1530master.cpHost.postUnassign(hri, 
force);
-1531  }
-1532
-1533  return urr;
-1534} catch (IOException ioe) {
-1535  throw new ServiceException(ioe);
-1536}
-1537  }
-1538
-1539  @Override
-1540  public 
ReportRegionStateTransitionResponse reportRegionStateTransition(RpcController 
c,
-1541  ReportRegionStateTransitionRequest 
req) throws ServiceException {
-1542try {
-1543  master.checkServiceStarted();
-1544  return 
master.getAssignmentManager().reportRegionStateTransition(req);
-1545} catch (IOException ioe) {
-1546  throw new ServiceException(ioe);
-1547}
-1548  }
-1549
-1550  @Override
-1551  public SetQuotaResponse 
setQuota(RpcController c, SetQuotaRequest req)
-1552  throws ServiceException {
-1553try {
-1554  master.checkInitialized();
-1555  return 
master.getMasterQuotaManager().setQuota(req);
-1556} catch (Exception e) {
-1557  throw new ServiceException(e);
-1558}
-1559  }
-1560
-1561  @Override
-1562  public 
MajorCompactionTimestampResponse getLastMajorCompactionTimestamp(RpcController 
controller,
-1563  MajorCompactionTimestampRequest 
request) throws ServiceException {
-1564
MajorCompactionTimestampResponse.Builder response =
-1565
MajorCompactionTimestampResponse.newBuilder();
-1566try {
-1567  master.checkInitialized();
-1568  
response.setCompactionTimestamp(master.getLastMajorCompactionTimestamp(ProtobufUtil
-1569  
.toTableName(request.getTableName(;
-1570} catch (IOException e) {
-1571  throw new ServiceException(e);
-1572}
-1573return response.build();
-1574  }
-1575
-1576  @Override
-1577  public 
MajorCompactionTimestampResponse getLastMajorCompactionTimestampForRegion(
-1578  RpcController controller, 
MajorCompactionTimestampForRegionRequest request)
-1579  throws ServiceException {
-1580
MajorCompactionTimestampResponse.Builder response =
-1581
MajorCompactionTimestampResponse.newBuilder();
-1582try {
-1583  master.checkInitialized();
-1584  
response.setCompactionTimestamp(master.getLastMajorCompactionTimestampForRegion(request
-1585  
.getRegion().getValue().toByteArray()));
-1586} catch (IOException e) {
-1587  throw new ServiceE

[29/51] [partial] hbase-site git commit: Published site at 7d3750bd9fc9747623549c242cc4171e224b3eaf.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3469cbc0/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html
index aa48364..9549aa5 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html
@@ -2830,843 +2830,858 @@
 2822   * @return true if master is in 
maintenanceMode
 2823   */
 2824  @Override
-2825  public boolean isInMaintenanceMode() 
{
-2826return 
maintenanceModeTracker.isInMaintenanceMode();
-2827  }
-2828
-2829  @VisibleForTesting
-2830  public void setInitialized(boolean 
isInitialized) {
-2831
procedureExecutor.getEnvironment().setEventReady(initialized, isInitialized);
-2832  }
-2833
-2834  @Override
-2835  public ProcedureEvent 
getInitializedEvent() {
-2836return initialized;
-2837  }
-2838
-2839  /**
-2840   * ServerCrashProcessingEnabled is set 
false before completing assignMeta to prevent processing
-2841   * of crashed servers.
-2842   * @return true if assignMeta has 
completed;
-2843   */
-2844  @Override
-2845  public boolean 
isServerCrashProcessingEnabled() {
-2846return 
serverCrashProcessingEnabled.isReady();
-2847  }
-2848
-2849  @VisibleForTesting
-2850  public void 
setServerCrashProcessingEnabled(final boolean b) {
-2851
procedureExecutor.getEnvironment().setEventReady(serverCrashProcessingEnabled, 
b);
-2852  }
-2853
-2854  public ProcedureEvent 
getServerCrashProcessingEnabledEvent() {
-2855return 
serverCrashProcessingEnabled;
-2856  }
-2857
-2858  /**
-2859   * Compute the average load across all 
region servers.
-2860   * Currently, this uses a very naive 
computation - just uses the number of
-2861   * regions being served, ignoring 
stats about number of requests.
-2862   * @return the average load
-2863   */
-2864  public double getAverageLoad() {
-2865if (this.assignmentManager == null) 
{
-2866  return 0;
-2867}
-2868
-2869RegionStates regionStates = 
this.assignmentManager.getRegionStates();
-2870if (regionStates == null) {
-2871  return 0;
-2872}
-2873return 
regionStates.getAverageLoad();
-2874  }
-2875
-2876  /*
-2877   * @return the count of region split 
plans executed
-2878   */
-2879  public long getSplitPlanCount() {
-2880return splitPlanCount;
-2881  }
-2882
-2883  /*
-2884   * @return the count of region merge 
plans executed
-2885   */
-2886  public long getMergePlanCount() {
-2887return mergePlanCount;
-2888  }
-2889
-2890  @Override
-2891  public boolean registerService(Service 
instance) {
-2892/*
-2893 * No stacking of instances is 
allowed for a single service name
-2894 */
-2895Descriptors.ServiceDescriptor 
serviceDesc = instance.getDescriptorForType();
-2896String serviceName = 
CoprocessorRpcUtils.getServiceName(serviceDesc);
-2897if 
(coprocessorServiceHandlers.containsKey(serviceName)) {
-2898  LOG.error("Coprocessor service 
"+serviceName+
-2899  " already registered, 
rejecting request from "+instance
-2900  );
-2901  return false;
-2902}
-2903
-2904
coprocessorServiceHandlers.put(serviceName, instance);
-2905if (LOG.isDebugEnabled()) {
-2906  LOG.debug("Registered master 
coprocessor service: service="+serviceName);
-2907}
-2908return true;
-2909  }
-2910
-2911  /**
-2912   * Utility for constructing an 
instance of the passed HMaster class.
-2913   * @param masterClass
-2914   * @return HMaster instance.
-2915   */
-2916  public static HMaster 
constructMaster(Class masterClass,
-2917  final Configuration conf)  {
-2918try {
-2919  Constructor c = masterClass.getConstructor(Configuration.class);
-2920  return c.newInstance(conf);
-2921} catch(Exception e) {
-2922  Throwable error = e;
-2923  if (e instanceof 
InvocationTargetException &&
-2924  
((InvocationTargetException)e).getTargetException() != null) {
-2925error = 
((InvocationTargetException)e).getTargetException();
-2926  }
-2927  throw new RuntimeException("Failed 
construction of Master: " + masterClass.toString() + ". "
-2928, error);
-2929}
-2930  }
-2931
-2932  /**
-2933   * @see 
org.apache.hadoop.hbase.master.HMasterCommandLine
-2934   */
-2935  public static void main(String [] 
args) {
-2936LOG.info("STARTING service " + 
HMaster.class.getSimpleName());
-2937VersionInfo.logVersion();
-2938new 
HMasterCommandLine(HMaster.class).doMain(args);
-2939  }
-2940
-2941  public HFileCleaner getHFileCleaner() 
{
-2942return this.hfileCleaner;
-2943  }
-2944
-2945  public LogCleaner getLogCleaner() {
-2946return this.logCleane

[18/51] [partial] hbase-site git commit: Published site at 7d3750bd9fc9747623549c242cc4171e224b3eaf.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3469cbc0/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.RegionFailedOpen.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.RegionFailedOpen.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.RegionFailedOpen.html
index 5420d82..6ea3672 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.RegionFailedOpen.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.RegionFailedOpen.html
@@ -316,687 +316,728 @@
 308}
 309  }
 310
-311  public enum ServerState { ONLINE, 
SPLITTING, OFFLINE }
-312  public static class ServerStateNode 
implements Comparable {
-313private final ServerReportEvent 
reportEvent;
-314
-315private final 
Set regions;
-316private final ServerName 
serverName;
-317
-318private volatile ServerState state = 
ServerState.ONLINE;
-319private volatile int versionNumber = 
0;
-320
-321public ServerStateNode(final 
ServerName serverName) {
-322  this.serverName = serverName;
-323  this.regions = 
ConcurrentHashMap.newKeySet();
-324  this.reportEvent = new 
ServerReportEvent(serverName);
-325}
-326
-327public ServerName getServerName() {
-328  return serverName;
-329}
+311  /**
+312   * Server State.
+313   */
+314  public enum ServerState {
+315/**
+316 * Initial state. Available.
+317 */
+318ONLINE,
+319
+320/**
+321 * Server expired/crashed. Currently 
undergoing WAL splitting.
+322 */
+323SPLITTING,
+324
+325/**
+326 * WAL splitting done.
+327 */
+328OFFLINE
+329  }
 330
-331public ServerState getState() {
-332  return state;
-333}
-334
-335public int getVersionNumber() {
-336  return versionNumber;
-337}
-338
-339public ProcedureEvent 
getReportEvent() {
-340  return reportEvent;
-341}
+331  /**
+332   * State of Server; list of hosted 
regions, etc.
+333   */
+334  public static class ServerStateNode 
implements Comparable {
+335private final ServerReportEvent 
reportEvent;
+336
+337private final 
Set regions;
+338private final ServerName 
serverName;
+339
+340private volatile ServerState state = 
ServerState.ONLINE;
+341private volatile int versionNumber = 
0;
 342
-343public boolean isInState(final 
ServerState... expected) {
-344  boolean expectedState = false;
-345  if (expected != null) {
-346for (int i = 0; i < 
expected.length; ++i) {
-347  expectedState |= (state == 
expected[i]);
-348}
-349  }
-350  return expectedState;
+343public ServerStateNode(final 
ServerName serverName) {
+344  this.serverName = serverName;
+345  this.regions = 
ConcurrentHashMap.newKeySet();
+346  this.reportEvent = new 
ServerReportEvent(serverName);
+347}
+348
+349public ServerName getServerName() {
+350  return serverName;
 351}
 352
-353public void setState(final 
ServerState state) {
-354  this.state = state;
+353public ServerState getState() {
+354  return state;
 355}
 356
-357public void setVersionNumber(final 
int versionNumber) {
-358  this.versionNumber = 
versionNumber;
+357public int getVersionNumber() {
+358  return versionNumber;
 359}
 360
-361public Set 
getRegions() {
-362  return regions;
+361public ProcedureEvent 
getReportEvent() {
+362  return reportEvent;
 363}
 364
-365public int getRegionCount() {
-366  return regions.size();
+365public boolean isOffline() {
+366  return 
this.state.equals(ServerState.OFFLINE);
 367}
 368
-369public ArrayList 
getRegionInfoList() {
-370  ArrayList hris = 
new ArrayList(regions.size());
-371  for (RegionStateNode region: 
regions) {
-372
hris.add(region.getRegionInfo());
-373  }
-374  return hris;
-375}
-376
-377public void addRegion(final 
RegionStateNode regionNode) {
-378  this.regions.add(regionNode);
-379}
-380
-381public void removeRegion(final 
RegionStateNode regionNode) {
-382  this.regions.remove(regionNode);
-383}
-384
-385@Override
-386public int compareTo(final 
ServerStateNode other) {
-387  return 
getServerName().compareTo(other.getServerName());
-388}
-389
-390@Override
-391public int hashCode() {
-392  return 
getServerName().hashCode();
+369public boolean isInState(final 
ServerState... expected) {
+370  boolean expectedState = false;
+371  if (expected != null) {
+372for (int i = 0; i < 
expected.length; ++i) {
+373  expectedState |= (state == 
expected[i]);
+374}
+375  }
+376  re

[05/51] [partial] hbase-site git commit: Published site at 7d3750bd9fc9747623549c242cc4171e224b3eaf.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3469cbc0/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.RemoteOperation.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.RemoteOperation.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.RemoteOperation.html
index 594ef24..17d5c40 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.RemoteOperation.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.RemoteOperation.html
@@ -170,241 +170,242 @@
 162  }
 163
 164  /**
-165   * Add a remote rpc. Be sure to check 
result for successful add.
+165   * Add a remote rpc.
 166   * @param key the node identifier
-167   * @return True if we successfully 
added the operation.
-168   */
-169  public boolean addOperationToNode(final 
TRemote key, RemoteProcedure rp) {
+167   */
+168  public void addOperationToNode(final 
TRemote key, RemoteProcedure rp)
+169  throws 
NullTargetServerDispatchException, NoServerDispatchException, 
NoNodeDispatchException {
 170if (key == null) {
-171  // Key is remote server name. Be 
careful. It could have been nulled by a concurrent
-172  // ServerCrashProcedure shutting 
down outstanding RPC requests. See remoteCallFailed.
-173  return false;
-174}
-175assert key != null : "found null key 
for node";
-176BufferNode node = nodeMap.get(key);
-177if (node == null) {
-178  return false;
-179}
-180node.add(rp);
-181// Check our node still in the map; 
could have been removed by #removeNode.
-182return nodeMap.containsValue(node);
-183  }
-184
-185  /**
-186   * Remove a remote node
-187   * @param key the node identifier
-188   */
-189  public boolean removeNode(final TRemote 
key) {
-190final BufferNode node = 
nodeMap.remove(key);
-191if (node == null) return false;
-192node.abortOperationsInQueue();
-193return true;
-194  }
-195
-196  // 

-197  //  Task Helpers
-198  // 

-199  protected Future 
submitTask(Callable task) {
-200return threadPool.submit(task);
-201  }
-202
-203  protected Future 
submitTask(Callable task, long delay, TimeUnit unit) {
-204final FutureTask 
futureTask = new FutureTask(task);
-205timeoutExecutor.add(new 
DelayedTask(futureTask, delay, unit));
-206return futureTask;
-207  }
-208
-209  protected abstract void 
remoteDispatch(TRemote key, Set operations);
-210  protected abstract void 
abortPendingOperations(TRemote key, Set operations);
-211
-212  /**
-213   * Data structure with reference to 
remote operation.
-214   */
-215  public static abstract class 
RemoteOperation {
-216private final RemoteProcedure 
remoteProcedure;
-217
-218protected RemoteOperation(final 
RemoteProcedure remoteProcedure) {
-219  this.remoteProcedure = 
remoteProcedure;
-220}
-221
-222public RemoteProcedure 
getRemoteProcedure() {
-223  return remoteProcedure;
-224}
-225  }
-226
-227  /**
-228   * Remote procedure reference.
-229   */
-230  public interface 
RemoteProcedure {
-231/**
-232 * For building the remote 
operation.
-233 */
-234RemoteOperation remoteCallBuild(TEnv 
env, TRemote remote);
-235
-236/**
-237 * Called when the executeProcedure 
call is failed.
-238 */
-239void remoteCallFailed(TEnv env, 
TRemote remote, IOException exception);
-240
-241/**
-242 * Called when RS tells the remote 
procedure is succeeded through the
-243 * {@code reportProcedureDone} 
method.
-244 */
-245void remoteOperationCompleted(TEnv 
env);
-246
-247/**
-248 * Called when RS tells the remote 
procedure is failed through the {@code reportProcedureDone}
-249 * method.
-250 */
-251void remoteOperationFailed(TEnv env, 
RemoteProcedureException error);
-252  }
-253
-254  /**
-255   * Account of what procedures are 
running on remote node.
-256   * @param 
-257   * @param 
-258   */
-259  public interface RemoteNode {
-260TRemote getKey();
-261void add(RemoteProcedure operation);
-262void dispatch();
-263  }
-264
-265  protected 
ArrayListMultimap, RemoteOperation> 
buildAndGroupRequestByType(final TEnv env,
-266  final TRemote remote, final 
Set remoteProcedures) {
-267final 
ArrayListMultimap, RemoteOperation> requestByType = 
ArrayListMultimap.create();
-268for (RemoteProcedure proc: 
remoteProcedures) {
-269  RemoteOperation operation = 
proc.remoteCallBuild(env, remote);

[08/51] [partial] hbase-site git commit: Published site at 7d3750bd9fc9747623549c242cc4171e224b3eaf.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3469cbc0/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.BufferNode.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.BufferNode.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.BufferNode.html
index 594ef24..17d5c40 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.BufferNode.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.BufferNode.html
@@ -170,241 +170,242 @@
 162  }
 163
 164  /**
-165   * Add a remote rpc. Be sure to check 
result for successful add.
+165   * Add a remote rpc.
 166   * @param key the node identifier
-167   * @return True if we successfully 
added the operation.
-168   */
-169  public boolean addOperationToNode(final 
TRemote key, RemoteProcedure rp) {
+167   */
+168  public void addOperationToNode(final 
TRemote key, RemoteProcedure rp)
+169  throws 
NullTargetServerDispatchException, NoServerDispatchException, 
NoNodeDispatchException {
 170if (key == null) {
-171  // Key is remote server name. Be 
careful. It could have been nulled by a concurrent
-172  // ServerCrashProcedure shutting 
down outstanding RPC requests. See remoteCallFailed.
-173  return false;
-174}
-175assert key != null : "found null key 
for node";
-176BufferNode node = nodeMap.get(key);
-177if (node == null) {
-178  return false;
-179}
-180node.add(rp);
-181// Check our node still in the map; 
could have been removed by #removeNode.
-182return nodeMap.containsValue(node);
-183  }
-184
-185  /**
-186   * Remove a remote node
-187   * @param key the node identifier
-188   */
-189  public boolean removeNode(final TRemote 
key) {
-190final BufferNode node = 
nodeMap.remove(key);
-191if (node == null) return false;
-192node.abortOperationsInQueue();
-193return true;
-194  }
-195
-196  // 

-197  //  Task Helpers
-198  // 

-199  protected Future 
submitTask(Callable task) {
-200return threadPool.submit(task);
-201  }
-202
-203  protected Future 
submitTask(Callable task, long delay, TimeUnit unit) {
-204final FutureTask 
futureTask = new FutureTask(task);
-205timeoutExecutor.add(new 
DelayedTask(futureTask, delay, unit));
-206return futureTask;
-207  }
-208
-209  protected abstract void 
remoteDispatch(TRemote key, Set operations);
-210  protected abstract void 
abortPendingOperations(TRemote key, Set operations);
-211
-212  /**
-213   * Data structure with reference to 
remote operation.
-214   */
-215  public static abstract class 
RemoteOperation {
-216private final RemoteProcedure 
remoteProcedure;
-217
-218protected RemoteOperation(final 
RemoteProcedure remoteProcedure) {
-219  this.remoteProcedure = 
remoteProcedure;
-220}
-221
-222public RemoteProcedure 
getRemoteProcedure() {
-223  return remoteProcedure;
-224}
-225  }
-226
-227  /**
-228   * Remote procedure reference.
-229   */
-230  public interface 
RemoteProcedure {
-231/**
-232 * For building the remote 
operation.
-233 */
-234RemoteOperation remoteCallBuild(TEnv 
env, TRemote remote);
-235
-236/**
-237 * Called when the executeProcedure 
call is failed.
-238 */
-239void remoteCallFailed(TEnv env, 
TRemote remote, IOException exception);
-240
-241/**
-242 * Called when RS tells the remote 
procedure is succeeded through the
-243 * {@code reportProcedureDone} 
method.
-244 */
-245void remoteOperationCompleted(TEnv 
env);
-246
-247/**
-248 * Called when RS tells the remote 
procedure is failed through the {@code reportProcedureDone}
-249 * method.
-250 */
-251void remoteOperationFailed(TEnv env, 
RemoteProcedureException error);
-252  }
-253
-254  /**
-255   * Account of what procedures are 
running on remote node.
-256   * @param 
-257   * @param 
-258   */
-259  public interface RemoteNode {
-260TRemote getKey();
-261void add(RemoteProcedure operation);
-262void dispatch();
-263  }
-264
-265  protected 
ArrayListMultimap, RemoteOperation> 
buildAndGroupRequestByType(final TEnv env,
-266  final TRemote remote, final 
Set remoteProcedures) {
-267final 
ArrayListMultimap, RemoteOperation> requestByType = 
ArrayListMultimap.create();
-268for (RemoteProcedure proc: 
remoteProcedures) {
-269  RemoteOperation operation = 
proc.remoteCallBuild(env, remote);
-270  
requestByType

[09/51] [partial] hbase-site git commit: Published site at 7d3750bd9fc9747623549c242cc4171e224b3eaf.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3469cbc0/devapidocs/src-html/org/apache/hadoop/hbase/master/replication/RefreshPeerProcedure.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/replication/RefreshPeerProcedure.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/replication/RefreshPeerProcedure.html
index 49f4e5a..e1f22c3 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/replication/RefreshPeerProcedure.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/replication/RefreshPeerProcedure.html
@@ -30,188 +30,191 @@
 022import 
org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
 023import 
org.apache.hadoop.hbase.master.procedure.PeerProcedureInterface;
 024import 
org.apache.hadoop.hbase.master.procedure.RSProcedureDispatcher.ServerOperation;
-025import 
org.apache.hadoop.hbase.procedure2.Procedure;
-026import 
org.apache.hadoop.hbase.procedure2.ProcedureEvent;
-027import 
org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer;
-028import 
org.apache.hadoop.hbase.procedure2.ProcedureSuspendedException;
-029import 
org.apache.hadoop.hbase.procedure2.ProcedureYieldException;
-030import 
org.apache.hadoop.hbase.procedure2.RemoteProcedureDispatcher.RemoteOperation;
-031import 
org.apache.hadoop.hbase.procedure2.RemoteProcedureDispatcher.RemoteProcedure;
-032import 
org.apache.hadoop.hbase.procedure2.RemoteProcedureException;
-033import 
org.apache.hadoop.hbase.replication.regionserver.RefreshPeerCallable;
-034import 
org.apache.yetus.audience.InterfaceAudience;
-035import org.slf4j.Logger;
-036import org.slf4j.LoggerFactory;
-037
-038import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-039import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.PeerModificationType;
-040import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RefreshPeerParameter;
-041import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RefreshPeerStateData;
-042
-043@InterfaceAudience.Private
-044public class RefreshPeerProcedure extends 
Procedure
-045implements PeerProcedureInterface, 
RemoteProcedure {
-046
-047  private static final Logger LOG = 
LoggerFactory.getLogger(RefreshPeerProcedure.class);
-048
-049  private String peerId;
-050
-051  private PeerOperationType type;
-052
-053  
@edu.umd.cs.findbugs.annotations.SuppressWarnings(value = 
"IS2_INCONSISTENT_SYNC",
-054  justification = "Will never change 
after construction")
-055  private ServerName targetServer;
-056
-057  private boolean dispatched;
-058
-059  private ProcedureEvent 
event;
-060
-061  private boolean succ;
-062
-063  public RefreshPeerProcedure() {
-064  }
-065
-066  public RefreshPeerProcedure(String 
peerId, PeerOperationType type, ServerName targetServer) {
-067this.peerId = peerId;
-068this.type = type;
-069this.targetServer = targetServer;
-070  }
-071
-072  @Override
-073  public String getPeerId() {
-074return peerId;
-075  }
-076
-077  @Override
-078  public PeerOperationType 
getPeerOperationType() {
-079return PeerOperationType.REFRESH;
-080  }
-081
-082  private static PeerModificationType 
toPeerModificationType(PeerOperationType type) {
-083switch (type) {
-084  case ADD:
-085return 
PeerModificationType.ADD_PEER;
-086  case REMOVE:
-087return 
PeerModificationType.REMOVE_PEER;
-088  case ENABLE:
-089return 
PeerModificationType.ENABLE_PEER;
-090  case DISABLE:
-091return 
PeerModificationType.DISABLE_PEER;
-092  case UPDATE_CONFIG:
-093return 
PeerModificationType.UPDATE_PEER_CONFIG;
-094  default:
-095throw new 
IllegalArgumentException("Unknown type: " + type);
-096}
-097  }
-098
-099  private static PeerOperationType 
toPeerOperationType(PeerModificationType type) {
-100switch (type) {
-101  case ADD_PEER:
-102return PeerOperationType.ADD;
-103  case REMOVE_PEER:
-104return 
PeerOperationType.REMOVE;
-105  case ENABLE_PEER:
-106return 
PeerOperationType.ENABLE;
-107  case DISABLE_PEER:
-108return 
PeerOperationType.DISABLE;
-109  case UPDATE_PEER_CONFIG:
-110return 
PeerOperationType.UPDATE_CONFIG;
-111  default:
-112throw new 
IllegalArgumentException("Unknown type: " + type);
-113}
-114  }
-115
-116  @Override
-117  public RemoteOperation 
remoteCallBuild(MasterProcedureEnv env, ServerName remote) {
-118assert targetServer.equals(remote);
-119return new ServerOperation(this, 
getProcId(), RefreshPeerCallable.class,
-120
RefreshPeerParameter.newBuilder().setPeerId(peerId).setType(toPeerModificationType(type))
-121
.setTargetServer(ProtobufUtil.toServerName(remote)).build().toByteArray());
-122  }
-123
-124  private void 
complete(MasterProcedure

[06/51] [partial] hbase-site git commit: Published site at 7d3750bd9fc9747623549c242cc4171e224b3eaf.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3469cbc0/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.RemoteNode.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.RemoteNode.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.RemoteNode.html
index 594ef24..17d5c40 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.RemoteNode.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.RemoteNode.html
@@ -170,241 +170,242 @@
 162  }
 163
 164  /**
-165   * Add a remote rpc. Be sure to check 
result for successful add.
+165   * Add a remote rpc.
 166   * @param key the node identifier
-167   * @return True if we successfully 
added the operation.
-168   */
-169  public boolean addOperationToNode(final 
TRemote key, RemoteProcedure rp) {
+167   */
+168  public void addOperationToNode(final 
TRemote key, RemoteProcedure rp)
+169  throws 
NullTargetServerDispatchException, NoServerDispatchException, 
NoNodeDispatchException {
 170if (key == null) {
-171  // Key is remote server name. Be 
careful. It could have been nulled by a concurrent
-172  // ServerCrashProcedure shutting 
down outstanding RPC requests. See remoteCallFailed.
-173  return false;
-174}
-175assert key != null : "found null key 
for node";
-176BufferNode node = nodeMap.get(key);
-177if (node == null) {
-178  return false;
-179}
-180node.add(rp);
-181// Check our node still in the map; 
could have been removed by #removeNode.
-182return nodeMap.containsValue(node);
-183  }
-184
-185  /**
-186   * Remove a remote node
-187   * @param key the node identifier
-188   */
-189  public boolean removeNode(final TRemote 
key) {
-190final BufferNode node = 
nodeMap.remove(key);
-191if (node == null) return false;
-192node.abortOperationsInQueue();
-193return true;
-194  }
-195
-196  // 

-197  //  Task Helpers
-198  // 

-199  protected Future 
submitTask(Callable task) {
-200return threadPool.submit(task);
-201  }
-202
-203  protected Future 
submitTask(Callable task, long delay, TimeUnit unit) {
-204final FutureTask 
futureTask = new FutureTask(task);
-205timeoutExecutor.add(new 
DelayedTask(futureTask, delay, unit));
-206return futureTask;
-207  }
-208
-209  protected abstract void 
remoteDispatch(TRemote key, Set operations);
-210  protected abstract void 
abortPendingOperations(TRemote key, Set operations);
-211
-212  /**
-213   * Data structure with reference to 
remote operation.
-214   */
-215  public static abstract class 
RemoteOperation {
-216private final RemoteProcedure 
remoteProcedure;
-217
-218protected RemoteOperation(final 
RemoteProcedure remoteProcedure) {
-219  this.remoteProcedure = 
remoteProcedure;
-220}
-221
-222public RemoteProcedure 
getRemoteProcedure() {
-223  return remoteProcedure;
-224}
-225  }
-226
-227  /**
-228   * Remote procedure reference.
-229   */
-230  public interface 
RemoteProcedure {
-231/**
-232 * For building the remote 
operation.
-233 */
-234RemoteOperation remoteCallBuild(TEnv 
env, TRemote remote);
-235
-236/**
-237 * Called when the executeProcedure 
call is failed.
-238 */
-239void remoteCallFailed(TEnv env, 
TRemote remote, IOException exception);
-240
-241/**
-242 * Called when RS tells the remote 
procedure is succeeded through the
-243 * {@code reportProcedureDone} 
method.
-244 */
-245void remoteOperationCompleted(TEnv 
env);
-246
-247/**
-248 * Called when RS tells the remote 
procedure is failed through the {@code reportProcedureDone}
-249 * method.
-250 */
-251void remoteOperationFailed(TEnv env, 
RemoteProcedureException error);
-252  }
-253
-254  /**
-255   * Account of what procedures are 
running on remote node.
-256   * @param 
-257   * @param 
-258   */
-259  public interface RemoteNode {
-260TRemote getKey();
-261void add(RemoteProcedure operation);
-262void dispatch();
-263  }
-264
-265  protected 
ArrayListMultimap, RemoteOperation> 
buildAndGroupRequestByType(final TEnv env,
-266  final TRemote remote, final 
Set remoteProcedures) {
-267final 
ArrayListMultimap, RemoteOperation> requestByType = 
ArrayListMultimap.create();
-268for (RemoteProcedure proc: 
remoteProcedures) {
-269  RemoteOperation operation = 
proc.remoteCallBuild(env, remote);
-270  
requestByType

[01/51] [partial] hbase-site git commit: Published site at 7d3750bd9fc9747623549c242cc4171e224b3eaf.

Repository: hbase-site
Updated Branches:
  refs/heads/asf-site cddd30637 -> 3469cbc0b


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3469cbc0/devapidocs/src-html/org/apache/hadoop/hbase/replication/ZKReplicationQueueStorage.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/replication/ZKReplicationQueueStorage.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/replication/ZKReplicationQueueStorage.html
index 338eb9c..55713d8 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/replication/ZKReplicationQueueStorage.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/replication/ZKReplicationQueueStorage.html
@@ -401,10 +401,10 @@
 393" failed when creating the 
node for " + destServerName,
 394  e);
 395}
-396try {
-397  String oldQueueNode = 
getQueueNode(sourceServerName, queueId);
-398  List wals = 
ZKUtil.listChildrenNoWatch(zookeeper, oldQueueNode);
-399  String newQueueId = queueId + "-" + 
sourceServerName;
+396String newQueueId = queueId + "-" + 
sourceServerName;
+397try {
+398  String oldQueueNode = 
getQueueNode(sourceServerName, queueId);
+399  List wals = 
ZKUtil.listChildrenNoWatch(zookeeper, oldQueueNode);
 400  if (CollectionUtils.isEmpty(wals)) 
{
 401
ZKUtil.deleteNodeFailSilent(zookeeper, oldQueueNode);
 402LOG.info("Removed empty {}/{}", 
sourceServerName, queueId);
@@ -435,254 +435,255 @@
 427  return new Pair<>(newQueueId, 
logQueue);
 428} catch (NoNodeException | 
NodeExistsException | NotEmptyException | BadVersionException e) {
 429  // Multi call failed; it looks like 
some other regionserver took away the logs.
-430  // These exceptions mean that zk 
tells us the request can not be execute so it is safe to just
-431  // return a null. For other types 
of exception should be thrown out to notify the upper layer.
-432  LOG.info("Claim queue queueId={} 
from {} to {} failed with {}, someone else took the log?",
-433  queueId,sourceServerName, 
destServerName, e.toString());
-434  return null;
-435} catch (KeeperException | 
InterruptedException e) {
-436  throw new 
ReplicationException("Claim queue queueId=" + queueId + " from " +
-437sourceServerName + " to " + 
destServerName + " failed", e);
-438}
-439  }
-440
-441  @Override
-442  public void 
removeReplicatorIfQueueIsEmpty(ServerName serverName) throws 
ReplicationException {
-443try {
-444  
ZKUtil.deleteNodeFailSilent(zookeeper, getRsNode(serverName));
-445} catch (NotEmptyException e) {
-446  // keep silence to avoid logging 
too much.
-447} catch (KeeperException e) {
-448  throw new 
ReplicationException("Failed to remove replicator for " + serverName, e);
-449}
-450  }
-451
-452  private List 
getListOfReplicators0() throws KeeperException {
-453List children = 
ZKUtil.listChildrenNoWatch(zookeeper, queuesZNode);
-454if (children == null) {
-455  children = 
Collections.emptyList();
-456}
-457return 
children.stream().map(ServerName::parseServerName).collect(toList());
-458  }
-459
-460  @Override
-461  public List 
getListOfReplicators() throws ReplicationException {
-462try {
-463  return getListOfReplicators0();
-464} catch (KeeperException e) {
-465  throw new 
ReplicationException("Failed to get list of replicators", e);
-466}
-467  }
-468
-469  private List 
getWALsInQueue0(ServerName serverName, String queueId)
-470  throws KeeperException {
-471List children = 
ZKUtil.listChildrenNoWatch(zookeeper, getQueueNode(serverName,
-472queueId));
-473return children != null ? children : 
Collections.emptyList();
-474  }
-475
-476  @Override
-477  public List 
getWALsInQueue(ServerName serverName, String queueId)
-478  throws ReplicationException {
-479try {
-480  return getWALsInQueue0(serverName, 
queueId);
-481} catch (KeeperException e) {
-482  throw new ReplicationException(
-483  "Failed to get wals in queue 
(serverName=" + serverName + ", queueId=" + queueId + ")",
-484  e);
-485}
-486  }
-487
-488  private List 
getAllQueues0(ServerName serverName) throws KeeperException {
-489List children = 
ZKUtil.listChildrenNoWatch(zookeeper, getRsNode(serverName));
-490return children != null ? children : 
Collections.emptyList();
-491  }
-492
-493  @Override
-494  public List 
getAllQueues(ServerName serverName) throws ReplicationException {
-495try {
-496  return getAllQueues0(serverName);
-497} catch (KeeperException e) {
-498  throw new 
ReplicationException("Failed to get all queues (serverName=" + serverName + 
")", e);
-499}
-500  }
-501
-502  // will be overridden in UTs
-503  @VisibleForTesting
-504  protected int getQueuesZNodeCversion

[03/51] [partial] hbase-site git commit: Published site at 7d3750bd9fc9747623549c242cc4171e224b3eaf.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3469cbc0/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.TimeoutExecutorThread.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.TimeoutExecutorThread.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.TimeoutExecutorThread.html
index 594ef24..17d5c40 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.TimeoutExecutorThread.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.TimeoutExecutorThread.html
@@ -170,241 +170,242 @@
 162  }
 163
 164  /**
-165   * Add a remote rpc. Be sure to check 
result for successful add.
+165   * Add a remote rpc.
 166   * @param key the node identifier
-167   * @return True if we successfully 
added the operation.
-168   */
-169  public boolean addOperationToNode(final 
TRemote key, RemoteProcedure rp) {
+167   */
+168  public void addOperationToNode(final 
TRemote key, RemoteProcedure rp)
+169  throws 
NullTargetServerDispatchException, NoServerDispatchException, 
NoNodeDispatchException {
 170if (key == null) {
-171  // Key is remote server name. Be 
careful. It could have been nulled by a concurrent
-172  // ServerCrashProcedure shutting 
down outstanding RPC requests. See remoteCallFailed.
-173  return false;
-174}
-175assert key != null : "found null key 
for node";
-176BufferNode node = nodeMap.get(key);
-177if (node == null) {
-178  return false;
-179}
-180node.add(rp);
-181// Check our node still in the map; 
could have been removed by #removeNode.
-182return nodeMap.containsValue(node);
-183  }
-184
-185  /**
-186   * Remove a remote node
-187   * @param key the node identifier
-188   */
-189  public boolean removeNode(final TRemote 
key) {
-190final BufferNode node = 
nodeMap.remove(key);
-191if (node == null) return false;
-192node.abortOperationsInQueue();
-193return true;
-194  }
-195
-196  // 

-197  //  Task Helpers
-198  // 

-199  protected Future 
submitTask(Callable task) {
-200return threadPool.submit(task);
-201  }
-202
-203  protected Future 
submitTask(Callable task, long delay, TimeUnit unit) {
-204final FutureTask 
futureTask = new FutureTask(task);
-205timeoutExecutor.add(new 
DelayedTask(futureTask, delay, unit));
-206return futureTask;
-207  }
-208
-209  protected abstract void 
remoteDispatch(TRemote key, Set operations);
-210  protected abstract void 
abortPendingOperations(TRemote key, Set operations);
-211
-212  /**
-213   * Data structure with reference to 
remote operation.
-214   */
-215  public static abstract class 
RemoteOperation {
-216private final RemoteProcedure 
remoteProcedure;
-217
-218protected RemoteOperation(final 
RemoteProcedure remoteProcedure) {
-219  this.remoteProcedure = 
remoteProcedure;
-220}
-221
-222public RemoteProcedure 
getRemoteProcedure() {
-223  return remoteProcedure;
-224}
-225  }
-226
-227  /**
-228   * Remote procedure reference.
-229   */
-230  public interface 
RemoteProcedure {
-231/**
-232 * For building the remote 
operation.
-233 */
-234RemoteOperation remoteCallBuild(TEnv 
env, TRemote remote);
-235
-236/**
-237 * Called when the executeProcedure 
call is failed.
-238 */
-239void remoteCallFailed(TEnv env, 
TRemote remote, IOException exception);
-240
-241/**
-242 * Called when RS tells the remote 
procedure is succeeded through the
-243 * {@code reportProcedureDone} 
method.
-244 */
-245void remoteOperationCompleted(TEnv 
env);
-246
-247/**
-248 * Called when RS tells the remote 
procedure is failed through the {@code reportProcedureDone}
-249 * method.
-250 */
-251void remoteOperationFailed(TEnv env, 
RemoteProcedureException error);
-252  }
-253
-254  /**
-255   * Account of what procedures are 
running on remote node.
-256   * @param 
-257   * @param 
-258   */
-259  public interface RemoteNode {
-260TRemote getKey();
-261void add(RemoteProcedure operation);
-262void dispatch();
-263  }
-264
-265  protected 
ArrayListMultimap, RemoteOperation> 
buildAndGroupRequestByType(final TEnv env,
-266  final TRemote remote, final 
Set remoteProcedures) {
-267final 
ArrayListMultimap, RemoteOperation> requestByType = 
ArrayListMultimap.create();
-268for (RemoteProcedure proc: 
remoteProcedures) {
-269  RemoteOperation operation = 
proc

hbase git commit: HBASE-20684 org.apache.hadoop.hbase.client.Scan#setStopRow javadoc uses incorrect method

Repository: hbase
Updated Branches:
  refs/heads/branch-2.0 71b8e03a5 -> a56fd069e


HBASE-20684 org.apache.hadoop.hbase.client.Scan#setStopRow javadoc uses 
incorrect method

Signed-off-by: zhangduo 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/a56fd069
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/a56fd069
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/a56fd069

Branch: refs/heads/branch-2.0
Commit: a56fd069eb855c74fdf3be64d219524334ec9406
Parents: 71b8e03
Author: EugeneNik 
Authored: Tue Jun 5 21:57:51 2018 +0800
Committer: zhangduo 
Committed: Tue Jun 5 21:58:11 2018 +0800

--
 .../src/main/java/org/apache/hadoop/hbase/client/Scan.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/a56fd069/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java
index b02bdc1..8835b31 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java
@@ -480,7 +480,7 @@ public class Scan extends Query {
* @return this
* @throws IllegalArgumentException if stopRow does not meet criteria for a 
row key (when length
*   exceeds {@link HConstants#MAX_ROW_LENGTH})
-   * @deprecated use {@link #withStartRow(byte[])} instead. This method may 
change the inclusive of
+   * @deprecated use {@link #withStopRow(byte[])} instead. This method may 
change the inclusive of
* the stop row to keep compatible with the old behavior.
*/
   @Deprecated



hbase git commit: HBASE-20684 org.apache.hadoop.hbase.client.Scan#setStopRow javadoc uses incorrect method

Repository: hbase
Updated Branches:
  refs/heads/master 57c867172 -> 7d3750bd9


HBASE-20684 org.apache.hadoop.hbase.client.Scan#setStopRow javadoc uses 
incorrect method

Signed-off-by: zhangduo 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/7d3750bd
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/7d3750bd
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/7d3750bd

Branch: refs/heads/master
Commit: 7d3750bd9fc9747623549c242cc4171e224b3eaf
Parents: 57c8671
Author: EugeneNik 
Authored: Tue Jun 5 21:57:51 2018 +0800
Committer: zhangduo 
Committed: Tue Jun 5 21:57:51 2018 +0800

--
 .../src/main/java/org/apache/hadoop/hbase/client/Scan.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/7d3750bd/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java
index 32fe2dc..d4aff04 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java
@@ -483,7 +483,7 @@ public class Scan extends Query {
* @return this
* @throws IllegalArgumentException if stopRow does not meet criteria for a 
row key (when length
*   exceeds {@link HConstants#MAX_ROW_LENGTH})
-   * @deprecated use {@link #withStartRow(byte[])} instead. This method may 
change the inclusive of
+   * @deprecated use {@link #withStopRow(byte[])} instead. This method may 
change the inclusive of
* the stop row to keep compatible with the old behavior.
*/
   @Deprecated



hbase git commit: HBASE-20684 org.apache.hadoop.hbase.client.Scan#setStopRow javadoc uses incorrect method

Repository: hbase
Updated Branches:
  refs/heads/branch-2 0b0257b41 -> 323eccc65


HBASE-20684 org.apache.hadoop.hbase.client.Scan#setStopRow javadoc uses 
incorrect method

Signed-off-by: zhangduo 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/323eccc6
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/323eccc6
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/323eccc6

Branch: refs/heads/branch-2
Commit: 323eccc65bf747f7cf960a4810170a13035de711
Parents: 0b0257b
Author: EugeneNik 
Authored: Tue Jun 5 21:57:51 2018 +0800
Committer: zhangduo 
Committed: Tue Jun 5 21:58:06 2018 +0800

--
 .../src/main/java/org/apache/hadoop/hbase/client/Scan.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/323eccc6/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java
index 32fe2dc..d4aff04 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java
@@ -483,7 +483,7 @@ public class Scan extends Query {
* @return this
* @throws IllegalArgumentException if stopRow does not meet criteria for a 
row key (when length
*   exceeds {@link HConstants#MAX_ROW_LENGTH})
-   * @deprecated use {@link #withStartRow(byte[])} instead. This method may 
change the inclusive of
+   * @deprecated use {@link #withStopRow(byte[])} instead. This method may 
change the inclusive of
* the stop row to keep compatible with the old behavior.
*/
   @Deprecated



[31/50] [abbrv] hbase git commit: HBASE-19957 General framework to transit sync replication state

http://git-wip-us.apache.org/repos/asf/hbase/blob/1d067efe/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SyncReplicationPeerInfoProvider.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SyncReplicationPeerInfoProvider.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SyncReplicationPeerInfoProvider.java
new file mode 100644
index 000..92f2c52
--- /dev/null
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SyncReplicationPeerInfoProvider.java
@@ -0,0 +1,43 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.replication.regionserver;
+
+import java.util.Optional;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.replication.SyncReplicationState;
+import org.apache.hadoop.hbase.util.Pair;
+import org.apache.yetus.audience.InterfaceAudience;
+
+/**
+ * Get the information for a sync replication peer.
+ */
+@InterfaceAudience.Private
+public interface SyncReplicationPeerInfoProvider {
+
+  /**
+   * Return the peer id and remote WAL directory if the region is 
synchronously replicated and the
+   * state is {@link SyncReplicationState#ACTIVE}.
+   */
+  Optional> getPeerIdAndRemoteWALDir(RegionInfo info);
+
+  /**
+   * Check whether the give region is contained in a sync replication peer 
which is in the given
+   * state.
+   */
+  boolean isInState(RegionInfo info, SyncReplicationState state);
+}

http://git-wip-us.apache.org/repos/asf/hbase/blob/1d067efe/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SyncReplicationPeerInfoProviderImpl.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SyncReplicationPeerInfoProviderImpl.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SyncReplicationPeerInfoProviderImpl.java
new file mode 100644
index 000..32159e6
--- /dev/null
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/SyncReplicationPeerInfoProviderImpl.java
@@ -0,0 +1,71 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.replication.regionserver;
+
+import java.util.Optional;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.replication.ReplicationPeer;
+import org.apache.hadoop.hbase.replication.ReplicationPeers;
+import org.apache.hadoop.hbase.replication.SyncReplicationState;
+import org.apache.hadoop.hbase.util.Pair;
+import org.apache.yetus.audience.InterfaceAudience;
+
+@InterfaceAudience.Private
+class SyncReplicationPeerInfoProviderImpl implements 
SyncReplicationPeerInfoProvider {
+
+  private final ReplicationPeers replicationPeers;
+
+  private final SyncReplicationPeerMappingManager mapping;
+
+  SyncReplicationPeerInfoProviderImpl(ReplicationPeers replicationPeers,
+  SyncReplicationPeerMappingManager mapping) {
+this.replicationPeers = replicationPeers;
+this.mapping = mapping;
+  }
+
+  @Override
+  public Optional> getPeerIdAndRemoteWALDir(RegionInfo 
info) {
+String peerId = mapping.getPeerId(info);
+if (peerId == null) {
+  return Optional.empty();
+}
+ReplicationPeer peer = replication

[01/50] [abbrv] hbase git commit: HBASE-19724 Fixed Checkstyle errors in hbase-hadoop2-compat and enabled Checkstyle to fail on violations [Forced Update!]

Repository: hbase
Updated Branches:
  refs/heads/HBASE-19064 34b4dd2ef -> 55abf011b (forced update)


http://git-wip-us.apache.org/repos/asf/hbase/blob/d5ea9263/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/impl/JmxCacheBuster.java
--
diff --git 
a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/impl/JmxCacheBuster.java
 
b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/impl/JmxCacheBuster.java
index 7350d1e..fbcd9fc 100644
--- 
a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/impl/JmxCacheBuster.java
+++ 
b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/impl/JmxCacheBuster.java
@@ -22,13 +22,13 @@ import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicReference;
 
-import org.apache.yetus.audience.InterfaceAudience;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 import org.apache.hadoop.metrics2.MetricsExecutor;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.metrics2.lib.MetricsExecutorImpl;
 import org.apache.hadoop.util.StringUtils;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
 
@@ -41,7 +41,7 @@ import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesti
  * are package private.
  */
 @InterfaceAudience.Private
-public class JmxCacheBuster {
+public final class JmxCacheBuster {
   private static final Logger LOG = 
LoggerFactory.getLogger(JmxCacheBuster.class);
   private static AtomicReference fut = new 
AtomicReference<>(null);
   private static MetricsExecutor executor = new MetricsExecutorImpl();

http://git-wip-us.apache.org/repos/asf/hbase/blob/d5ea9263/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/DefaultMetricsSystemHelper.java
--
diff --git 
a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/DefaultMetricsSystemHelper.java
 
b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/DefaultMetricsSystemHelper.java
index 87b83e5..723e6d3 100644
--- 
a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/DefaultMetricsSystemHelper.java
+++ 
b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/DefaultMetricsSystemHelper.java
@@ -92,8 +92,8 @@ public class DefaultMetricsSystemHelper {
   }
 } catch (Exception ex) {
   if (LOG.isTraceEnabled()) {
-LOG.trace("Received exception while trying to access Hadoop Metrics 
classes via reflection.",
-ex);
+LOG.trace("Received exception while trying to access Hadoop Metrics 
classes via " +
+"reflection.", ex);
   }
 }
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/d5ea9263/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/DynamicMetricsRegistry.java
--
diff --git 
a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/DynamicMetricsRegistry.java
 
b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/DynamicMetricsRegistry.java
index 62fa6ea..7e17ee9 100644
--- 
a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/DynamicMetricsRegistry.java
+++ 
b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/DynamicMetricsRegistry.java
@@ -21,15 +21,15 @@ package org.apache.hadoop.metrics2.lib;
 import java.util.Collection;
 import java.util.concurrent.ConcurrentMap;
 
-import org.apache.yetus.audience.InterfaceAudience;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hbase.metrics.Interns;
 import org.apache.hadoop.metrics2.MetricsException;
 import org.apache.hadoop.metrics2.MetricsInfo;
 import org.apache.hadoop.metrics2.MetricsRecordBuilder;
 import org.apache.hadoop.metrics2.MetricsTag;
 import org.apache.hadoop.metrics2.impl.MsInfo;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import org.apache.hbase.thirdparty.com.google.common.base.MoreObjects;
 import org.apache.hbase.thirdparty.com.google.common.collect.Maps;
@@ -56,14 +56,14 @@ public class DynamicMetricsRegistry {
   private final MetricsInfo metricsInfo;
   private final DefaultMetricsSystemHelper helper = new 
DefaultMetricsSystemHelper();
   private final static String[] histogramSuffixes = new String[]{
-  "_num_ops",
-  "_min",
-  "_max",
-  "_median",
-  "_75th_percentile",
-  "_90th_percentile",
-  "_95th_percentile",
-  "_99th_percentile"};
+"_num_ops",
+"_min",
+"_max",
+"_median",
+"_75th_percentile",
+"_90th_percentile",
+"_95th_perc

[15/50] [abbrv] hbase git commit: HBASE-20634 Reopen region while server crash can cause the procedure to be stuck

HBASE-20634 Reopen region while server crash can cause the procedure to be stuck

A reattempt at fixing HBASE-20173 [AMv2] DisableTableProcedure concurrent to 
ServerCrashProcedure can deadlock

The scenario is a SCP after processing WALs, goes to assign regions that
were on the crashed server but a concurrent Procedure gets in there
first and tries to unassign a region that was on the crashed server
(could be part of a move procedure or a disable table, etc.). The
unassign happens to run AFTER SCP has released all RPCs that
were going against the crashed server. The unassign fails because the
server is crashed. The unassign used to suspend itself only it would
never be woken up because the server it was going against had already
been processed. Worse, the SCP could not make progress because the
unassign was suspended with the lock on a region that it wanted to
assign held making it so it could make no progress.

In here, we add to the unassign recognition of the state where it is
running post SCP cleanup of RPCs. If present, unassign moves to finish
instead of suspending itself.

Includes a nice unit test made by Duo Zhang that reproduces nicely the
hung scenario.

M 
hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/FailedRemoteDispatchException.java
 Moved this class back to hbase-procedure where it belongs.

M 
hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/NoNodeDispatchException.java
M 
hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/NoServerDispatchException.java
M 
hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/NullTargetServerDispatchException.java
 Specializations on FRDE so we can be more particular when we say there
 was a problem.

M 
hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java
 Change addOperationToNode so we throw exceptions that give more detail
 on the issue rather than a mysterious true/false

M hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
 Undo SERVER_CRASH_HANDLE_RIT2. Bad idea (from HBASE-20173)

M hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
 Have expireServer return true if it actually queued an expiration. Used
 later in this patch.

M 
hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
 Hide methods that shouldn't be public. Add a particular check used out
 in unassign procedure failure processing.

M 
hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MoveRegionProcedure.java
 Check that the server we're to move from is actually online (might
 catch a few silly move requests early).

M 
hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStates.java
 Add doc on ServerState. Wasn't being used really. Now we actually stamp
 a Server OFFLINE after its WAL has been split. Means its safe to assign
 since all WALs have been processed. Add methods to update SPLITTING
 and to set it to OFFLINE after splitting done.

M 
hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionTransitionProcedure.java
 Change logging to be new-style and less repetitive of info.
 Cater to new way in which .addOperationToNode returns info (exceptions
 rather than true/false).

M 
hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/UnassignProcedure.java
 Add looking for the case where we failed assign AND we should not
 suspend because we will never be woken up because SCP is beyond
 doing this for all stuck RPCs.

 Some cleanup of the failure processing grouping where we can proceed.

 TODOs have been handled in this refactor including the TODO that
 wonders if it is possible that there are concurrent fails coming in
 (Yes).

M 
hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java
 Doc and removing the old HBASE-20173 'fix'.
 Also updating ServerStateNode post WAL splitting so it gets marked
 OFFLINE.

A 
hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestServerCrashProcedureStuck.java
 Nice test by Duo Zhang.

Signed-off-by: Umesh Agashe 
Signed-off-by: Duo Zhang 
Signed-off-by: Mike Drob 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/a472f24d
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/a472f24d
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/a472f24d

Branch: refs/heads/HBASE-19064
Commit: a472f24d17718f85883d7bb7b9bd9bd64a8cb392
Parents: 0844691
Author: zhangduo 
Authored: Sun May 27 20:42:21 2018 +0800
Committer: Michael Stack 
Committed: Mon Jun 4 09:26:56 2018 -0700

--
 .../hbase/procedure2/BadProcedureException.java |   2 +-
 .../FailedRemoteDispatchException.java  |  33 
 .../procedure2/NoNodeDispatchException.java |  33 
 .../procedure2/NoServerDispatchException.java   |  33 
 .../NullTarg

[32/50] [abbrv] hbase git commit: HBASE-19957 General framework to transit sync replication state

HBASE-19957 General framework to transit sync replication state


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/1d067efe
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/1d067efe
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/1d067efe

Branch: refs/heads/HBASE-19064
Commit: 1d067efe838623f7c752e481560edcdd0d96785d
Parents: b2d8844
Author: zhangduo 
Authored: Fri Feb 9 18:33:28 2018 +0800
Committer: zhangduo 
Committed: Tue Jun 5 17:24:38 2018 +0800

--
 .../replication/ReplicationPeerConfig.java  |   2 -
 .../replication/ReplicationPeerDescription.java |   5 +-
 .../hbase/replication/SyncReplicationState.java |  19 +-
 .../org/apache/hadoop/hbase/HConstants.java |   3 +
 .../src/main/protobuf/MasterProcedure.proto |  20 +-
 .../hbase/replication/ReplicationPeerImpl.java  |  45 -
 .../replication/ReplicationPeerStorage.java |  25 ++-
 .../hbase/replication/ReplicationPeers.java |  27 ++-
 .../replication/ZKReplicationPeerStorage.java   |  63 +--
 .../hbase/coprocessor/MasterObserver.java   |   7 +-
 .../org/apache/hadoop/hbase/master/HMaster.java |   4 +-
 .../hbase/master/MasterCoprocessorHost.java |  12 +-
 .../replication/AbstractPeerProcedure.java  |  14 +-
 .../master/replication/ModifyPeerProcedure.java |  11 --
 .../replication/RefreshPeerProcedure.java   |  18 +-
 .../replication/ReplicationPeerManager.java |  89 +
 ...ransitPeerSyncReplicationStateProcedure.java | 181 ---
 .../hbase/regionserver/HRegionServer.java   |  35 ++--
 .../regionserver/ReplicationSourceService.java  |  11 +-
 .../regionserver/PeerActionListener.java|   4 +-
 .../regionserver/PeerProcedureHandler.java  |  16 +-
 .../regionserver/PeerProcedureHandlerImpl.java  |  52 +-
 .../regionserver/RefreshPeerCallable.java   |   7 +
 .../replication/regionserver/Replication.java   |  22 ++-
 .../regionserver/ReplicationSourceManager.java  |  41 +++--
 .../SyncReplicationPeerInfoProvider.java|  43 +
 .../SyncReplicationPeerInfoProviderImpl.java|  71 
 .../SyncReplicationPeerMappingManager.java  |  48 +
 .../SyncReplicationPeerProvider.java|  35 
 .../hbase/wal/SyncReplicationWALProvider.java   |  35 ++--
 .../org/apache/hadoop/hbase/wal/WALFactory.java |  47 ++---
 .../replication/TestReplicationAdmin.java   |   3 +-
 .../TestReplicationSourceManager.java   |   5 +-
 .../wal/TestSyncReplicationWALProvider.java |  36 ++--
 34 files changed, 743 insertions(+), 313 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/1d067efe/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
index 997a155..cc7b4bc 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
@@ -15,7 +15,6 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-
 package org.apache.hadoop.hbase.replication;
 
 import java.util.Collection;
@@ -25,7 +24,6 @@ import java.util.List;
 import java.util.Map;
 import java.util.Set;
 import java.util.TreeMap;
-
 import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.util.Bytes;

http://git-wip-us.apache.org/repos/asf/hbase/blob/1d067efe/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerDescription.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerDescription.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerDescription.java
index 2d077c5..b0c27bb 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerDescription.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerDescription.java
@@ -20,7 +20,10 @@ package org.apache.hadoop.hbase.replication;
 import org.apache.yetus.audience.InterfaceAudience;
 
 /**
- * The POJO equivalent of ReplicationProtos.ReplicationPeerDescription
+ * The POJO equivalent of ReplicationProtos.ReplicationPeerDescription.
+ * 
+ * To developer, here we do not store the new sync replication state since it 
is just an
+ * intermediate state and this class is public.
  */
 @InterfaceAudience.Public
 public 

[40/50] [abbrv] hbase git commit: HBASE-20456 Support removing a ReplicationSourceShipper for a special wal group

HBASE-20456 Support removing a ReplicationSourceShipper for a special wal group


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/44d5ea7e
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/44d5ea7e
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/44d5ea7e

Branch: refs/heads/HBASE-19064
Commit: 44d5ea7eec3fa9393997485a0de27c0c76070e7d
Parents: dd986de
Author: zhangduo 
Authored: Tue Apr 24 22:01:21 2018 +0800
Committer: zhangduo 
Committed: Tue Jun 5 18:13:58 2018 +0800

--
 .../hbase/regionserver/wal/AsyncFSWAL.java  |  1 +
 .../RecoveredReplicationSource.java | 13 +---
 .../RecoveredReplicationSourceShipper.java  |  7 --
 .../regionserver/ReplicationSource.java | 13 +++-
 .../regionserver/ReplicationSourceManager.java  | 19 -
 .../regionserver/ReplicationSourceShipper.java  | 20 +++--
 .../ReplicationSourceWALReader.java |  9 ++-
 .../regionserver/WALEntryStream.java|  3 +-
 .../hadoop/hbase/wal/AbstractFSWALProvider.java | 28 ---
 .../hbase/wal/SyncReplicationWALProvider.java   | 10 ++-
 .../TestReplicationSourceManager.java   |  5 +-
 .../TestSyncReplicationShipperQuit.java | 81 
 .../regionserver/TestWALEntryStream.java|  4 +-
 13 files changed, 163 insertions(+), 50 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/44d5ea7e/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java
index d98ab75..9b4ce9c 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java
@@ -682,6 +682,7 @@ public class AsyncFSWAL extends AbstractFSWAL {
   protected void doShutdown() throws IOException {
 waitForSafePoint();
 closeWriter(this.writer);
+this.writer = null;
 closeExecutor.shutdown();
 try {
   if (!closeExecutor.awaitTermination(waitOnShutdownInSeconds, 
TimeUnit.SECONDS)) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/44d5ea7e/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RecoveredReplicationSource.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RecoveredReplicationSource.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RecoveredReplicationSource.java
index a21ca44..f1bb538 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RecoveredReplicationSource.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RecoveredReplicationSource.java
@@ -30,7 +30,6 @@ import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.replication.ReplicationPeer;
 import org.apache.hadoop.hbase.replication.ReplicationQueueStorage;
 import org.apache.hadoop.hbase.util.FSUtils;
-import org.apache.hadoop.hbase.util.Threads;
 import org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
@@ -144,15 +143,9 @@ public class RecoveredReplicationSource extends 
ReplicationSource {
   }
 
   void tryFinish() {
-// use synchronize to make sure one last thread will clean the queue
-synchronized (workerThreads) {
-  Threads.sleep(100);// wait a short while for other worker thread to 
fully exit
-  boolean allTasksDone = workerThreads.values().stream().allMatch(w -> 
w.isFinished());
-  if (allTasksDone) {
-this.getSourceMetrics().clear();
-manager.removeRecoveredSource(this);
-LOG.info("Finished recovering queue {} with the following stats: {}", 
queueId, getStats());
-  }
+if (workerThreads.isEmpty()) {
+  this.getSourceMetrics().clear();
+  manager.finishRecoveredSource(this);
 }
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/44d5ea7e/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RecoveredReplicationSourceShipper.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RecoveredReplicationSourceShipper.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RecoveredReplicationSourceShipper.java
index 91109cf..b0d4db0 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/reg

[07/50] [abbrv] hbase git commit: HBASE-19761:Fix Checkstyle errors in hbase-zookeeper

http://git-wip-us.apache.org/repos/asf/hbase/blob/1b98a96c/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/MasterAddressTracker.java
--
diff --git 
a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/MasterAddressTracker.java
 
b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/MasterAddressTracker.java
index 18b81f4..915244e 100644
--- 
a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/MasterAddressTracker.java
+++ 
b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/MasterAddressTracker.java
@@ -65,7 +65,7 @@ public class MasterAddressTracker extends ZKNodeTracker {
* @param abortable abortable in case of fatal error
*/
   public MasterAddressTracker(ZKWatcher watcher, Abortable abortable) {
-super(watcher, watcher.znodePaths.masterAddressZNode, abortable);
+super(watcher, watcher.getZNodePaths().masterAddressZNode, abortable);
   }
 
   /**
@@ -101,7 +101,7 @@ public class MasterAddressTracker extends ZKNodeTracker {
* @return info port or 0 if timed out or exceptions
*/
   public int getBackupMasterInfoPort(final ServerName sn) {
-String backupZNode = 
ZNodePaths.joinZNode(watcher.znodePaths.backupMasterAddressesZNode,
+String backupZNode = 
ZNodePaths.joinZNode(watcher.getZNodePaths().backupMasterAddressesZNode,
   sn.toString());
 try {
   byte[] data = ZKUtil.getData(watcher, backupZNode);
@@ -147,7 +147,7 @@ public class MasterAddressTracker extends ZKNodeTracker {
   throws KeeperException, IOException {
 byte [] data;
 try {
-  data = ZKUtil.getData(zkw, zkw.znodePaths.masterAddressZNode);
+  data = ZKUtil.getData(zkw, zkw.getZNodePaths().masterAddressZNode);
 } catch (InterruptedException e) {
   throw new InterruptedIOException();
 }
@@ -178,7 +178,7 @@ public class MasterAddressTracker extends ZKNodeTracker {
   public static int getMasterInfoPort(final ZKWatcher zkw) throws 
KeeperException, IOException {
 byte[] data;
 try {
-  data = ZKUtil.getData(zkw, zkw.znodePaths.masterAddressZNode);
+  data = ZKUtil.getData(zkw, zkw.getZNodePaths().masterAddressZNode);
 } catch (InterruptedException e) {
   throw new InterruptedIOException();
 }
@@ -208,7 +208,7 @@ public class MasterAddressTracker extends ZKNodeTracker {
*/
   public static boolean setMasterAddress(final ZKWatcher zkw,
   final String znode, final ServerName master, int infoPort)
-  throws KeeperException {
+throws KeeperException {
 return ZKUtil.createEphemeralNodeAndWatch(zkw, znode, toByteArray(master, 
infoPort));
   }
 
@@ -265,10 +265,10 @@ public class MasterAddressTracker extends ZKNodeTracker {
 
 try {
   Stat stat = new Stat();
-  byte[] data = ZKUtil.getDataNoWatch(zkw, 
zkw.znodePaths.masterAddressZNode, stat);
+  byte[] data = ZKUtil.getDataNoWatch(zkw, 
zkw.getZNodePaths().masterAddressZNode, stat);
   ServerName sn = ProtobufUtil.parseServerNameFrom(data);
   if (sn != null && content.equals(sn.toString())) {
-return (ZKUtil.deleteNode(zkw, zkw.znodePaths.masterAddressZNode, 
stat.getVersion()));
+return (ZKUtil.deleteNode(zkw, zkw.getZNodePaths().masterAddressZNode, 
stat.getVersion()));
   }
 } catch (KeeperException e) {
   LOG.warn("Can't get or delete the master znode", e);

http://git-wip-us.apache.org/repos/asf/hbase/blob/1b98a96c/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/MasterMaintenanceModeTracker.java
--
diff --git 
a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/MasterMaintenanceModeTracker.java
 
b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/MasterMaintenanceModeTracker.java
index 952da6f..9dafe5f 100644
--- 
a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/MasterMaintenanceModeTracker.java
+++ 
b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/MasterMaintenanceModeTracker.java
@@ -40,7 +40,7 @@ public class MasterMaintenanceModeTracker extends ZKListener {
   }
 
   private void update(String path) {
-if (path.startsWith(watcher.znodePaths.masterMaintZNode)) {
+if (path.startsWith(watcher.getZNodePaths().masterMaintZNode)) {
   update();
 }
   }
@@ -48,7 +48,8 @@ public class MasterMaintenanceModeTracker extends ZKListener {
   private void update() {
 try {
   List children =
-  ZKUtil.listChildrenAndWatchForNewChildren(watcher, 
watcher.znodePaths.masterMaintZNode);
+  ZKUtil.listChildrenAndWatchForNewChildren(watcher,
+  watcher.getZNodePaths().masterMaintZNode);
   hasChildren = (children != null && children.size() > 0);
 } catch (KeeperException e) {
   // Ignore the ZK keeper exception

http://git-wip-us.apache.org/repos/asf/hbase/blob/1b98a96c/hbase-zookeeper

[14/50] [abbrv] hbase git commit: HBASE-18948: Added a note in the Tag implementation details in security.adoc

HBASE-18948: Added a note in the Tag implementation details in security.adoc

Coprocessors that run server-side on RegionServers can perform get and set 
operations on cell Tags.
Tags are stripped out at the RPC layer before the read response is sent back, so 
clients do not see these tags.

Signed-off-by: Josh Elser 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/08446916
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/08446916
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/08446916

Branch: refs/heads/HBASE-19064
Commit: 08446916a0e3b64025e0a70bb031bdfdc5b9e5ea
Parents: 7da0015
Author: Thiriguna Bharat Rao 
Authored: Fri May 25 21:40:11 2018 +0530
Committer: Josh Elser 
Committed: Mon Jun 4 11:03:58 2018 -0400

--
 src/main/asciidoc/_chapters/security.adoc | 1 +
 1 file changed, 1 insertion(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/08446916/src/main/asciidoc/_chapters/security.adoc
--
diff --git a/src/main/asciidoc/_chapters/security.adoc 
b/src/main/asciidoc/_chapters/security.adoc
index c007053..dae6c53 100644
--- a/src/main/asciidoc/_chapters/security.adoc
+++ b/src/main/asciidoc/_chapters/security.adoc
@@ -662,6 +662,7 @@ You also need to enable the DataBlockEncoder for the column 
family, for encoding
 You can enable compression of each tag in the WAL, if WAL compression is also 
enabled, by setting the value of 
`hbase.regionserver.wal.tags.enablecompression` to `true` in _hbase-site.xml_.
 Tag compression uses dictionary encoding.
 
+Coprocessors that run server-side on RegionServers can perform get and set 
operations on cell Tags. Tags are stripped out at the RPC layer before the read 
response is sent back, so clients do not see these tags.
 Tag compression is not supported when using WAL encryption.
 
 [[hbase.accesscontrol.configuration]]



[02/50] [abbrv] hbase git commit: HBASE-19724 Fixed Checkstyle errors in hbase-hadoop2-compat and enabled Checkstyle to fail on violations

HBASE-19724 Fixed Checkstyle errors in hbase-hadoop2-compat and enabled 
Checkstyle to fail on violations


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/d5ea9263
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/d5ea9263
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/d5ea9263

Branch: refs/heads/HBASE-19064
Commit: d5ea926321d8339dbbb761b32bcdc043aed576ee
Parents: 2fdd36f
Author: Jan Hentschel 
Authored: Sun Jan 7 13:10:31 2018 +0100
Committer: Jan Hentschel 
Committed: Fri Jun 1 10:59:47 2018 +0200

--
 hbase-hadoop2-compat/pom.xml|   7 +
 .../hbase/ipc/MetricsHBaseServerSourceImpl.java |  13 +-
 .../apache/hadoop/hbase/mapreduce/JobUtil.java  |  12 +-
 .../MetricsAssignmentManagerSourceImpl.java |  10 +-
 .../MetricsMasterFilesystemSourceImpl.java  |   2 +-
 .../master/MetricsMasterProcSourceImpl.java |   2 +-
 .../master/MetricsMasterQuotaSourceImpl.java|   5 +-
 .../hbase/master/MetricsMasterSourceImpl.java   |  11 +-
 .../hbase/master/MetricsSnapshotSourceImpl.java |   2 +-
 .../balancer/MetricsBalancerSourceImpl.java |   9 +-
 .../MetricsStochasticBalancerSourceImpl.java|  23 +-
 .../hadoop/hbase/metrics/BaseSourceImpl.java|  22 +-
 .../apache/hadoop/hbase/metrics/Interns.java|  13 +-
 .../hadoop/hbase/metrics/MBeanSourceImpl.java   |   2 +-
 .../hadoop/hbase/metrics/MetricsInfoImpl.java   |   6 +-
 .../MetricsHeapMemoryManagerSourceImpl.java |   2 +-
 .../MetricsRegionAggregateSourceImpl.java   |   6 +-
 .../MetricsRegionServerSourceFactoryImpl.java   |   5 +-
 .../MetricsRegionServerSourceImpl.java  | 406 ++-
 .../regionserver/MetricsRegionSourceImpl.java   |   6 +-
 .../MetricsTableAggregateSourceImpl.java|   8 +-
 .../regionserver/MetricsTableSourceImpl.java|  18 +-
 .../regionserver/wal/MetricsWALSourceImpl.java  |  11 +-
 .../MetricsReplicationGlobalSourceSource.java   |  13 +-
 .../MetricsReplicationSourceImpl.java   |   2 +-
 .../MetricsReplicationSourceSourceImpl.java |   3 +
 .../hbase/rest/MetricsRESTSourceImpl.java   |  25 +-
 .../thrift/MetricsThriftServerSourceImpl.java   |   5 +-
 .../zookeeper/MetricsZooKeeperSourceImpl.java   |  47 ++-
 .../hadoop/metrics2/impl/JmxCacheBuster.java|   8 +-
 .../lib/DefaultMetricsSystemHelper.java |   4 +-
 .../metrics2/lib/DynamicMetricsRegistry.java|  39 +-
 .../metrics2/lib/MetricsExecutorImpl.java   |   2 +-
 .../hadoop/metrics2/lib/MutableHistogram.java   |   2 +-
 .../metrics2/lib/MutableRangeHistogram.java |   3 +-
 .../metrics2/lib/MutableSizeHistogram.java  |   2 +-
 .../metrics2/lib/MutableTimeHistogram.java  |   4 +-
 .../hadoop/metrics2/util/MetricQuantile.java|  13 +-
 .../metrics2/util/MetricSampleQuantiles.java|  10 +-
 .../apache/hadoop/hbase/HadoopShimsImpl.java|   3 +-
 .../master/TestMetricsMasterSourceImpl.java |   9 +-
 .../TestMetricsRegionServerSourceImpl.java  |  10 +-
 .../TestMetricsRegionSourceImpl.java|  22 +-
 .../TestMetricsTableSourceImpl.java |  11 +-
 ...TestMetricsReplicationSourceFactoryImpl.java |   9 +-
 .../TestMetricsReplicationSourceImpl.java   |   6 +-
 .../hbase/rest/TestMetricsRESTSourceImpl.java   |  13 +-
 .../hbase/test/MetricsAssertHelperImpl.java |  14 +-
 ...estMetricsThriftServerSourceFactoryImpl.java |  19 +-
 .../TestMetricsZooKeeperSourceImpl.java |   8 +-
 50 files changed, 469 insertions(+), 438 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/d5ea9263/hbase-hadoop2-compat/pom.xml
--
diff --git a/hbase-hadoop2-compat/pom.xml b/hbase-hadoop2-compat/pom.xml
index 6a313ab..ab33c72 100644
--- a/hbase-hadoop2-compat/pom.xml
+++ b/hbase-hadoop2-compat/pom.xml
@@ -71,6 +71,13 @@ limitations under the License.
 net.revelc.code
 warbucks-maven-plugin
   
+  
+org.apache.maven.plugins
+maven-checkstyle-plugin
+
+  true
+
+  
 
 
   

http://git-wip-us.apache.org/repos/asf/hbase/blob/d5ea9263/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSourceImpl.java
--
diff --git 
a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSourceImpl.java
 
b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSourceImpl.java
index 6e8b81d..45f60c4 100644
--- 
a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSourceImpl.java
+++ 
b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSourceImpl.java
@@ -19,20 +19,17 @@
 

[35/50] [abbrv] hbase git commit: HBASE-19079 Support setting up two clusters with A and S stat

HBASE-19079 Support setting up two clusters with A and S stat


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/d0187a37
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/d0187a37
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/d0187a37

Branch: refs/heads/HBASE-19064
Commit: d0187a37d3452bd0747da52b54505e3e03aef980
Parents: 95f97d8
Author: zhangduo 
Authored: Tue Apr 10 22:35:19 2018 +0800
Committer: zhangduo 
Committed: Tue Jun 5 18:13:58 2018 +0800

--
 .../replication/ReplicationPeerManager.java |   5 +-
 ...ransitPeerSyncReplicationStateProcedure.java |   2 +-
 .../hbase/regionserver/wal/DualAsyncFSWAL.java  |  14 ++
 .../hadoop/hbase/regionserver/wal/WALUtil.java  |  25 ++-
 .../hbase/replication/ChainWALEntryFilter.java  |  28 +--
 .../ReplaySyncReplicationWALCallable.java   |  27 ++-
 .../SyncReplicationPeerInfoProviderImpl.java|   6 +-
 .../hadoop/hbase/wal/AbstractFSWALProvider.java |  10 +-
 .../hbase/wal/SyncReplicationWALProvider.java   |  94 ++---
 .../org/apache/hadoop/hbase/wal/WALEdit.java|   8 +-
 .../org/apache/hadoop/hbase/wal/WALFactory.java |   2 +-
 .../replication/TestReplicationAdmin.java   |  33 +--
 .../regionserver/wal/TestWALDurability.java |   2 +
 .../replication/SyncReplicationTestBase.java| 185 +
 .../hbase/replication/TestSyncReplication.java  | 207 ---
 .../replication/TestSyncReplicationActive.java  |  64 ++
 .../replication/TestSyncReplicationStandBy.java |  96 +
 17 files changed, 521 insertions(+), 287 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/d0187a37/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
index 41dd6e3..229549e 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
@@ -68,8 +68,9 @@ public class ReplicationPeerManager {
 
   private final ImmutableMap>
 allowedTransition = 
Maps.immutableEnumMap(ImmutableMap.of(SyncReplicationState.ACTIVE,
-  EnumSet.of(SyncReplicationState.DOWNGRADE_ACTIVE), 
SyncReplicationState.STANDBY,
-  EnumSet.of(SyncReplicationState.DOWNGRADE_ACTIVE), 
SyncReplicationState.DOWNGRADE_ACTIVE,
+  EnumSet.of(SyncReplicationState.DOWNGRADE_ACTIVE, 
SyncReplicationState.STANDBY),
+  SyncReplicationState.STANDBY, 
EnumSet.of(SyncReplicationState.DOWNGRADE_ACTIVE),
+  SyncReplicationState.DOWNGRADE_ACTIVE,
   EnumSet.of(SyncReplicationState.STANDBY, SyncReplicationState.ACTIVE)));
 
   ReplicationPeerManager(ReplicationPeerStorage peerStorage, 
ReplicationQueueStorage queueStorage,

http://git-wip-us.apache.org/repos/asf/hbase/blob/d0187a37/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/TransitPeerSyncReplicationStateProcedure.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/TransitPeerSyncReplicationStateProcedure.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/TransitPeerSyncReplicationStateProcedure.java
index cc51890..5da2b0c 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/TransitPeerSyncReplicationStateProcedure.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/TransitPeerSyncReplicationStateProcedure.java
@@ -171,7 +171,7 @@ public class TransitPeerSyncReplicationStateProcedure
 }
 return Flow.HAS_MORE_STATE;
   case REPLAY_REMOTE_WAL_IN_PEER:
-// TODO: replay remote wal when transiting from S to DA.
+addChildProcedure(new RecoverStandbyProcedure(peerId));
 
setNextState(PeerSyncReplicationStateTransitionState.REOPEN_ALL_REGIONS_IN_PEER);
 return Flow.HAS_MORE_STATE;
   case REOPEN_ALL_REGIONS_IN_PEER:

http://git-wip-us.apache.org/repos/asf/hbase/blob/d0187a37/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/DualAsyncFSWAL.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/DualAsyncFSWAL.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/DualAsyncFSWAL.java
index 0495337..a98567a 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/DualAsync

[04/50] [abbrv] hbase git commit: HBASE-18116 Replication source in-memory accounting should not include bulk transfer hfiles

HBASE-18116 Replication source in-memory accounting should not include bulk 
transfer hfiles

Signed-off-by: Andrew Purtell 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/a11701ec
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/a11701ec
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/a11701ec

Branch: refs/heads/HBASE-19064
Commit: a11701ecc5094d0c8cd96a290d86e52df83a9707
Parents: 0968668
Author: Xu Cang 
Authored: Thu May 31 20:00:04 2018 -0700
Committer: Andrew Purtell 
Committed: Fri Jun 1 11:15:47 2018 -0700

--
 .../regionserver/ReplicationSource.java |  1 +
 .../regionserver/ReplicationSourceShipper.java  | 19 -
 .../ReplicationSourceWALReader.java | 22 +++-
 .../regionserver/TestGlobalThrottler.java   | 20 --
 4 files changed, 45 insertions(+), 17 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/a11701ec/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
index 4051efe..d21d83c 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
@@ -598,6 +598,7 @@ public class ReplicationSource implements 
ReplicationSourceInterface {
   }
 
   @Override
+  //offsets totalBufferUsed by deducting shipped batchSize.
   public void postShipEdits(List entries, int batchSize) {
 if (throttler.isEnabled()) {
   throttler.addPushSize(batchSize);

http://git-wip-us.apache.org/repos/asf/hbase/blob/a11701ec/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceShipper.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceShipper.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceShipper.java
index 11fd660..123ecbe 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceShipper.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceShipper.java
@@ -124,6 +124,18 @@ public class ReplicationSourceShipper extends Thread {
   }
 
   /**
+   * get batchEntry size excludes bulk load file sizes.
+   * Uses ReplicationSourceWALReader's static method.
+   */
+  private int getBatchEntrySizeExcludeBulkLoad(WALEntryBatch entryBatch) {
+int totalSize = 0;
+for(Entry entry : entryBatch.getWalEntries()) {
+  totalSize += entryReader.getEntrySizeExcludeBulkLoad(entry);
+}
+return  totalSize;
+  }
+
+  /**
* Do the shipping logic
*/
   private void shipEdits(WALEntryBatch entryBatch) {
@@ -139,6 +151,7 @@ public class ReplicationSourceShipper extends Thread {
   return;
 }
 int currentSize = (int) entryBatch.getHeapSize();
+int sizeExcludeBulkLoad = getBatchEntrySizeExcludeBulkLoad(entryBatch);
 while (isActive()) {
   try {
 try {
@@ -175,7 +188,11 @@ public class ReplicationSourceShipper extends Thread {
 // Log and clean up WAL logs
 updateLogPosition(entryBatch);
 
-source.postShipEdits(entries, currentSize);
+//offsets totalBufferUsed by deducting shipped batchSize (excludes 
bulk load size)
+//this sizeExcludeBulkLoad has to use same calculation that when 
calling
+//acquireBufferQuota() in ReplicatinoSourceWALReader because they 
maintain
+//same variable: totalBufferUsed
+source.postShipEdits(entries, sizeExcludeBulkLoad);
 // FIXME check relationship between wal group and overall
 source.getSourceMetrics().shipBatch(entryBatch.getNbOperations(), 
currentSize,
   entryBatch.getNbHFiles());

http://git-wip-us.apache.org/repos/asf/hbase/blob/a11701ec/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceWALReader.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceWALReader.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceWALReader.java
index 64fd48d..f685a9b 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver

[30/50] [abbrv] hbase git commit: HBASE-19082 Reject read/write from client but accept write from replication in state S

HBASE-19082 Reject read/write from client but accept write from replication in 
state S


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/9b741bdf
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/9b741bdf
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/9b741bdf

Branch: refs/heads/HBASE-19064
Commit: 9b741bdf2074f666572d4605bfd097d63f768b4a
Parents: 1d067ef
Author: zhangduo 
Authored: Mon Feb 12 18:20:18 2018 +0800
Committer: zhangduo 
Committed: Tue Jun 5 17:24:38 2018 +0800

--
 .../org/apache/hadoop/hbase/HConstants.java |   3 -
 .../src/main/protobuf/MasterProcedure.proto |   3 +-
 .../hbase/replication/ReplicationUtils.java |   4 +
 ...ransitPeerSyncReplicationStateProcedure.java |  10 +
 .../hadoop/hbase/regionserver/HRegion.java  |   5 +-
 .../hbase/regionserver/HRegionServer.java   |   2 +-
 .../hbase/regionserver/RSRpcServices.java   |  88 ++--
 .../RejectRequestsFromClientStateChecker.java   |  44 
 .../regionserver/ReplicationSink.java   |  72 ---
 .../SyncReplicationPeerInfoProvider.java|  10 +-
 .../SyncReplicationPeerInfoProviderImpl.java|  19 +-
 .../hbase/wal/SyncReplicationWALProvider.java   |   3 +
 .../org/apache/hadoop/hbase/wal/WALFactory.java |   4 +-
 .../hbase/replication/TestSyncReplication.java  | 200 +++
 .../wal/TestSyncReplicationWALProvider.java |   8 +-
 15 files changed, 401 insertions(+), 74 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/9b741bdf/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
--
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java 
b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
index 522c2cf..9241682 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
@@ -1355,9 +1355,6 @@ public final class HConstants {
 
   public static final String NOT_IMPLEMENTED = "Not implemented";
 
-  // TODO: need to find a better place to hold it.
-  public static final String SYNC_REPLICATION_ENABLED = 
"hbase.replication.sync.enabled";
-
   private HConstants() {
 // Can't be instantiated with this ctor.
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/9b741bdf/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
--
diff --git a/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto 
b/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
index 56ac0d0..e60881f 100644
--- a/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
@@ -397,7 +397,8 @@ enum PeerSyncReplicationStateTransitionState {
   REOPEN_ALL_REGIONS_IN_PEER = 5;
   TRANSIT_PEER_NEW_SYNC_REPLICATION_STATE = 6;
   REFRESH_PEER_SYNC_REPLICATION_STATE_ON_RS_END = 7;
-  POST_PEER_SYNC_REPLICATION_STATE_TRANSITION = 8;
+  CREATE_DIR_FOR_REMOTE_WAL = 8;
+  POST_PEER_SYNC_REPLICATION_STATE_TRANSITION = 9;
 }
 
 message PeerModificationStateData {

http://git-wip-us.apache.org/repos/asf/hbase/blob/9b741bdf/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java
--
diff --git 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java
 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java
index e4dea83..d94cb00 100644
--- 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java
+++ 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java
@@ -37,6 +37,10 @@ import org.apache.yetus.audience.InterfaceAudience;
 @InterfaceAudience.Private
 public final class ReplicationUtils {
 
+  public static final String SYNC_REPLICATION_ENABLED = 
"hbase.replication.sync.enabled";
+
+  public static final String REPLICATION_ATTR_NAME = "__rep__";
+
   private ReplicationUtils() {
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/9b741bdf/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/TransitPeerSyncReplicationStateProcedure.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/TransitPeerSyncReplicationStateProcedure.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/TransitPeerSyncReplicationStateProcedure.java
index 8fc932f..69404a0 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replicat

[18/50] [abbrv] hbase git commit: HBASE-20677 Backport test of HBASE-20566 'Creating a system table after enabling rsgroup feature puts region into RIT' to branch-2

HBASE-20677 Backport test of HBASE-20566 'Creating a system table after 
enabling rsgroup feature puts region into RIT' to branch-2

Signed-off-by: tedyu 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/832f67d4
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/832f67d4
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/832f67d4

Branch: refs/heads/HBASE-19064
Commit: 832f67d483985bc6cf488bb8ecef32280fdee668
Parents: d99ba62
Author: Nihal Jain 
Authored: Tue Jun 5 01:57:03 2018 +0530
Committer: tedyu 
Committed: Mon Jun 4 14:30:04 2018 -0700

--
 .../hadoop/hbase/rsgroup/TestRSGroups.java  | 38 ++--
 1 file changed, 28 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/832f67d4/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroups.java
--
diff --git 
a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroups.java 
b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroups.java
index c2fc0f1..f9dafd4 100644
--- 
a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroups.java
+++ 
b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroups.java
@@ -89,14 +89,16 @@ public class TestRSGroups extends TestRSGroupsBase {
 RSGroupBasedLoadBalancer.class.getName());
 
TEST_UTIL.getConfiguration().set(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY,
 RSGroupAdminEndpoint.class.getName() + "," + 
CPMasterObserver.class.getName());
-// Enable quota for testRSGroupsWithHBaseQuota()
-TEST_UTIL.getConfiguration().setBoolean(QuotaUtil.QUOTA_CONF_KEY, true);
 TEST_UTIL.startMiniCluster(NUM_SLAVES_BASE - 1);
 TEST_UTIL.getConfiguration().setInt(
 ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART,
 NUM_SLAVES_BASE - 1);
 
TEST_UTIL.getConfiguration().setBoolean(SnapshotManager.HBASE_SNAPSHOT_ENABLED, 
true);
 
+initialize();
+  }
+
+  private static void initialize() throws Exception {
 admin = TEST_UTIL.getAdmin();
 cluster = TEST_UTIL.getHBaseCluster();
 master = ((MiniHBaseCluster)cluster).getMaster();
@@ -178,8 +180,8 @@ public class TestRSGroups extends TestRSGroupsBase {
 assertEquals(4, defaultInfo.getServers().size());
 // Assignment of root and meta regions.
 int count = 
master.getAssignmentManager().getRegionStates().getRegionAssignments().size();
-//4 meta,namespace, group, quota
-assertEquals(4, count);
+//3 meta,namespace, group
+assertEquals(3, count);
   }
 
   @Test
@@ -525,11 +527,27 @@ public class TestRSGroups extends TestRSGroupsBase {
 
   @Test
   public void testRSGroupsWithHBaseQuota() throws Exception {
-TEST_UTIL.waitFor(9, new Waiter.Predicate() {
-  @Override
-  public boolean evaluate() throws Exception {
-return admin.isTableAvailable(QuotaTableUtil.QUOTA_TABLE_NAME);
-  }
-});
+TEST_UTIL.getConfiguration().setBoolean(QuotaUtil.QUOTA_CONF_KEY, true);
+restartHBaseCluster();
+try {
+  TEST_UTIL.waitFor(9, new Waiter.Predicate() {
+@Override
+public boolean evaluate() throws Exception {
+  return admin.isTableAvailable(QuotaTableUtil.QUOTA_TABLE_NAME);
+}
+  });
+} finally {
+  TEST_UTIL.getConfiguration().setBoolean(QuotaUtil.QUOTA_CONF_KEY, false);
+  restartHBaseCluster();
+}
+  }
+
+  private void restartHBaseCluster() throws Exception {
+LOG.info("\n\nShutting down cluster");
+TEST_UTIL.shutdownMiniHBaseCluster();
+LOG.info("\n\nSleeping a bit");
+Thread.sleep(2000);
+TEST_UTIL.restartHBaseCluster(NUM_SLAVES_BASE - 1);
+initialize();
   }
 }



[49/50] [abbrv] hbase git commit: HBASE-20637 Polish the WAL switching when transiting from A to S

HBASE-20637 Polish the WAL switching when transiting from A to S


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/c8fbb18a
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/c8fbb18a
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/c8fbb18a

Branch: refs/heads/HBASE-19064
Commit: c8fbb18a77d8be6a3263a108f33dcc8f457f67ea
Parents: 599080c
Author: zhangduo 
Authored: Tue May 29 20:38:20 2018 +0800
Committer: zhangduo 
Committed: Tue Jun 5 18:13:59 2018 +0800

--
 .../hbase/regionserver/wal/AsyncFSWAL.java  | 52 +-
 .../hbase/regionserver/wal/DualAsyncFSWAL.java  | 71 ++--
 .../apache/hadoop/hbase/util/FSHDFSUtils.java   | 16 +++--
 .../hbase/wal/SyncReplicationWALProvider.java   |  2 +-
 .../replication/DualAsyncFSWALForTest.java  |  4 +-
 .../replication/SyncReplicationTestBase.java| 26 +--
 .../replication/TestSyncReplicationActive.java  | 42 ++--
 7 files changed, 176 insertions(+), 37 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/c8fbb18a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java
index 9b4ce9c..7f3e30b 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java
@@ -52,12 +52,12 @@ import org.apache.hadoop.hbase.wal.WALEdit;
 import org.apache.hadoop.hbase.wal.WALKeyImpl;
 import org.apache.hadoop.hbase.wal.WALProvider.AsyncWriter;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
-import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
 import org.apache.htrace.core.TraceScope;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
 import 
org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
 import org.apache.hbase.thirdparty.io.netty.channel.Channel;
 import org.apache.hbase.thirdparty.io.netty.channel.EventLoop;
@@ -470,6 +470,44 @@ public class AsyncFSWAL extends AbstractFSWAL 
{
 // whether to issue a sync in the caller method.
   }
 
+  private void drainNonMarkerEditsAndFailSyncs() {
+if (toWriteAppends.isEmpty()) {
+  return;
+}
+boolean hasNonMarkerEdits = false;
+Iterator iter = toWriteAppends.descendingIterator();
+while (iter.hasNext()) {
+  FSWALEntry entry = iter.next();
+  if (!entry.getEdit().isMetaEdit()) {
+hasNonMarkerEdits = true;
+break;
+  }
+}
+if (hasNonMarkerEdits) {
+  for (;;) {
+iter.remove();
+if (!iter.hasNext()) {
+  break;
+}
+iter.next();
+  }
+  unackedAppends.clear();
+  // fail the sync futures which are under the txid of the first remaining 
edit, if none, fail
+  // all the sync futures.
+  long txid = toWriteAppends.isEmpty() ? Long.MAX_VALUE : 
toWriteAppends.peek().getTxid();
+  IOException error = new IOException("WAL is closing, only marker edit is 
allowed");
+  for (Iterator syncIter = syncFutures.iterator(); 
syncIter.hasNext();) {
+SyncFuture future = syncIter.next();
+if (future.getTxid() < txid) {
+  future.done(future.getTxid(), error);
+  syncIter.remove();
+} else {
+  break;
+}
+  }
+}
+  }
+
   private void consume() {
 consumeLock.lock();
 try {
@@ -512,6 +550,9 @@ public class AsyncFSWAL extends AbstractFSWAL {
   }
   waitingConsumePayloadsGatingSequence.set(nextCursor);
 }
+if (markerEditOnly()) {
+  drainNonMarkerEditsAndFailSyncs();
+}
 appendAndSync();
 if (hasConsumerTask.get()) {
   return;
@@ -553,9 +594,18 @@ public class AsyncFSWAL extends AbstractFSWAL 
{
 return consumerScheduled.compareAndSet(false, true);
   }
 
+  // This is used by sync replication, where we are going to close the wal 
soon after we reopen all
+  // the regions. Will be overridden by sub classes.
+  protected boolean markerEditOnly() {
+return false;
+  }
+
   @Override
   public long append(RegionInfo hri, WALKeyImpl key, WALEdit edits, boolean 
inMemstore)
   throws IOException {
+if (markerEditOnly() && !edits.isMetaEdit()) {
+  throw new IOException("WAL is closing, only marker edit is allowed");
+}
 long txid =
   stampSequenceIdAndPublishToRingBuffer(hri, key, edits, inMemstore, 
waitingConsumePayload

[45/50] [abbrv] hbase git commit: HBASE-20458 Support removing a WAL from LogRoller

HBASE-20458 Support removing a WAL from LogRoller


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/3f1b25e4
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/3f1b25e4
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/3f1b25e4

Branch: refs/heads/HBASE-19064
Commit: 3f1b25e4e2fc36c6d22f70feaf26e97f32c3dcb1
Parents: 20283d4
Author: Guanghao Zhang 
Authored: Mon Apr 23 16:31:54 2018 +0800
Committer: zhangduo 
Committed: Tue Jun 5 18:13:59 2018 +0800

--
 .../hadoop/hbase/regionserver/LogRoller.java| 29 +--
 .../hbase/regionserver/wal/AbstractFSWAL.java   |  7 +-
 .../regionserver/wal/WALClosedException.java| 47 ++
 .../hbase/regionserver/TestLogRoller.java   | 90 
 .../regionserver/wal/AbstractTestFSWAL.java |  9 ++
 5 files changed, 171 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/3f1b25e4/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/LogRoller.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/LogRoller.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/LogRoller.java
index 55c5219..ab0083f 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/LogRoller.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/LogRoller.java
@@ -20,6 +20,8 @@ package org.apache.hadoop.hbase.regionserver;
 
 import java.io.Closeable;
 import java.io.IOException;
+import java.util.Iterator;
+import java.util.Map;
 import java.util.Map.Entry;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.atomic.AtomicBoolean;
@@ -30,6 +32,7 @@ import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL;
 import org.apache.hadoop.hbase.regionserver.wal.FailedLogCloseException;
 import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener;
+import org.apache.hadoop.hbase.regionserver.wal.WALClosedException;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.HasThread;
 import org.apache.hadoop.hbase.wal.WAL;
@@ -177,17 +180,24 @@ public class LogRoller extends HasThread implements 
Closeable {
   rollLock.lock(); // FindBugs UL_UNRELEASED_LOCK_EXCEPTION_PATH
   try {
 this.lastrolltime = now;
-for (Entry entry : walNeedsRoll.entrySet()) {
+for (Iterator> iter = 
walNeedsRoll.entrySet().iterator(); iter
+.hasNext();) {
+  Entry entry = iter.next();
   final WAL wal = entry.getKey();
   // Force the roll if the logroll.period is elapsed or if a roll was 
requested.
   // The returned value is an array of actual region names.
-  final byte [][] regionsToFlush = wal.rollWriter(periodic ||
-  entry.getValue().booleanValue());
-  walNeedsRoll.put(wal, Boolean.FALSE);
-  if (regionsToFlush != null) {
-for (byte[] r : regionsToFlush) {
-  scheduleFlush(r);
+  try {
+final byte[][] regionsToFlush =
+wal.rollWriter(periodic || entry.getValue().booleanValue());
+walNeedsRoll.put(wal, Boolean.FALSE);
+if (regionsToFlush != null) {
+  for (byte[] r : regionsToFlush) {
+scheduleFlush(r);
+  }
 }
+  } catch (WALClosedException e) {
+LOG.warn("WAL has been closed. Skipping rolling of writer and just 
remove it", e);
+iter.remove();
   }
 }
   } catch (FailedLogCloseException e) {
@@ -252,4 +262,9 @@ public class LogRoller extends HasThread implements 
Closeable {
 running = false;
 interrupt();
   }
+
+  @VisibleForTesting
+  Map getWalNeedsRoll() {
+return this.walNeedsRoll;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/3f1b25e4/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
index 4255086..72ad8b8 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
@@ -754,15 +754,14 @@ public abstract class AbstractFSWAL 
implements WAL {
   public byte[][] rollWriter(boolean force) throws FailedLogCloseException, 
IOException {
 rollWriterLock.lock();
 try {
+  if (this.closed) {
+throw new WALClosedExceptio

[11/50] [abbrv] hbase git commit: HBASE-20579 Include original exception in wrapped exception

HBASE-20579 Include original exception in wrapped exception

Signed-off-by: Ted Yu 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/a5ed463d
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/a5ed463d
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/a5ed463d

Branch: refs/heads/HBASE-19064
Commit: a5ed463d48b9bd1785f12a9abdb92de1c76767fd
Parents: 8377873
Author: Josh Elser 
Authored: Sat Jun 2 18:11:08 2018 -0400
Committer: Josh Elser 
Committed: Sat Jun 2 22:27:13 2018 -0400

--
 .../java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java| 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/a5ed463d/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java
--
diff --git 
a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java
 
b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java
index f75a479..93b8ab5 100644
--- 
a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java
+++ 
b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java
@@ -877,7 +877,7 @@ public class ExportSnapshot extends AbstractHBaseTool 
implements Tool {
 fs.setOwner(path, filesUser, filesGroup);
   } catch (IOException e) {
 throw new RuntimeException(
-"set owner for file " + path + " to " + filesUser + ":" + 
filesGroup + " failed");
+"set owner for file " + path + " to " + filesUser + ":" + 
filesGroup + " failed", e);
   }
 }, conf);
   }
@@ -893,7 +893,7 @@ public class ExportSnapshot extends AbstractHBaseTool 
implements Tool {
 fs.setPermission(path, perm);
   } catch (IOException e) {
 throw new RuntimeException(
-"set permission for file " + path + " to " + filesMode + " 
failed");
+"set permission for file " + path + " to " + filesMode + " 
failed", e);
   }
 }, conf);
   }



[43/50] [abbrv] hbase git commit: HBASE-20426 Give up replicating anything in S state

HBASE-20426 Give up replicating anything in S state


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/cb3c0c03
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/cb3c0c03
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/cb3c0c03

Branch: refs/heads/HBASE-19064
Commit: cb3c0c032e57e0e00eed9af9fd87b95ada36aca7
Parents: 84b69e2
Author: zhangduo 
Authored: Thu May 3 15:51:35 2018 +0800
Committer: zhangduo 
Committed: Tue Jun 5 18:13:59 2018 +0800

--
 .../src/main/protobuf/MasterProcedure.proto |  13 +-
 .../replication/AbstractPeerProcedure.java  |   4 +
 .../master/replication/ModifyPeerProcedure.java |   6 -
 .../replication/ReplicationPeerManager.java |  13 +-
 ...ransitPeerSyncReplicationStateProcedure.java |  94 +++
 .../hadoop/hbase/regionserver/LogRoller.java|  11 +-
 .../regionserver/PeerProcedureHandlerImpl.java  |  63 --
 .../regionserver/ReplicationSource.java |   1 +
 .../regionserver/ReplicationSourceManager.java  | 118 ---
 .../TestDrainReplicationQueuesForStandBy.java   | 118 +++
 10 files changed, 379 insertions(+), 62 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/cb3c0c03/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
--
diff --git a/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto 
b/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
index d58608a..f58ad2e 100644
--- a/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
@@ -394,11 +394,14 @@ enum PeerSyncReplicationStateTransitionState {
   SET_PEER_NEW_SYNC_REPLICATION_STATE = 2;
   REFRESH_PEER_SYNC_REPLICATION_STATE_ON_RS_BEGIN = 3;
   REPLAY_REMOTE_WAL_IN_PEER = 4;
-  REOPEN_ALL_REGIONS_IN_PEER = 5;
-  TRANSIT_PEER_NEW_SYNC_REPLICATION_STATE = 6;
-  REFRESH_PEER_SYNC_REPLICATION_STATE_ON_RS_END = 7;
-  CREATE_DIR_FOR_REMOTE_WAL = 8;
-  POST_PEER_SYNC_REPLICATION_STATE_TRANSITION = 9;
+  REMOVE_ALL_REPLICATION_QUEUES_IN_PEER = 5;
+  REOPEN_ALL_REGIONS_IN_PEER = 6;
+  TRANSIT_PEER_NEW_SYNC_REPLICATION_STATE = 7;
+  REFRESH_PEER_SYNC_REPLICATION_STATE_ON_RS_END = 8;
+  SYNC_REPLICATION_SET_PEER_ENABLED = 9;
+  SYNC_REPLICATION_ENABLE_PEER_REFRESH_PEER_ON_RS = 10;
+  CREATE_DIR_FOR_REMOTE_WAL = 11;
+  POST_PEER_SYNC_REPLICATION_STATE_TRANSITION = 12;
 }
 
 message PeerModificationStateData {

http://git-wip-us.apache.org/repos/asf/hbase/blob/cb3c0c03/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/AbstractPeerProcedure.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/AbstractPeerProcedure.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/AbstractPeerProcedure.java
index 6679d78..458e073 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/AbstractPeerProcedure.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/AbstractPeerProcedure.java
@@ -106,4 +106,8 @@ public abstract class AbstractPeerProcedure
 throw new UnsupportedOperationException();
   }
 
+  protected final void refreshPeer(MasterProcedureEnv env, PeerOperationType 
type) {
+
addChildProcedure(env.getMasterServices().getServerManager().getOnlineServersList().stream()
+  .map(sn -> new RefreshPeerProcedure(peerId, type, 
sn)).toArray(RefreshPeerProcedure[]::new));
+  }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/cb3c0c03/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ModifyPeerProcedure.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ModifyPeerProcedure.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ModifyPeerProcedure.java
index fc559b0..ad4df61 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ModifyPeerProcedure.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ModifyPeerProcedure.java
@@ -109,12 +109,6 @@ public abstract class ModifyPeerProcedure extends 
AbstractPeerProcedure new RefreshPeerProcedure(peerId, type, sn))
-  .toArray(RefreshPeerProcedure[]::new));
-  }
-
   protected ReplicationPeerConfig getOldPeerConfig() {
 return null;
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/cb3c0c03/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
--
diff --git 
a/hbase-server/src/main/java/

[16/50] [abbrv] hbase git commit: HBASE-20628 SegmentScanner does over-comparing when one flushing

HBASE-20628 SegmentScanner does over-comparing when one flushing

Signed-off-by: eshcar 
Signed-off-by: anoopsjohn 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/03c0f7fe
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/03c0f7fe
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/03c0f7fe

Branch: refs/heads/HBASE-19064
Commit: 03c0f7fe1343e681eda350a7a3528b62b28532cc
Parents: a472f24
Author: Michael Stack 
Authored: Fri Jun 1 13:35:23 2018 -0700
Committer: Michael Stack 
Committed: Mon Jun 4 09:50:47 2018 -0700

--
 .../regionserver/CompositeImmutableSegment.java |   9 ++
 .../hbase/regionserver/ImmutableSegment.java|   4 +
 .../hbase/regionserver/MemStoreSnapshot.java|   3 +-
 .../regionserver/SnapshotSegmentScanner.java| 101 +++
 4 files changed, 115 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/03c0f7fe/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.java
index 1fd2f23..dcfaf81 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.java
@@ -279,4 +279,13 @@ public class CompositeImmutableSegment extends 
ImmutableSegment {
 }
 return sb.toString();
   }
+
+  @Override
+  List getSnapshotScanners() {
+List list = new ArrayList<>(this.segments.size());
+for (ImmutableSegment segment: this.segments) {
+  list.add(new SnapshotSegmentScanner(segment));
+}
+return list;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/03c0f7fe/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ImmutableSegment.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ImmutableSegment.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ImmutableSegment.java
index b781aab..8c426bc 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ImmutableSegment.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ImmutableSegment.java
@@ -84,4 +84,8 @@ public abstract class ImmutableSegment extends Segment {
 res += "Num uniques "+getNumUniqueKeys()+"; ";
 return res;
   }
+
+  List getSnapshotScanners() {
+return Collections.singletonList(new SnapshotSegmentScanner(this));
+  }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/03c0f7fe/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreSnapshot.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreSnapshot.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreSnapshot.java
index f747224..3b34828 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreSnapshot.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreSnapshot.java
@@ -40,7 +40,7 @@ public class MemStoreSnapshot implements Closeable {
 this.cellsCount = snapshot.getCellsCount();
 this.memStoreSize = snapshot.getMemStoreSize();
 this.timeRangeTracker = snapshot.getTimeRangeTracker();
-this.scanners = snapshot.getScanners(Long.MAX_VALUE);
+this.scanners = snapshot.getSnapshotScanners();
 this.tagsPresent = snapshot.isTagsPresent();
   }
 
@@ -95,5 +95,4 @@ public class MemStoreSnapshot implements Closeable {
   }
 }
   }
-
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/03c0f7fe/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SnapshotSegmentScanner.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SnapshotSegmentScanner.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SnapshotSegmentScanner.java
new file mode 100644
index 000..87be2e4
--- /dev/null
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SnapshotSegmentScanner.java
@@ -0,0 +1,101 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2

[20/50] [abbrv] hbase git commit: HBASE-20678 NPE in ReplicationSourceManager#NodeFailoverWorker

HBASE-20678 NPE in ReplicationSourceManager#NodeFailoverWorker


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/57c86717
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/57c86717
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/57c86717

Branch: refs/heads/HBASE-19064
Commit: 57c86717285d74f4604a277ee034878398568d81
Parents: a45763d
Author: Guanghao Zhang 
Authored: Mon Jun 4 11:10:22 2018 +0800
Committer: Guanghao Zhang 
Committed: Tue Jun 5 14:37:52 2018 +0800

--
 .../hadoop/hbase/replication/ZKReplicationQueueStorage.java | 9 +
 1 file changed, 5 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/57c86717/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ZKReplicationQueueStorage.java
--
diff --git 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ZKReplicationQueueStorage.java
 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ZKReplicationQueueStorage.java
index b9ebfb9..cca8bfc 100644
--- 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ZKReplicationQueueStorage.java
+++ 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ZKReplicationQueueStorage.java
@@ -393,10 +393,10 @@ class ZKReplicationQueueStorage extends 
ZKReplicationStorageBase
 " failed when creating the node for " + destServerName,
   e);
 }
+String newQueueId = queueId + "-" + sourceServerName;
 try {
   String oldQueueNode = getQueueNode(sourceServerName, queueId);
   List wals = ZKUtil.listChildrenNoWatch(zookeeper, oldQueueNode);
-  String newQueueId = queueId + "-" + sourceServerName;
   if (CollectionUtils.isEmpty(wals)) {
 ZKUtil.deleteNodeFailSilent(zookeeper, oldQueueNode);
 LOG.info("Removed empty {}/{}", sourceServerName, queueId);
@@ -427,11 +427,12 @@ class ZKReplicationQueueStorage extends 
ZKReplicationStorageBase
   return new Pair<>(newQueueId, logQueue);
 } catch (NoNodeException | NodeExistsException | NotEmptyException | 
BadVersionException e) {
   // Multi call failed; it looks like some other regionserver took away 
the logs.
-  // These exceptions mean that zk tells us the request can not be execute 
so it is safe to just
-  // return a null. For other types of exception should be thrown out to 
notify the upper layer.
+  // These exceptions mean that zk tells us the request can not be 
execute. So return an empty
+  // queue to tell the upper layer that claim nothing. For other types of 
exception should be
+  // thrown out to notify the upper layer.
   LOG.info("Claim queue queueId={} from {} to {} failed with {}, someone 
else took the log?",
   queueId,sourceServerName, destServerName, e.toString());
-  return null;
+  return new Pair<>(newQueueId, Collections.emptySortedSet());
 } catch (KeeperException | InterruptedException e) {
   throw new ReplicationException("Claim queue queueId=" + queueId + " from 
" +
 sourceServerName + " to " + destServerName + " failed", e);



[28/50] [abbrv] hbase git commit: HBASE-19864 Use protobuf instead of enum.ordinal to store SyncReplicationState

HBASE-19864 Use protobuf instead of enum.ordinal to store SyncReplicationState

Signed-off-by: zhangduo 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/b4106bad
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/b4106bad
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/b4106bad

Branch: refs/heads/HBASE-19064
Commit: b4106bad9880190df05e6b909dd077be71d2ded1
Parents: 346b480
Author: Guanghao Zhang 
Authored: Fri Jan 26 16:50:48 2018 +0800
Committer: zhangduo 
Committed: Tue Jun 5 17:24:38 2018 +0800

--
 .../replication/ReplicationPeerConfigUtil.java  | 22 +++---
 .../hbase/replication/SyncReplicationState.java | 17 ++
 .../hbase/shaded/protobuf/RequestConverter.java |  7 +++---
 .../src/main/protobuf/Replication.proto | 13 +++
 .../replication/ZKReplicationPeerStorage.java   | 24 +---
 .../hadoop/hbase/master/MasterRpcServices.java  |  9 
 ...ransitPeerSyncReplicationStateProcedure.java |  9 
 .../TestReplicationSourceManager.java   |  2 +-
 8 files changed, 67 insertions(+), 36 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/b4106bad/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java
index 6cbe05b..331795c 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java
@@ -403,7 +403,7 @@ public final class ReplicationPeerConfigUtil {
 ReplicationProtos.ReplicationState.State.ENABLED == 
desc.getState().getState();
 ReplicationPeerConfig config = convert(desc.getConfig());
 return new ReplicationPeerDescription(desc.getId(), enabled, config,
-
SyncReplicationState.valueOf(desc.getSyncReplicationState().getNumber()));
+  toSyncReplicationState(desc.getSyncReplicationState()));
   }
 
   public static ReplicationProtos.ReplicationPeerDescription
@@ -411,17 +411,33 @@ public final class ReplicationPeerConfigUtil {
 ReplicationProtos.ReplicationPeerDescription.Builder builder =
 ReplicationProtos.ReplicationPeerDescription.newBuilder();
 builder.setId(desc.getPeerId());
+
 ReplicationProtos.ReplicationState.Builder stateBuilder =
 ReplicationProtos.ReplicationState.newBuilder();
 stateBuilder.setState(desc.isEnabled() ? 
ReplicationProtos.ReplicationState.State.ENABLED :
 ReplicationProtos.ReplicationState.State.DISABLED);
 builder.setState(stateBuilder.build());
+
 builder.setConfig(convert(desc.getPeerConfig()));
-builder.setSyncReplicationState(
-  
ReplicationProtos.SyncReplicationState.forNumber(desc.getSyncReplicationState().ordinal()));
+
builder.setSyncReplicationState(toSyncReplicationState(desc.getSyncReplicationState()));
+
 return builder.build();
   }
 
+  public static ReplicationProtos.SyncReplicationState
+  toSyncReplicationState(SyncReplicationState state) {
+ReplicationProtos.SyncReplicationState.Builder syncReplicationStateBuilder 
=
+ReplicationProtos.SyncReplicationState.newBuilder();
+syncReplicationStateBuilder
+
.setState(ReplicationProtos.SyncReplicationState.State.forNumber(state.ordinal()));
+return syncReplicationStateBuilder.build();
+  }
+
+  public static SyncReplicationState
+  toSyncReplicationState(ReplicationProtos.SyncReplicationState state) {
+return SyncReplicationState.valueOf(state.getState().getNumber());
+  }
+
   public static ReplicationPeerConfig appendTableCFsToReplicationPeerConfig(
   Map> tableCfs, ReplicationPeerConfig peerConfig) 
{
 ReplicationPeerConfigBuilder builder = 
ReplicationPeerConfig.newBuilder(peerConfig);

http://git-wip-us.apache.org/repos/asf/hbase/blob/b4106bad/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/SyncReplicationState.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/SyncReplicationState.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/SyncReplicationState.java
index bd144e9..a65b144 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/SyncReplicationState.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/SyncReplicationState.java
@@ -17,8 +17,15 @@
  */
 package org.apache.hadoop.hbase.replication;
 

[22/50] [abbrv] hbase git commit: HBASE-19857 Complete the procedure for adding a sync replication peer

HBASE-19857 Complete the procedure for adding a sync replication peer


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/346b480d
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/346b480d
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/346b480d

Branch: refs/heads/HBASE-19064
Commit: 346b480d9cb988ab4b82dac9645a0fca14fcfc64
Parents: 7bef837
Author: zhangduo 
Authored: Thu Jan 25 20:09:00 2018 +0800
Committer: zhangduo 
Committed: Tue Jun 5 17:24:38 2018 +0800

--
 .../hbase/replication/ReplicationPeer.java  |   9 +
 .../hbase/replication/ReplicationPeerImpl.java  |  28 +--
 .../hbase/replication/ReplicationPeers.java |   3 +-
 .../regionserver/PeerActionListener.java|  10 +-
 .../SyncReplicationPeerProvider.java|  35 +++
 .../SynchronousReplicationPeerProvider.java |  35 ---
 .../hbase/wal/SyncReplicationWALProvider.java   | 234 +++
 .../wal/SynchronousReplicationWALProvider.java  | 225 --
 .../org/apache/hadoop/hbase/wal/WALFactory.java |   8 +-
 .../TestReplicationSourceManager.java   |   3 +
 .../wal/TestSyncReplicationWALProvider.java | 153 
 .../TestSynchronousReplicationWALProvider.java  | 153 
 12 files changed, 456 insertions(+), 440 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/346b480d/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java
--
diff --git 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java
 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java
index 2da3cce..0196a9a 100644
--- 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java
+++ 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java
@@ -54,6 +54,15 @@ public interface ReplicationPeer {
   PeerState getPeerState();
 
   /**
+   * Returns the sync replication state of the peer by reading local cache.
+   * 
+   * If the peer is not a synchronous replication peer, a {@link 
SyncReplicationState#NONE} will be
+   * returned.
+   * @return the sync replication state
+   */
+  SyncReplicationState getSyncReplicationState();
+
+  /**
* Test whether the peer is enabled.
* @return {@code true} if enabled, otherwise {@code false}.
*/

http://git-wip-us.apache.org/repos/asf/hbase/blob/346b480d/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerImpl.java
--
diff --git 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerImpl.java
 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerImpl.java
index d656466..ff3f662 100644
--- 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerImpl.java
+++ 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerImpl.java
@@ -36,6 +36,8 @@ public class ReplicationPeerImpl implements ReplicationPeer {
 
   private volatile PeerState peerState;
 
+  private volatile SyncReplicationState syncReplicationState;
+
   private final List peerConfigListeners;
 
   /**
@@ -45,12 +47,13 @@ public class ReplicationPeerImpl implements ReplicationPeer 
{
* @param id string representation of this peer's identifier
* @param peerConfig configuration for the replication peer
*/
-  public ReplicationPeerImpl(Configuration conf, String id, boolean peerState,
-  ReplicationPeerConfig peerConfig) {
+  public ReplicationPeerImpl(Configuration conf, String id, 
ReplicationPeerConfig peerConfig,
+  boolean peerState, SyncReplicationState syncReplicationState) {
 this.conf = conf;
 this.id = id;
 this.peerState = peerState ? PeerState.ENABLED : PeerState.DISABLED;
 this.peerConfig = peerConfig;
+this.syncReplicationState = syncReplicationState;
 this.peerConfigListeners = new ArrayList<>();
   }
 
@@ -77,37 +80,26 @@ public class ReplicationPeerImpl implements ReplicationPeer 
{
 return peerState;
   }
 
-  /**
-   * Get the peer config object
-   * @return the ReplicationPeerConfig for this peer
-   */
+  @Override
+  public SyncReplicationState getSyncReplicationState() {
+return syncReplicationState;
+  }
+
   @Override
   public ReplicationPeerConfig getPeerConfig() {
 return peerConfig;
   }
 
-  /**
-   * Get the configuration object required to communicate with this peer
-   * @return configuration object
-   */
   @Override
   public Configuration getConfiguration() {
 return conf;
   }
 
-  /**
-   * Get replicable (table, cf-list) map of 

[12/50] [abbrv] hbase git commit: Add Guangxu Cheng to pom.xml

Add Guangxu Cheng to pom.xml


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/1b716ad5
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/1b716ad5
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/1b716ad5

Branch: refs/heads/HBASE-19064
Commit: 1b716ad5c8c81d10ecdbc4ebeb10c67e28d23801
Parents: a5ed463
Author: Guangxu Cheng 
Authored: Mon Jun 4 14:54:39 2018 +0800
Committer: Guangxu Cheng 
Committed: Mon Jun 4 14:54:39 2018 +0800

--
 pom.xml | 6 ++
 1 file changed, 6 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/1b716ad5/pom.xml
--
diff --git a/pom.xml b/pom.xml
index 4e1797b..ed7a172 100755
--- a/pom.xml
+++ b/pom.xml
@@ -303,6 +303,12 @@
   -8
 
 
+gxcheng
+Guangxu Cheng
+gxch...@apache.org
++8
+
+
   huaxiangsun
   Huaxiang Sun
   huaxiang...@apache.org



[44/50] [abbrv] hbase git commit: HBASE-19865 Add UT for sync replication peer in DA state

HBASE-19865 Add UT for sync replication peer in DA state


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/1dc079de
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/1dc079de
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/1dc079de

Branch: refs/heads/HBASE-19064
Commit: 1dc079de5b5e35c8256c6a9f9eb657293a9b26b7
Parents: cb3c0c0
Author: zhangduo 
Authored: Tue May 8 20:33:22 2018 +0800
Committer: zhangduo 
Committed: Tue Jun 5 18:13:59 2018 +0800

--
 .../hbase/replication/TestReplicationBase.java  | 28 +++---
 ...estReplicationChangingPeerRegionservers.java | 20 ++
 .../TestReplicationSmallTestsSync.java  | 40 
 3 files changed, 76 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/1dc079de/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationBase.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationBase.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationBase.java
index f96dbe5..cd84293 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationBase.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationBase.java
@@ -1,5 +1,4 @@
-/*
- *
+/**
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -28,6 +27,8 @@ import java.util.List;
 import java.util.NavigableMap;
 import java.util.TreeMap;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;
@@ -58,6 +59,9 @@ import org.junit.BeforeClass;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;
+import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableMap;
+
 /**
  * This class is only a base for other integration-level replication tests.
  * Do not add tests here.
@@ -99,6 +103,10 @@ public class TestReplicationBase {
 return false;
   }
 
+  protected boolean isSyncPeer() {
+return false;
+  }
+
   protected final void cleanUp() throws IOException, InterruptedException {
 // Starting and stopping replication can make us miss new logs,
 // rolling like this makes sure the most recent one gets added to the queue
@@ -245,9 +253,19 @@ public class TestReplicationBase {
   @Before
   public void setUpBase() throws Exception {
 if (!peerExist(PEER_ID2)) {
-  ReplicationPeerConfig rpc = ReplicationPeerConfig.newBuilder()
-  
.setClusterKey(utility2.getClusterKey()).setSerial(isSerialPeer()).build();
-  hbaseAdmin.addReplicationPeer(PEER_ID2, rpc);
+  ReplicationPeerConfigBuilder builder = ReplicationPeerConfig.newBuilder()
+.setClusterKey(utility2.getClusterKey()).setSerial(isSerialPeer());
+  if (isSyncPeer()) {
+FileSystem fs2 = utility2.getTestFileSystem();
+// The remote wal dir is not important as we do not use it in DA 
state, here we only need to
+// confirm that a sync peer in DA state can still replicate data to 
remote cluster
+// asynchronously.
+builder.setReplicateAllUserTables(false)
+  .setTableCFsMap(ImmutableMap.of(tableName, ImmutableList.of()))
+  .setRemoteWALDir(new Path("/RemoteWAL")
+.makeQualified(fs2.getUri(), 
fs2.getWorkingDirectory()).toUri().toString());
+  }
+  hbaseAdmin.addReplicationPeer(PEER_ID2, builder.build());
 }
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/1dc079de/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationChangingPeerRegionservers.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationChangingPeerRegionservers.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationChangingPeerRegionservers.java
index b94b443..5c96742 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationChangingPeerRegionservers.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationChangingPeerRegionservers.java
@@ -62,22 +62,28 @@ public class TestReplicationChangingPeerRegionservers 
extends TestReplicationBas
   private static final Logger LOG =
   LoggerFactory.getLogger(TestReplicat

[50/50] [abbrv] hbase git commit: HBASE-20434 Also remove remote wals when peer is in DA state

HBASE-20434 Also remove remote wals when peer is in DA state


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/20283d4b
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/20283d4b
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/20283d4b

Branch: refs/heads/HBASE-19064
Commit: 20283d4b5049edb3b34968ede46ea7ab19b0f8ac
Parents: 44d5ea7
Author: zhangduo 
Authored: Wed Apr 25 17:12:23 2018 +0800
Committer: zhangduo 
Committed: Tue Jun 5 18:13:59 2018 +0800

--
 .../hbase/replication/ReplicationUtils.java |   4 +
 ...ransitPeerSyncReplicationStateProcedure.java |   2 +-
 .../regionserver/ReplicationSource.java |   7 +-
 .../regionserver/ReplicationSourceManager.java  |  86 ++--
 .../hadoop/hbase/wal/AbstractFSWALProvider.java |  19 ++--
 .../hbase/wal/SyncReplicationWALProvider.java   |  30 +-
 .../TestSyncReplicationRemoveRemoteWAL.java | 101 +++
 .../TestReplicationSourceManager.java   |  68 -
 8 files changed, 251 insertions(+), 66 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/20283d4b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java
--
diff --git 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java
 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java
index 66e9b01..069db7a 100644
--- 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java
+++ 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java
@@ -191,6 +191,10 @@ public final class ReplicationUtils {
 return new Path(remoteWALDir, peerId);
   }
 
+  public static Path getRemoteWALDirForPeer(Path remoteWALDir, String peerId) {
+return new Path(remoteWALDir, peerId);
+  }
+
   /**
* Do the sleeping logic
* @param msg Why we sleep

http://git-wip-us.apache.org/repos/asf/hbase/blob/20283d4b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/TransitPeerSyncReplicationStateProcedure.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/TransitPeerSyncReplicationStateProcedure.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/TransitPeerSyncReplicationStateProcedure.java
index 5da2b0c..99fd615 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/TransitPeerSyncReplicationStateProcedure.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/TransitPeerSyncReplicationStateProcedure.java
@@ -211,7 +211,7 @@ public class TransitPeerSyncReplicationStateProcedure
   case CREATE_DIR_FOR_REMOTE_WAL:
 MasterFileSystem mfs = env.getMasterFileSystem();
 Path remoteWALDir = new Path(mfs.getWALRootDir(), 
ReplicationUtils.REMOTE_WAL_DIR_NAME);
-Path remoteWALDirForPeer = new Path(remoteWALDir, peerId);
+Path remoteWALDirForPeer = 
ReplicationUtils.getRemoteWALDirForPeer(remoteWALDir, peerId);
 FileSystem walFs = mfs.getWALFileSystem();
 try {
   if (walFs.exists(remoteWALDirForPeer)) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/20283d4b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
index e48e7f1..61d0387 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
@@ -550,14 +550,17 @@ public class ReplicationSource implements 
ReplicationSourceInterface {
 }
 
 /**
+ * 
  * Split a path to get the start time
+ * 
+ * 
  * For example: 10.20.20.171%3A60020.1277499063250
+ * 
  * @param p path to split
  * @return start time
  */
 private static long getTS(Path p) {
-  int tsIndex = p.getName().lastIndexOf('.') + 1;
-  return Long.parseLong(p.getName().substring(tsIndex));
+  return AbstractFSWALProvider.getWALStartTimeFromWALName(p.getName());
 }
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/20283d4b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
-

[24/50] [abbrv] hbase git commit: HBASE-19747 Introduce a special WALProvider for synchronous replication

HBASE-19747 Introduce a special WALProvider for synchronous replication


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/500ac83c
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/500ac83c
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/500ac83c

Branch: refs/heads/HBASE-19064
Commit: 500ac83ccbb02a8de959ad8511cf5d3503eb6f34
Parents: 0eabb9f
Author: zhangduo 
Authored: Fri Jan 19 18:38:39 2018 +0800
Committer: zhangduo 
Committed: Tue Jun 5 17:24:38 2018 +0800

--
 .../hbase/regionserver/wal/AbstractFSWAL.java   |   7 +
 .../hbase/regionserver/wal/AsyncFSWAL.java  |   1 -
 .../hbase/regionserver/wal/DualAsyncFSWAL.java  |   4 +-
 .../hadoop/hbase/regionserver/wal/FSHLog.java   |   4 -
 .../regionserver/PeerActionListener.java|  33 +++
 .../SynchronousReplicationPeerProvider.java |  35 +++
 .../hadoop/hbase/wal/AbstractFSWALProvider.java |   1 +
 .../hadoop/hbase/wal/AsyncFSWALProvider.java|  18 +-
 .../hbase/wal/NettyAsyncFSWALConfigHelper.java  |   8 +-
 .../hbase/wal/RegionGroupingProvider.java   |  13 +-
 .../wal/SynchronousReplicationWALProvider.java  | 225 +++
 .../org/apache/hadoop/hbase/wal/WALFactory.java |  37 ++-
 .../org/apache/hadoop/hbase/wal/WALKeyImpl.java |  16 +-
 .../regionserver/TestCompactionPolicy.java  |   1 +
 .../regionserver/TestFailedAppendAndSync.java   | 122 +-
 .../hadoop/hbase/regionserver/TestHRegion.java  |  24 +-
 .../TestHRegionWithInMemoryFlush.java   |   7 -
 .../hbase/regionserver/TestRegionIncrement.java |  20 +-
 .../hbase/regionserver/TestWALLockup.java   |   1 +
 .../regionserver/wal/AbstractTestWALReplay.java |   1 +
 .../regionserver/wal/ProtobufLogTestHelper.java |  44 +++-
 .../hbase/regionserver/wal/TestAsyncFSWAL.java  |  13 +-
 .../regionserver/wal/TestAsyncWALReplay.java|   4 +-
 .../wal/TestCombinedAsyncWriter.java|   3 +-
 .../hbase/regionserver/wal/TestFSHLog.java  |  15 +-
 .../hbase/regionserver/wal/TestWALReplay.java   |   1 +
 .../apache/hadoop/hbase/wal/IOTestProvider.java |   2 -
 .../TestSynchronousReplicationWALProvider.java  | 153 +
 28 files changed, 659 insertions(+), 154 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/500ac83c/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
index 825ad17..4255086 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
@@ -434,6 +434,13 @@ public abstract class AbstractFSWAL 
implements WAL {
 this.implClassName = getClass().getSimpleName();
   }
 
+  /**
+   * Used to initialize the WAL. Usually just call rollWriter to create the 
first log writer.
+   */
+  public void init() throws IOException {
+rollWriter();
+  }
+
   @Override
   public void registerWALActionsListener(WALActionsListener listener) {
 this.listeners.add(listener);

http://git-wip-us.apache.org/repos/asf/hbase/blob/500ac83c/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java
index 4732f41..d98ab75 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java
@@ -248,7 +248,6 @@ public class AsyncFSWAL extends AbstractFSWAL {
 batchSize = conf.getLong(WAL_BATCH_SIZE, DEFAULT_WAL_BATCH_SIZE);
 waitOnShutdownInSeconds = 
conf.getInt(ASYNC_WAL_WAIT_ON_SHUTDOWN_IN_SECONDS,
   DEFAULT_ASYNC_WAL_WAIT_ON_SHUTDOWN_IN_SECONDS);
-rollWriter();
   }
 
   private static boolean waitingRoll(int epochAndState) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/500ac83c/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/DualAsyncFSWAL.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/DualAsyncFSWAL.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/DualAsyncFSWAL.java
index 42b0dae..0495337 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/DualAsyncFSWAL.java
+++ 
b/hbase-serv

[34/50] [abbrv] hbase git commit: HBASE-19973 Implement a procedure to replay sync replication wal for standby cluster

HBASE-19973 Implement a procedure to replay sync replication wal for standby 
cluster


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/7fc48c1b
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/7fc48c1b
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/7fc48c1b

Branch: refs/heads/HBASE-19064
Commit: 7fc48c1bdc857620612dfa31306df7e9064c8d47
Parents: 5d1ecc4
Author: Guanghao Zhang 
Authored: Fri Mar 2 18:43:25 2018 +0800
Committer: zhangduo 
Committed: Tue Jun 5 18:13:50 2018 +0800

--
 .../src/main/protobuf/MasterProcedure.proto |  22 +++
 .../apache/hadoop/hbase/executor/EventType.java |   9 +-
 .../hadoop/hbase/executor/ExecutorType.java |   3 +-
 .../org/apache/hadoop/hbase/master/HMaster.java |   9 +
 .../hadoop/hbase/master/MasterServices.java |   6 +
 .../procedure/PeerProcedureInterface.java   |   3 +-
 .../hbase/master/procedure/PeerQueue.java   |   3 +-
 .../replication/RecoverStandbyProcedure.java| 114 +++
 .../ReplaySyncReplicationWALManager.java| 139 +
 .../ReplaySyncReplicationWALProcedure.java  | 196 +++
 .../hbase/regionserver/HRegionServer.java   |   9 +-
 .../ReplaySyncReplicationWALCallable.java   | 149 ++
 .../SyncReplicationPeerInfoProviderImpl.java|   3 +
 .../org/apache/hadoop/hbase/util/FSUtils.java   |   5 +
 .../hbase/master/MockNoopMasterServices.java|   8 +-
 .../master/TestRecoverStandbyProcedure.java | 186 ++
 16 files changed, 857 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/7fc48c1b/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
--
diff --git a/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto 
b/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
index e60881f..d58608a 100644
--- a/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
@@ -468,3 +468,25 @@ message TransitPeerSyncReplicationStateStateData {
   optional SyncReplicationState fromState = 1;
   required SyncReplicationState toState = 2;
 }
+
+enum RecoverStandbyState {
+  RENAME_SYNC_REPLICATION_WALS_DIR = 1;
+  INIT_WORKERS = 2;
+  DISPATCH_TASKS = 3;
+  REMOVE_SYNC_REPLICATION_WALS_DIR = 4;
+}
+
+message RecoverStandbyStateData {
+  required string peer_id = 1;
+}
+
+message ReplaySyncReplicationWALStateData {
+  required string peer_id = 1;
+  required string wal = 2;
+  optional ServerName target_server = 3;
+}
+
+message ReplaySyncReplicationWALParameter {
+  required string peer_id = 1;
+  required string wal = 2;
+}

http://git-wip-us.apache.org/repos/asf/hbase/blob/7fc48c1b/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/EventType.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/EventType.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/EventType.java
index 922deb8..ad38d1c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/EventType.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/EventType.java
@@ -281,7 +281,14 @@ public enum EventType {
*
* RS_REFRESH_PEER
*/
-  RS_REFRESH_PEER (84, ExecutorType.RS_REFRESH_PEER);
+  RS_REFRESH_PEER(84, ExecutorType.RS_REFRESH_PEER),
+
+  /**
+   * RS replay sync replication wal.
+   *
+   * RS_REPLAY_SYNC_REPLICATION_WAL
+   */
+  RS_REPLAY_SYNC_REPLICATION_WAL(85, 
ExecutorType.RS_REPLAY_SYNC_REPLICATION_WAL);
 
   private final int code;
   private final ExecutorType executor;

http://git-wip-us.apache.org/repos/asf/hbase/blob/7fc48c1b/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/ExecutorType.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/ExecutorType.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/ExecutorType.java
index 7f130d1..ea97354 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/ExecutorType.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/ExecutorType.java
@@ -47,7 +47,8 @@ public enum ExecutorType {
   RS_REGION_REPLICA_FLUSH_OPS  (28),
   RS_COMPACTED_FILES_DISCHARGER (29),
   RS_OPEN_PRIORITY_REGION(30),
-  RS_REFRESH_PEER   (31);
+  RS_REFRESH_PEER(31),
+  RS_REPLAY_SYNC_REPLICATION_WAL(32);
 
   ExecutorType(int value) {
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/7fc48c1b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
---

[05/50] [abbrv] hbase git commit: HBASE-20668 Avoid permission change if ExportSnapshot's copy fails

HBASE-20668 Avoid permission change if ExportSnapshot's copy fails


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/74ef118e
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/74ef118e
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/74ef118e

Branch: refs/heads/HBASE-19064
Commit: 74ef118e9e2246c09280ebb7eb6552ef91bdd094
Parents: a11701e
Author: tedyu 
Authored: Fri Jun 1 14:34:51 2018 -0700
Committer: tedyu 
Committed: Fri Jun 1 14:34:51 2018 -0700

--
 .../hadoop/hbase/snapshot/ExportSnapshot.java   | 26 +++-
 1 file changed, 15 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/74ef118e/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java
--
diff --git 
a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java
 
b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java
index 4af7dfb..f75a479 100644
--- 
a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java
+++ 
b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java
@@ -1027,25 +1027,29 @@ public class ExportSnapshot extends AbstractHBaseTool 
implements Tool {
 // The snapshot references must be copied before the hfiles otherwise the 
cleaner
 // will remove them because they are unreferenced.
List<Path> travesedPaths = new ArrayList<>();
+boolean copySucceeded = false;
 try {
-  LOG.info("Copy Snapshot Manifest");
+  LOG.info("Copy Snapshot Manifest from " + snapshotDir + " to " + 
initialOutputSnapshotDir);
   travesedPaths =
   FSUtils.copyFilesParallel(inputFs, snapshotDir, outputFs, 
initialOutputSnapshotDir, conf,
   conf.getInt(CONF_COPY_MANIFEST_THREADS, 
DEFAULT_COPY_MANIFEST_THREADS));
+  copySucceeded = true;
 } catch (IOException e) {
   throw new ExportSnapshotException("Failed to copy the snapshot 
directory: from=" +
 snapshotDir + " to=" + initialOutputSnapshotDir, e);
 } finally {
-  if (filesUser != null || filesGroup != null) {
-LOG.warn((filesUser == null ? "" : "Change the owner of " + 
needSetOwnerDir + " to "
-+ filesUser)
-+ (filesGroup == null ? "" : ", Change the group of " + 
needSetOwnerDir + " to "
-+ filesGroup));
-setOwnerParallel(outputFs, filesUser, filesGroup, conf, travesedPaths);
-  }
-  if (filesMode > 0) {
-LOG.warn("Change the permission of " + needSetOwnerDir + " to " + 
filesMode);
-setPermissionParallel(outputFs, (short)filesMode, travesedPaths, conf);
+  if (copySucceeded) {
+if (filesUser != null || filesGroup != null) {
+  LOG.warn((filesUser == null ? "" : "Change the owner of " + 
needSetOwnerDir + " to "
+  + filesUser)
+  + (filesGroup == null ? "" : ", Change the group of " + 
needSetOwnerDir + " to "
+  + filesGroup));
+  setOwnerParallel(outputFs, filesUser, filesGroup, conf, 
travesedPaths);
+}
+if (filesMode > 0) {
+  LOG.warn("Change the permission of " + needSetOwnerDir + " to " + 
filesMode);
+  setPermissionParallel(outputFs, (short)filesMode, travesedPaths, 
conf);
+}
   }
 }
 



[26/50] [abbrv] hbase git commit: HBASE-19781 Add a new cluster state flag for synchronous replication

http://git-wip-us.apache.org/repos/asf/hbase/blob/7bef8370/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckReplication.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckReplication.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckReplication.java
index 8911982..f5eca39 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckReplication.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckReplication.java
@@ -28,6 +28,7 @@ import 
org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
 import org.apache.hadoop.hbase.replication.ReplicationPeerStorage;
 import org.apache.hadoop.hbase.replication.ReplicationQueueStorage;
 import org.apache.hadoop.hbase.replication.ReplicationStorageFactory;
+import org.apache.hadoop.hbase.replication.SyncReplicationState;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.testclassification.ReplicationTests;
 import org.apache.hadoop.hbase.util.HBaseFsck.ErrorReporter.ERROR_CODE;
@@ -67,9 +68,9 @@ public class TestHBaseFsckReplication {
 String peerId1 = "1";
 String peerId2 = "2";
 peerStorage.addPeer(peerId1, 
ReplicationPeerConfig.newBuilder().setClusterKey("key").build(),
-  true);
+  true, SyncReplicationState.NONE);
 peerStorage.addPeer(peerId2, 
ReplicationPeerConfig.newBuilder().setClusterKey("key").build(),
-  true);
+  true, SyncReplicationState.NONE);
 for (int i = 0; i < 10; i++) {
   queueStorage.addWAL(ServerName.valueOf("localhost", 1 + i, 10 + 
i), peerId1,
 "file-" + i);

http://git-wip-us.apache.org/repos/asf/hbase/blob/7bef8370/hbase-shell/src/main/ruby/hbase/replication_admin.rb
--
diff --git a/hbase-shell/src/main/ruby/hbase/replication_admin.rb 
b/hbase-shell/src/main/ruby/hbase/replication_admin.rb
index d1f1344..5f86365 100644
--- a/hbase-shell/src/main/ruby/hbase/replication_admin.rb
+++ b/hbase-shell/src/main/ruby/hbase/replication_admin.rb
@@ -20,6 +20,7 @@
 include Java
 
 java_import 
org.apache.hadoop.hbase.client.replication.ReplicationPeerConfigUtil
+java_import org.apache.hadoop.hbase.replication.SyncReplicationState
 java_import org.apache.hadoop.hbase.replication.ReplicationPeerConfig
 java_import org.apache.hadoop.hbase.util.Bytes
 java_import org.apache.hadoop.hbase.zookeeper.ZKConfig
@@ -338,6 +339,20 @@ module Hbase
   '!' + ReplicationPeerConfigUtil.convertToString(tableCFs)
 end
 
+# Transit current cluster to a new state in the specified synchronous
+# replication peer
+def transit_peer_sync_replication_state(id, state)
+  if 'ACTIVE'.eql?(state)
+@admin.transitReplicationPeerSyncReplicationState(id, 
SyncReplicationState::ACTIVE)
+  elsif 'DOWNGRADE_ACTIVE'.eql?(state)
+@admin.transitReplicationPeerSyncReplicationState(id, 
SyncReplicationState::DOWNGRADE_ACTIVE)
+  elsif 'STANDBY'.eql?(state)
+@admin.transitReplicationPeerSyncReplicationState(id, 
SyncReplicationState::STANDBY)
+  else
+raise(ArgumentError, 'synchronous replication state must be ACTIVE, 
DOWNGRADE_ACTIVE or STANDBY')
+  end
+end
+
 
#--
 # Enables a table's replication switch
 def enable_tablerep(table_name)

http://git-wip-us.apache.org/repos/asf/hbase/blob/7bef8370/hbase-shell/src/main/ruby/shell.rb
--
diff --git a/hbase-shell/src/main/ruby/shell.rb 
b/hbase-shell/src/main/ruby/shell.rb
index 9a79658..934fa11 100644
--- a/hbase-shell/src/main/ruby/shell.rb
+++ b/hbase-shell/src/main/ruby/shell.rb
@@ -393,6 +393,7 @@ Shell.load_command_group(
 get_peer_config
 list_peer_configs
 update_peer_config
+transit_peer_sync_replication_state
   ]
 )
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/7bef8370/hbase-shell/src/main/ruby/shell/commands/list_peers.rb
--
diff --git a/hbase-shell/src/main/ruby/shell/commands/list_peers.rb 
b/hbase-shell/src/main/ruby/shell/commands/list_peers.rb
index f3ab749..f2ec014 100644
--- a/hbase-shell/src/main/ruby/shell/commands/list_peers.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/list_peers.rb
@@ -39,8 +39,8 @@ EOF
 peers = replication_admin.list_peers
 
 formatter.header(%w[PEER_ID CLUSTER_KEY ENDPOINT_CLASSNAME
-REMOTE_ROOT_DIR STATE REPLICATE_ALL 
-NAMESPACES TABLE_CFS BANDWIDTH
+REMOTE_ROOT_DIR SYNC_REPLICATION_STATE STATE
+REPLICATE_ALL NAMESPACES TABLE_CFS BANDWIDTH

[46/50] [abbrv] hbase git commit: HBASE-20424 Allow writing WAL to local and remote cluster concurrently

HBASE-20424 Allow writing WAL to local and remote cluster concurrently


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/599080cc
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/599080cc
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/599080cc

Branch: refs/heads/HBASE-19064
Commit: 599080ccd46ec8a09859bef95c5b096b708fa238
Parents: fbddf63
Author: zhangduo 
Authored: Thu May 24 16:20:28 2018 +0800
Committer: zhangduo 
Committed: Tue Jun 5 18:13:59 2018 +0800

--
 .../src/main/protobuf/MasterProcedure.proto |   2 +-
 .../hbase/replication/ReplicationUtils.java |  26 ++-
 .../asyncfs/FanOutOneBlockAsyncDFSOutput.java   |   3 +-
 .../replication/RecoverStandbyProcedure.java|  10 +-
 .../master/replication/RemovePeerProcedure.java |   5 +-
 .../ReplaySyncReplicationWALManager.java| 110 ++-
 ...ransitPeerSyncReplicationStateProcedure.java |   4 +-
 .../hbase/regionserver/HRegionServer.java   |   3 +-
 .../regionserver/ReplicationSourceService.java  |   6 +
 .../hbase/regionserver/SplitLogWorker.java  | 188 +--
 .../regionserver/wal/CombinedAsyncWriter.java   |  80 ++--
 .../hbase/regionserver/wal/DualAsyncFSWAL.java  |  11 +-
 .../replication/regionserver/Replication.java   |   5 +
 .../regionserver/ReplicationSourceManager.java  |   2 +-
 .../SyncReplicationPeerInfoProviderImpl.java|   3 +-
 .../org/apache/hadoop/hbase/util/FSUtils.java   |   9 +
 .../hbase/wal/SyncReplicationWALProvider.java   |  43 -
 .../replication/TestReplicationAdmin.java   |   2 +-
 .../wal/TestCombinedAsyncWriter.java|  20 +-
 .../replication/DualAsyncFSWALForTest.java  | 149 +++
 .../replication/SyncReplicationTestBase.java|  12 +-
 .../replication/TestSyncReplicationActive.java  |   5 +-
 ...cReplicationMoreLogsInLocalCopyToRemote.java | 108 +++
 ...plicationMoreLogsInLocalGiveUpSplitting.java | 128 +
 .../TestSyncReplicationRemoveRemoteWAL.java |   7 +-
 .../replication/TestSyncReplicationStandBy.java |  20 +-
 .../master/TestRecoverStandbyProcedure.java |   4 +-
 .../TestReplicationSourceManager.java   |   5 +-
 .../wal/TestSyncReplicationWALProvider.java |   1 -
 29 files changed, 733 insertions(+), 238 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/599080cc/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
--
diff --git a/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto 
b/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
index f58ad2e..5764a21 100644
--- a/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
@@ -476,7 +476,7 @@ enum RecoverStandbyState {
   RENAME_SYNC_REPLICATION_WALS_DIR = 1;
   INIT_WORKERS = 2;
   DISPATCH_TASKS = 3;
-  REMOVE_SYNC_REPLICATION_WALS_DIR = 4;
+  SNAPSHOT_SYNC_REPLICATION_WALS_DIR = 4;
 }
 
 message RecoverStandbyStateData {

http://git-wip-us.apache.org/repos/asf/hbase/blob/599080cc/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java
--
diff --git 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java
 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java
index 069db7a..dc4217c 100644
--- 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java
+++ 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java
@@ -46,6 +46,16 @@ public final class ReplicationUtils {
 
   public static final String REMOTE_WAL_DIR_NAME = "remoteWALs";
 
+  public static final String SYNC_WAL_SUFFIX = ".syncrep";
+
+  public static final String REMOTE_WAL_REPLAY_SUFFIX = "-replay";
+
+  public static final String REMOTE_WAL_SNAPSHOT_SUFFIX = "-snapshot";
+
+  // This is used for copying sync replication log from local to remote and 
overwrite the old one
+  // since some FileSystem implementation may not support atomic rename.
+  public static final String RENAME_WAL_SUFFIX = ".ren";
+
   private ReplicationUtils() {
   }
 
@@ -187,14 +197,26 @@ public final class ReplicationUtils {
 return new Path(remoteWALDir).getFileSystem(conf);
   }
 
-  public static Path getRemoteWALDirForPeer(String remoteWALDir, String 
peerId) {
+  public static Path getPeerRemoteWALDir(String remoteWALDir, String peerId) {
 return new Path(remoteWALDir, peerId);
   }
 
-  public static Path getRemoteWALDirForPeer(Path remoteWALDir, String peerId) {
+  public static Path getPeerRemoteWALDir(Path remoteWALDir, String peerId) {
  

[03/50] [abbrv] hbase git commit: HBASE-20592 Create a tool to verify tables do not have prefix tree encoding

HBASE-20592 Create a tool to verify tables do not have prefix tree encoding

Signed-off-by: Mike Drob 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/09686682
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/09686682
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/09686682

Branch: refs/heads/HBASE-19064
Commit: 0968668283d9e3b23c2da8c2c4a0a77caee2e9af
Parents: d5ea926
Author: Peter Somogyi 
Authored: Fri May 25 15:03:17 2018 +0200
Committer: Peter Somogyi 
Committed: Fri Jun 1 19:17:49 2018 +0200

--
 bin/hbase   |   3 +
 .../hadoop/hbase/tool/PreUpgradeValidator.java  | 129 +++
 .../_chapters/appendix_hfile_format.adoc|   2 +-
 src/main/asciidoc/_chapters/compression.adoc|   2 +-
 src/main/asciidoc/_chapters/ops_mgt.adoc|  22 
 src/main/asciidoc/_chapters/upgrading.adoc  |   4 +
 6 files changed, 160 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/09686682/bin/hbase
--
diff --git a/bin/hbase b/bin/hbase
index f1e2306..4f1c854 100755
--- a/bin/hbase
+++ b/bin/hbase
@@ -108,6 +108,7 @@ if [ $# = 0 ]; then
   echo "  regionsplitter  Run RegionSplitter tool"
   echo "  rowcounter  Run RowCounter tool"
   echo "  cellcounter Run CellCounter tool"
+  echo "  pre-upgrade Run Pre-Upgrade validator tool"
   echo "  CLASSNAME   Run the class named CLASSNAME"
   exit 1
 fi
@@ -471,6 +472,8 @@ elif [ "$COMMAND" = "rowcounter" ] ; then
   CLASS='org.apache.hadoop.hbase.mapreduce.RowCounter'
 elif [ "$COMMAND" = "cellcounter" ] ; then
   CLASS='org.apache.hadoop.hbase.mapreduce.CellCounter'
+elif [ "$COMMAND" = "pre-upgrade" ] ; then
+  CLASS='org.apache.hadoop.hbase.tool.PreUpgradeValidator'
 else
   CLASS=$COMMAND
 fi

http://git-wip-us.apache.org/repos/asf/hbase/blob/09686682/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/PreUpgradeValidator.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/PreUpgradeValidator.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/PreUpgradeValidator.java
new file mode 100644
index 000..138af6a
--- /dev/null
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/PreUpgradeValidator.java
@@ -0,0 +1,129 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.tool;
+
+import java.io.IOException;
+import java.util.List;
+
+import org.apache.hadoop.hbase.HBaseInterfaceAudience;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
+import org.apache.hadoop.hbase.util.AbstractHBaseTool;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLine;
+
+/**
+ * Tool for validating that cluster can be upgraded from HBase 1.x to 2.0
+ * <p>
+ * Available validations:
+ * <ul>
+ * <li>all: Run all pre-upgrade validations</li>
+ * <li>validateDBE: Check Data Block Encoding for column families</li>
+ * </ul>
+ * </p>
+ */
+@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)
+public class PreUpgradeValidator extends AbstractHBaseTool {
+
+  public static final String NAME = "pre-upgrade";
+  private static final Logger LOG = 
LoggerFactory.getLogger(PreUpgradeValidator.class);
+  private static final byte[] DATA_BLOCK_ENCODING = 
Bytes.toBytes("DATA_BLOCK_ENCODING");
+  private boolean validateAll;
+  private boolean validateDBE;
+
+  /**
+   * Check DataBlockEncodings for column families.
+   *
+   * @return DataBlockEncoding compatible wit

[38/50] [abbrv] hbase git commit: HBASE-19782 Reject the replication request when peer is DA or A state

HBASE-19782 Reject the replication request when peer is DA or A state


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/e08e8bf7
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/e08e8bf7
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/e08e8bf7

Branch: refs/heads/HBASE-19064
Commit: e08e8bf78d3f905a494d802328d6f52666a15d6d
Parents: 10a56ce
Author: huzheng 
Authored: Fri Mar 2 18:05:29 2018 +0800
Committer: zhangduo 
Committed: Tue Jun 5 18:13:58 2018 +0800

--
 .../hbase/protobuf/ReplicationProtbufUtil.java  |  2 +-
 .../hadoop/hbase/regionserver/HRegion.java  |  2 +-
 .../hbase/regionserver/HRegionServer.java   |  5 +--
 .../hbase/regionserver/RSRpcServices.java   | 25 +--
 .../RejectReplicationRequestStateChecker.java   | 45 
 .../ReplaySyncReplicationWALCallable.java   | 24 ++-
 .../replication/regionserver/Replication.java   |  2 +-
 .../regionserver/ReplicationSink.java   | 16 +++
 .../SyncReplicationPeerInfoProvider.java| 11 ++---
 .../SyncReplicationPeerInfoProviderImpl.java| 13 +++---
 .../SyncReplicationPeerMappingManager.java  |  5 +--
 .../hbase/wal/SyncReplicationWALProvider.java   |  7 +--
 .../replication/SyncReplicationTestBase.java| 32 ++
 .../replication/TestSyncReplicationActive.java  | 13 +-
 .../regionserver/TestReplicationSink.java   |  5 +--
 .../regionserver/TestWALEntrySinkFilter.java|  3 +-
 .../wal/TestSyncReplicationWALProvider.java |  6 +--
 17 files changed, 163 insertions(+), 53 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/e08e8bf7/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ReplicationProtbufUtil.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ReplicationProtbufUtil.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ReplicationProtbufUtil.java
index 81dd59e..e01f881 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ReplicationProtbufUtil.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ReplicationProtbufUtil.java
@@ -33,6 +33,7 @@ import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellScanner;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.PrivateCellUtil;
+import org.apache.hadoop.hbase.wal.WAL.Entry;
 import org.apache.hadoop.hbase.wal.WALKeyImpl;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.hbase.io.SizedCellScanner;
@@ -45,7 +46,6 @@ import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminServic
 import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos;
 import org.apache.hadoop.hbase.util.Pair;
-import org.apache.hadoop.hbase.wal.WAL.Entry;
 
 @InterfaceAudience.Private
 public class ReplicationProtbufUtil {

http://git-wip-us.apache.org/repos/asf/hbase/blob/e08e8bf7/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index 42a86c4..8dbccc5 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -1990,7 +1990,7 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
   private boolean shouldForbidMajorCompaction() {
 if (rsServices != null && rsServices.getReplicationSourceService() != 
null) {
   return 
rsServices.getReplicationSourceService().getSyncReplicationPeerInfoProvider()
-  .checkState(getRegionInfo(), ForbidMajorCompactionChecker.get());
+  .checkState(getRegionInfo().getTable(), 
ForbidMajorCompactionChecker.get());
 }
 return false;
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/e08e8bf7/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index 4ea800d..5c7bae0 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -2475,10 +2475,9 @@ public class HRegionServer extends HasThread implemen

[13/50] [abbrv] hbase git commit: HBASE-20590 REST Java client is not able to negotiate with the server in the secure mode

HBASE-20590 REST Java client is not able to negotiate with the server in the 
secure mode

Signed-off-by: Ashish Singhi 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/7da0015a
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/7da0015a
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/7da0015a

Branch: refs/heads/HBASE-19064
Commit: 7da0015a3b58a28ccbae0b03ba7de9ce62b751e1
Parents: 1b716ad
Author: Ashish Singhi 
Authored: Mon Jun 4 14:11:19 2018 +0530
Committer: Ashish Singhi 
Committed: Mon Jun 4 14:11:19 2018 +0530

--
 hbase-examples/pom.xml  |   4 +
 .../hadoop/hbase/rest/RESTDemoClient.java   | 144 +++
 .../apache/hadoop/hbase/rest/client/Client.java |  55 ++-
 3 files changed, 200 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/7da0015a/hbase-examples/pom.xml
--
diff --git a/hbase-examples/pom.xml b/hbase-examples/pom.xml
index 8814491..c74c1ba 100644
--- a/hbase-examples/pom.xml
+++ b/hbase-examples/pom.xml
@@ -183,6 +183,10 @@
   findbugs-annotations
 
 
+  org.apache.hbase
+  hbase-rest
+
+
   junit
   junit
   test

http://git-wip-us.apache.org/repos/asf/hbase/blob/7da0015a/hbase-examples/src/main/java/org/apache/hadoop/hbase/rest/RESTDemoClient.java
--
diff --git 
a/hbase-examples/src/main/java/org/apache/hadoop/hbase/rest/RESTDemoClient.java 
b/hbase-examples/src/main/java/org/apache/hadoop/hbase/rest/RESTDemoClient.java
new file mode 100644
index 000..19fae47
--- /dev/null
+++ 
b/hbase-examples/src/main/java/org/apache/hadoop/hbase/rest/RESTDemoClient.java
@@ -0,0 +1,144 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest;
+
+import java.security.PrivilegedExceptionAction;
+import java.util.HashMap;
+import java.util.Map;
+
+import javax.security.auth.Subject;
+import javax.security.auth.login.AppConfigurationEntry;
+import javax.security.auth.login.Configuration;
+import javax.security.auth.login.LoginContext;
+
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.rest.client.Client;
+import org.apache.hadoop.hbase.rest.client.Cluster;
+import org.apache.hadoop.hbase.rest.client.RemoteHTable;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
+
+@InterfaceAudience.Private
+public class RESTDemoClient {
+
+  private static String host = "localhost";
+  private static int port = 9090;
+  private static boolean secure = false;
+  private static org.apache.hadoop.conf.Configuration conf = null;
+
+  public static void main(String[] args) throws Exception {
+System.out.println("REST Demo");
+System.out.println("Usage: RESTDemoClient [host=localhost] [port=9090] 
[secure=false]");
+System.out.println("This demo assumes you have a table called \"example\""
++ " with a column family called \"family1\"");
+
+// use passed in arguments instead of defaults
+if (args.length >= 1) {
+  host = args[0];
+}
+if (args.length >= 2) {
+  port = Integer.parseInt(args[1]);
+}
+conf = HBaseConfiguration.create();
+String principal = conf.get(Constants.REST_KERBEROS_PRINCIPAL);
+if (principal != null) {
+  secure = true;
+}
+if (args.length >= 3) {
+  secure = Boolean.parseBoolean(args[2]);
+}
+
+final RESTDemoClient client = new RESTDemoClient();
+Subject.doAs(getSubject(), new PrivilegedExceptionAction<Void>() {
+  @Override
+  public Void run() throws Exception {
+client.run();
+ 

[37/50] [abbrv] hbase git commit: HBASE-20370 Also remove the wal file in remote cluster when we finish replicating a file

HBASE-20370 Also remove the wal file in remote cluster when we finish 
replicating a file


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/10a56ce0
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/10a56ce0
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/10a56ce0

Branch: refs/heads/HBASE-19064
Commit: 10a56ce0f8990a2496a707e98b6bdf107ae988ef
Parents: 4af10f2
Author: zhangduo 
Authored: Tue Apr 17 09:04:56 2018 +0800
Committer: zhangduo 
Committed: Tue Jun 5 18:13:58 2018 +0800

--
 .../hbase/replication/ReplicationUtils.java |  36 ++-
 .../regionserver/ReplicationSource.java |  38 +++
 .../ReplicationSourceInterface.java |  21 +++-
 .../regionserver/ReplicationSourceManager.java  | 108 ++-
 .../regionserver/ReplicationSourceShipper.java  |  27 ++---
 .../hbase/wal/SyncReplicationWALProvider.java   |  11 +-
 .../replication/ReplicationSourceDummy.java |  20 ++--
 .../TestReplicationSourceManager.java   | 101 -
 8 files changed, 246 insertions(+), 116 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/10a56ce0/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java
--
diff --git 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java
 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java
index cb22f57..66e9b01 100644
--- 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java
+++ 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java
@@ -22,14 +22,17 @@ import java.util.Collection;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
-
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.CompoundConfiguration;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Helper class for replication.
@@ -37,6 +40,8 @@ import org.apache.yetus.audience.InterfaceAudience;
 @InterfaceAudience.Private
 public final class ReplicationUtils {
 
+  private static final Logger LOG = 
LoggerFactory.getLogger(ReplicationUtils.class);
+
   public static final String REPLICATION_ATTR_NAME = "__rep__";
 
   public static final String REMOTE_WAL_DIR_NAME = "remoteWALs";
@@ -176,4 +181,33 @@ public final class ReplicationUtils {
   return tableCFs != null && tableCFs.containsKey(tableName);
 }
   }
+
+  public static FileSystem getRemoteWALFileSystem(Configuration conf, String 
remoteWALDir)
+  throws IOException {
+return new Path(remoteWALDir).getFileSystem(conf);
+  }
+
+  public static Path getRemoteWALDirForPeer(String remoteWALDir, String 
peerId) {
+return new Path(remoteWALDir, peerId);
+  }
+
+  /**
+   * Do the sleeping logic
+   * @param msg Why we sleep
+   * @param sleepForRetries the base sleep time.
+   * @param sleepMultiplier by how many times the default sleeping time is 
augmented
+   * @param maxRetriesMultiplier the max retry multiplier
+   * @return True if sleepMultiplier is < 
maxRetriesMultiplier
+   */
+  public static boolean sleepForRetries(String msg, long sleepForRetries, int 
sleepMultiplier,
+  int maxRetriesMultiplier) {
+try {
+  LOG.trace("{}, sleeping {} times {}", msg, sleepForRetries, 
sleepMultiplier);
+  Thread.sleep(sleepForRetries * sleepMultiplier);
+} catch (InterruptedException e) {
+  LOG.debug("Interrupted while sleeping between retries");
+  Thread.currentThread().interrupt();
+}
+return sleepMultiplier < maxRetriesMultiplier;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/10a56ce0/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
index d21d83c..bf7e30e 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
@@ -89,8 +89,6 @@ public class ReplicationSource implements 
ReplicationSourceInterface {
 
   protect

[08/50] [abbrv] hbase git commit: HBASE-19761:Fix Checkstyle errors in hbase-zookeeper

HBASE-19761:Fix Checkstyle errors in hbase-zookeeper

Signed-off-by: Jan Hentschel 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/1b98a96c
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/1b98a96c
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/1b98a96c

Branch: refs/heads/HBASE-19064
Commit: 1b98a96caa09ee9be27d6bf028200fe6790ac726
Parents: 9d50048
Author: maoling 
Authored: Tue May 29 21:06:38 2018 +0800
Committer: Jan Hentschel 
Committed: Sat Jun 2 10:08:15 2018 +0200

--
 .../hbase/IntegrationTestMetaReplicas.java  |  2 +-
 .../test/IntegrationTestZKAndFSPermissions.java |  4 +-
 .../replication/ReplicationTrackerZKImpl.java   |  7 +-
 .../replication/ZKReplicationStorageBase.java   |  2 +-
 .../replication/TestReplicationStateZKImpl.java |  5 +-
 .../hbase/rsgroup/RSGroupInfoManagerImpl.java   |  6 +-
 .../rsgroup/VerifyingRSGroupAdminClient.java|  2 +-
 .../apache/hadoop/hbase/ZKNamespaceManager.java |  2 +-
 .../org/apache/hadoop/hbase/ZNodeClearer.java   |  2 +-
 .../backup/example/ZKTableArchiveClient.java|  2 +-
 .../ZKSplitLogManagerCoordination.java  | 12 ++--
 .../ZkSplitLogWorkerCoordination.java   | 20 +++---
 .../hbase/master/ActiveMasterManager.java   | 18 ++---
 .../hbase/master/DrainingServerTracker.java |  8 +--
 .../org/apache/hadoop/hbase/master/HMaster.java | 10 +--
 .../hbase/master/MasterMetaBootstrap.java   |  4 +-
 .../hbase/master/RegionServerTracker.java   | 12 ++--
 .../hadoop/hbase/master/ServerManager.java  |  2 +-
 .../hbase/master/SplitOrMergeTracker.java   |  8 +--
 .../hbase/master/zksyncer/ClientZKSyncer.java   |  2 +-
 .../master/zksyncer/MasterAddressSyncer.java|  2 +-
 .../master/zksyncer/MetaLocationSyncer.java |  4 +-
 .../hadoop/hbase/procedure/ZKProcedureUtil.java |  2 +-
 .../hbase/regionserver/HRegionServer.java   |  2 +-
 .../replication/HBaseReplicationEndpoint.java   |  5 +-
 .../master/ReplicationPeerConfigUpgrader.java   |  2 +-
 .../security/access/ZKPermissionWatcher.java|  8 +--
 .../hbase/security/token/ZKSecretWatcher.java   |  2 +-
 .../visibility/ZKVisibilityLabelWatcher.java|  5 +-
 .../org/apache/hadoop/hbase/util/HBaseFsck.java |  4 +-
 .../hadoop/hbase/util/ZKDataMigrator.java   |  5 +-
 .../hbase/client/TestMetaWithReplicas.java  |  4 +-
 .../hbase/master/TestActiveMasterManager.java   | 15 ++--
 .../hbase/master/TestHMasterRPCException.java   |  5 +-
 .../hbase/master/TestMasterNoCluster.java   |  2 +-
 .../hbase/master/TestMetaShutdownHandler.java   |  3 +-
 .../master/TestMirroringTableStateManager.java  |  3 +-
 .../hbase/master/TestSplitLogManager.java   | 14 ++--
 .../regionserver/TestMasterAddressTracker.java  |  8 ++-
 .../regionserver/TestRegionServerHostname.java  |  4 +-
 .../hbase/regionserver/TestSplitLogWorker.java  | 27 +++
 .../TestReplicationTrackerZKImpl.java   | 17 ++---
 hbase-zookeeper/pom.xml |  7 ++
 .../hbase/zookeeper/ClusterStatusTracker.java   | 14 ++--
 .../hbase/zookeeper/LoadBalancerTracker.java| 10 +--
 .../hbase/zookeeper/MasterAddressTracker.java   | 14 ++--
 .../zookeeper/MasterMaintenanceModeTracker.java |  5 +-
 .../hbase/zookeeper/MetaTableLocator.java   | 27 +++
 .../hbase/zookeeper/RecoverableZooKeeper.java   | 22 +++---
 .../zookeeper/RegionNormalizerTracker.java  |  8 +--
 .../hadoop/hbase/zookeeper/ZKAclReset.java  |  4 +-
 .../hadoop/hbase/zookeeper/ZKClusterId.java |  8 +--
 .../hadoop/hbase/zookeeper/ZKMainServer.java|  8 +--
 .../hadoop/hbase/zookeeper/ZKNodeTracker.java   |  6 +-
 .../hadoop/hbase/zookeeper/ZKSplitLog.java  |  4 +-
 .../apache/hadoop/hbase/zookeeper/ZKUtil.java   | 75 ++--
 .../hadoop/hbase/zookeeper/ZKWatcher.java   | 12 ++--
 .../hbase/zookeeper/TestZKLeaderManager.java|  1 -
 .../hbase/zookeeper/TestZKMainServer.java   |  2 +-
 .../hadoop/hbase/zookeeper/TestZKMulti.java | 40 +--
 .../hbase/zookeeper/TestZKNodeTracker.java  |  6 +-
 .../hbase/zookeeper/TestZKUtilNoServer.java |  4 +-
 62 files changed, 288 insertions(+), 262 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/1b98a96c/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestMetaReplicas.java
--
diff --git 
a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestMetaReplicas.java
 
b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestMetaReplicas.java
index 37e2686..f14b9a5 100644
--- 
a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestMetaReplicas.java
+++ 
b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestMetaReplicas.java
@@ -81,7 +81,7 @@ public class In

[48/50] [abbrv] hbase git commit: HBASE-20660 Reopen regions using ReopenTableRegionsProcedure

HBASE-20660 Reopen regions using ReopenTableRegionsProcedure


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/55abf011
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/55abf011
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/55abf011

Branch: refs/heads/HBASE-19064
Commit: 55abf011ba39f2a23e953a0e9b9bc6db2e096a88
Parents: c8fbb18
Author: zhangduo 
Authored: Thu May 31 09:53:44 2018 +0800
Committer: zhangduo 
Committed: Tue Jun 5 18:13:59 2018 +0800

--
 ...ransitPeerSyncReplicationStateProcedure.java | 24 ++--
 1 file changed, 7 insertions(+), 17 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/55abf011/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/TransitPeerSyncReplicationStateProcedure.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/TransitPeerSyncReplicationStateProcedure.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/TransitPeerSyncReplicationStateProcedure.java
index 81ee6b6..66f67dd 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/TransitPeerSyncReplicationStateProcedure.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/TransitPeerSyncReplicationStateProcedure.java
@@ -18,16 +18,14 @@
 package org.apache.hadoop.hbase.master.replication;
 
 import java.io.IOException;
-import java.util.List;
-import java.util.stream.Collectors;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
-import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.replication.ReplicationPeerConfigUtil;
 import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
 import org.apache.hadoop.hbase.master.MasterFileSystem;
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
+import org.apache.hadoop.hbase.master.procedure.ReopenTableRegionsProcedure;
 import org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer;
 import org.apache.hadoop.hbase.procedure2.ProcedureSuspendedException;
 import org.apache.hadoop.hbase.procedure2.ProcedureYieldException;
@@ -141,11 +139,10 @@ public class TransitPeerSyncReplicationStateProcedure
 }
   }
 
-  private List getRegionsToReopen(MasterProcedureEnv env) {
-return 
env.getReplicationPeerManager().getPeerConfig(peerId).get().getTableCFsMap().keySet()
-  .stream()
-  .flatMap(tn -> 
env.getAssignmentManager().getRegionStates().getRegionsOfTable(tn).stream())
-  .collect(Collectors.toList());
+  private void reopenRegions(MasterProcedureEnv env) {
+addChildProcedure(
+  
env.getReplicationPeerManager().getPeerConfig(peerId).get().getTableCFsMap().keySet().stream()
+
.map(ReopenTableRegionsProcedure::new).toArray(ReopenTableRegionsProcedure[]::new));
   }
 
   private void createDirForRemoteWAL(MasterProcedureEnv env)
@@ -190,7 +187,7 @@ public class TransitPeerSyncReplicationStateProcedure
   }
 
   private void replayRemoteWAL() {
-addChildProcedure(new RecoverStandbyProcedure[] { new 
RecoverStandbyProcedure(peerId) });
+addChildProcedure(new RecoverStandbyProcedure(peerId));
   }
 
   @Override
@@ -252,14 +249,7 @@ public class TransitPeerSyncReplicationStateProcedure
   : 
PeerSyncReplicationStateTransitionState.TRANSIT_PEER_NEW_SYNC_REPLICATION_STATE);
 return Flow.HAS_MORE_STATE;
   case REOPEN_ALL_REGIONS_IN_PEER:
-try {
-  addChildProcedure(
-
env.getAssignmentManager().createReopenProcedures(getRegionsToReopen(env)));
-} catch (IOException e) {
-  LOG.warn("Failed to schedule region reopen for peer {} when starting 
transiting sync " +
-"replication peer state from {} to {}, retry", peerId, fromState, 
toState, e);
-  throw new ProcedureYieldException();
-}
+reopenRegions(env);
 setNextState(
   
PeerSyncReplicationStateTransitionState.TRANSIT_PEER_NEW_SYNC_REPLICATION_STATE);
 return Flow.HAS_MORE_STATE;



[06/50] [abbrv] hbase git commit: HBASE-20667 Rename TestGlobalThrottler to TestReplicationGlobalThrottler

HBASE-20667 Rename TestGlobalThrottler to TestReplicationGlobalThrottler


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/9d500489
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/9d500489
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/9d500489

Branch: refs/heads/HBASE-19064
Commit: 9d5004894c0d4d9bd53dbe79c4650833deacc1a4
Parents: 74ef118
Author: Andrew Purtell 
Authored: Fri Jun 1 16:44:11 2018 -0700
Committer: Andrew Purtell 
Committed: Fri Jun 1 17:01:16 2018 -0700

--
 .../TestGlobalReplicationThrottler.java | 188 ++
 .../regionserver/TestGlobalThrottler.java   | 191 ---
 2 files changed, 188 insertions(+), 191 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/9d500489/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestGlobalReplicationThrottler.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestGlobalReplicationThrottler.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestGlobalReplicationThrottler.java
new file mode 100644
index 000..bf64946
--- /dev/null
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestGlobalReplicationThrottler.java
@@ -0,0 +1,188 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.replication.regionserver;
+
+import java.io.IOException;
+import java.util.concurrent.atomic.AtomicLong;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.HTestConst;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.replication.ReplicationAdmin;
+import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.apache.hadoop.hbase.testclassification.ReplicationTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.hadoop.hbase.util.Threads;
+import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster;
+import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.ClassRule;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.rules.TestName;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@Category({ ReplicationTests.class, LargeTests.class })
+public class TestGlobalReplicationThrottler {
+
+  @ClassRule
+  public static final HBaseClassTestRule CLASS_RULE =
+  HBaseClassTestRule.forClass(TestGlobalReplicationThrottler.class);
+
+  private static final Logger LOG = 
LoggerFactory.getLogger(TestGlobalReplicationThrottler.class);
+  private static final int REPLICATION_SOURCE_QUOTA = 200;
+  private static int numOfPeer = 0;
+  private static Configuration conf1;
+  private static Configuration conf2;
+
+  private static HBaseTestingUtility utility1;
+  private static HBaseTestingUtility utility2;
+
+  private static final byte[] famName = Bytes.toBytes("f");
+  private static final byte[] VALUE = Bytes.toBytes("v");
+  private static final byte[] ROW = Bytes.toBytes("r");
+  private static final byte[][] ROWS = HTestConst.makeNAscii(ROW, 100);
+
+  @Rule
+  public TestName name = new TestName();

[25/50] [abbrv] hbase git commit: HBASE-19078 Add a remote peer cluster wal directory config for synchronous replication

HBASE-19078 Add a remote peer cluster wal directory config for synchronous 
replication

Signed-off-by: zhangduo 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/0eabb9fc
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/0eabb9fc
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/0eabb9fc

Branch: refs/heads/HBASE-19064
Commit: 0eabb9fcba7c5bd608908301395369e4040f42a1
Parents: 94e358c
Author: Guanghao Zhang 
Authored: Sat Jan 13 18:55:28 2018 +0800
Committer: zhangduo 
Committed: Tue Jun 5 17:24:38 2018 +0800

--
 .../replication/ReplicationPeerConfigUtil.java  |  6 ++
 .../replication/ReplicationPeerConfig.java  | 20 -
 .../ReplicationPeerConfigBuilder.java   |  7 ++
 .../src/main/protobuf/Replication.proto |  1 +
 .../replication/ReplicationPeerManager.java | 15 
 .../replication/TestReplicationAdmin.java   | 77 
 .../src/main/ruby/hbase/replication_admin.rb| 14 ++--
 hbase-shell/src/main/ruby/hbase_constants.rb|  1 +
 .../src/main/ruby/shell/commands/add_peer.rb| 21 +-
 .../src/main/ruby/shell/commands/list_peers.rb  | 19 -
 .../test/ruby/hbase/replication_admin_test.rb   | 16 
 11 files changed, 186 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/0eabb9fc/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java
index b1c1713..474ded3 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java
@@ -319,6 +319,9 @@ public final class ReplicationPeerConfigUtil {
 
excludeNamespacesList.stream().map(ByteString::toStringUtf8).collect(Collectors.toSet()));
 }
 
+if (peer.hasRemoteWALDir()) {
+  builder.setRemoteWALDir(peer.getRemoteWALDir());
+}
 return builder.build();
   }
 
@@ -376,6 +379,9 @@ public final class ReplicationPeerConfigUtil {
   }
 }
 
+if (peerConfig.getRemoteWALDir() != null) {
+  builder.setRemoteWALDir(peerConfig.getRemoteWALDir());
+}
 return builder.build();
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/0eabb9fc/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
index e0d9a4c..97abc74 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
@@ -47,6 +47,8 @@ public class ReplicationPeerConfig {
   private Set excludeNamespaces = null;
   private long bandwidth = 0;
   private final boolean serial;
+  // Used by synchronous replication
+  private String remoteWALDir;
 
   private ReplicationPeerConfig(ReplicationPeerConfigBuilderImpl builder) {
 this.clusterKey = builder.clusterKey;
@@ -66,6 +68,7 @@ public class ReplicationPeerConfig {
 : null;
 this.bandwidth = builder.bandwidth;
 this.serial = builder.serial;
+this.remoteWALDir = builder.remoteWALDir;
   }
 
   private Map>
@@ -213,6 +216,10 @@ public class ReplicationPeerConfig {
 return this;
   }
 
+  public String getRemoteWALDir() {
+return this.remoteWALDir;
+  }
+
   public static ReplicationPeerConfigBuilder newBuilder() {
 return new ReplicationPeerConfigBuilderImpl();
   }
@@ -230,7 +237,8 @@ public class ReplicationPeerConfig {
   .setReplicateAllUserTables(peerConfig.replicateAllUserTables())
   .setExcludeTableCFsMap(peerConfig.getExcludeTableCFsMap())
   .setExcludeNamespaces(peerConfig.getExcludeNamespaces())
-  
.setBandwidth(peerConfig.getBandwidth()).setSerial(peerConfig.isSerial());
+  .setBandwidth(peerConfig.getBandwidth()).setSerial(peerConfig.isSerial())
+  .setRemoteWALDir(peerConfig.getRemoteWALDir());
 return builder;
   }
 
@@ -259,6 +267,8 @@ public class ReplicationPeerConfig {
 
 private boolean serial = false;
 
+private String remoteWALDir = null;
+
 @Override
 public ReplicationPeerConfigBuilder setClusterKey(String clusterKey) {
   this.clusterKey = clusterKey;
@@ -327,6 +337,11 @@ public 

[39/50] [abbrv] hbase git commit: HBASE-20425 Do not write the cluster id of the current active cluster when writing remote WAL

HBASE-20425 Do not write the cluster id of the current active cluster when 
writing remote WAL


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/dd986dec
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/dd986dec
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/dd986dec

Branch: refs/heads/HBASE-19064
Commit: dd986dec90e9327cebf9b7c6c2354e36a419921a
Parents: e08e8bf
Author: huzheng 
Authored: Mon Apr 23 17:20:55 2018 +0800
Committer: zhangduo 
Committed: Tue Jun 5 18:13:58 2018 +0800

--
 .../replication/TestSyncReplicationActive.java  | 32 
 1 file changed, 32 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/dd986dec/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSyncReplicationActive.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSyncReplicationActive.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSyncReplicationActive.java
index bff4572..f9020a0 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSyncReplicationActive.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSyncReplicationActive.java
@@ -17,9 +17,17 @@
  */
 package org.apache.hadoop.hbase.replication;
 
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.testclassification.ReplicationTests;
+import org.apache.hadoop.hbase.wal.WAL.Entry;
+import org.apache.hadoop.hbase.wal.WAL.Reader;
+import org.apache.hadoop.hbase.wal.WALFactory;
+import org.junit.Assert;
 import org.junit.ClassRule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
@@ -49,6 +57,9 @@ public class TestSyncReplicationActive extends 
SyncReplicationTestBase {
 // peer is disabled so no data have been replicated
 verifyNotReplicatedThroughRegion(UTIL2, 0, 100);
 
+// Ensure that there's no cluster id in remote log entries.
+verifyNoClusterIdInRemoteLog(UTIL2, PEER_ID);
+
 UTIL2.getAdmin().transitReplicationPeerSyncReplicationState(PEER_ID,
   SyncReplicationState.DOWNGRADE_ACTIVE);
 // confirm that peer with state DA will reject replication request.
@@ -72,4 +83,25 @@ public class TestSyncReplicationActive extends 
SyncReplicationTestBase {
 verifyReplicationRequestRejection(UTIL2, true);
 write(UTIL2, 200, 300);
   }
+
+  private void verifyNoClusterIdInRemoteLog(HBaseTestingUtility utility, 
String peerId)
+  throws Exception {
+FileSystem fs2 = utility.getTestFileSystem();
+Path remoteDir =
+new 
Path(utility.getMiniHBaseCluster().getMaster().getMasterFileSystem().getRootDir(),
+"remoteWALs").makeQualified(fs2.getUri(), 
fs2.getWorkingDirectory());
+FileStatus[] files = fs2.listStatus(new Path(remoteDir, peerId));
+Assert.assertTrue(files.length > 0);
+for (FileStatus file : files) {
+  try (Reader reader =
+  WALFactory.createReader(fs2, file.getPath(), 
utility.getConfiguration())) {
+Entry entry = reader.next();
+Assert.assertTrue(entry != null);
+while (entry != null) {
+  Assert.assertEquals(entry.getKey().getClusterIds().size(), 0);
+  entry = reader.next();
+}
+  }
+}
+  }
 }



[47/50] [abbrv] hbase git commit: HBASE-20576 Check remote WAL directory when creating peer and transiting peer to A

HBASE-20576 Check remote WAL directory when creating peer and transiting peer 
to A


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/fbddf634
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/fbddf634
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/fbddf634

Branch: refs/heads/HBASE-19064
Commit: fbddf63453c61de2b7d48c6f0c0f321c4ed0261f
Parents: 1dc079d
Author: zhangduo 
Authored: Tue May 15 15:07:40 2018 +0800
Committer: zhangduo 
Committed: Tue Jun 5 18:13:59 2018 +0800

--
 .../replication/ReplicationPeerManager.java | 19 +++--
 ...ransitPeerSyncReplicationStateProcedure.java | 73 +---
 .../replication/TestReplicationAdmin.java   | 57 ---
 3 files changed, 110 insertions(+), 39 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/fbddf634/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
index e1d8b51..8e49137 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hbase.master.replication;
 
 import java.io.IOException;
+import java.net.URI;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.EnumSet;
@@ -31,6 +32,7 @@ import java.util.regex.Pattern;
 import java.util.stream.Collectors;
 import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
@@ -45,7 +47,6 @@ import 
org.apache.hadoop.hbase.replication.ReplicationQueueStorage;
 import org.apache.hadoop.hbase.replication.ReplicationStorageFactory;
 import org.apache.hadoop.hbase.replication.ReplicationUtils;
 import org.apache.hadoop.hbase.replication.SyncReplicationState;
-import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.zookeeper.ZKConfig;
 import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
 import org.apache.yetus.audience.InterfaceAudience;
@@ -193,9 +194,9 @@ public class ReplicationPeerManager {
   }
 
   /**
-   * @return the old state, and whether the peer is enabled.
+   * @return the old desciption of the peer
*/
-  Pair 
preTransitPeerSyncReplicationState(String peerId,
+  ReplicationPeerDescription preTransitPeerSyncReplicationState(String peerId,
   SyncReplicationState state) throws DoNotRetryIOException {
 ReplicationPeerDescription desc = checkPeerExists(peerId);
 SyncReplicationState fromState = desc.getSyncReplicationState();
@@ -204,7 +205,7 @@ public class ReplicationPeerManager {
   throw new DoNotRetryIOException("Can not transit current cluster state 
from " + fromState +
 " to " + state + " for peer id=" + peerId);
 }
-return Pair.newPair(fromState, desc.isEnabled());
+return desc;
   }
 
   public void addPeer(String peerId, ReplicationPeerConfig peerConfig, boolean 
enabled)
@@ -384,6 +385,16 @@ public class ReplicationPeerManager {
   "Only support replicated table config for sync replication peer");
   }
 }
+Path remoteWALDir = new Path(peerConfig.getRemoteWALDir());
+if (!remoteWALDir.isAbsolute()) {
+  throw new DoNotRetryIOException(
+"The remote WAL directory " + peerConfig.getRemoteWALDir() + " is not 
absolute");
+}
+URI remoteWALDirUri = remoteWALDir.toUri();
+if (remoteWALDirUri.getScheme() == null || remoteWALDirUri.getAuthority() 
== null) {
+  throw new DoNotRetryIOException("The remote WAL directory " + 
peerConfig.getRemoteWALDir() +
+" is not qualified, you must provide scheme and authority");
+}
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hbase/blob/fbddf634/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/TransitPeerSyncReplicationStateProcedure.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/TransitPeerSyncReplicationStateProcedure.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/TransitPeerSyncReplicationStateProcedure.java
index 0175296..ebe7a93 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/TransitPeerSyncReplicationStateProcedure.java
+++ 
b/

[41/50] [abbrv] hbase git commit: HBASE-19999 Remove the SYNC_REPLICATION_ENABLED flag

HBASE-1 Remove the SYNC_REPLICATION_ENABLED flag


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/95f97d84
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/95f97d84
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/95f97d84

Branch: refs/heads/HBASE-19064
Commit: 95f97d841d280bc07d2d98c7bbd35ab7c06f76ef
Parents: 7fc48c1
Author: Guanghao Zhang 
Authored: Fri Mar 9 11:30:25 2018 +0800
Committer: zhangduo 
Committed: Tue Jun 5 18:13:58 2018 +0800

--
 .../hbase/replication/ReplicationUtils.java  |  2 --
 .../hadoop/hbase/regionserver/HRegionServer.java | 13 -
 .../hbase/wal/SyncReplicationWALProvider.java| 19 ++-
 .../org/apache/hadoop/hbase/wal/WALFactory.java  | 18 --
 .../hbase/replication/TestSyncReplication.java   |  1 -
 .../master/TestRecoverStandbyProcedure.java  |  2 --
 .../wal/TestSyncReplicationWALProvider.java  |  2 --
 7 files changed, 38 insertions(+), 19 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/95f97d84/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java
--
diff --git 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java
 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java
index e402d0f..cb22f57 100644
--- 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java
+++ 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java
@@ -37,8 +37,6 @@ import org.apache.yetus.audience.InterfaceAudience;
 @InterfaceAudience.Private
 public final class ReplicationUtils {
 
-  public static final String SYNC_REPLICATION_ENABLED = 
"hbase.replication.sync.enabled";
-
   public static final String REPLICATION_ATTR_NAME = "__rep__";
 
   public static final String REMOTE_WAL_DIR_NAME = "remoteWALs";

http://git-wip-us.apache.org/repos/asf/hbase/blob/95f97d84/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index 65aa95d..4b495c0 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -1803,10 +1803,8 @@ public class HRegionServer extends HasThread implements
   private void setupWALAndReplication() throws IOException {
 boolean isMasterNoTableOrSystemTableOnly = this instanceof HMaster &&
   (!LoadBalancer.isTablesOnMaster(conf) || 
LoadBalancer.isSystemTablesOnlyOnMaster(conf));
-if (isMasterNoTableOrSystemTableOnly) {
-  conf.setBoolean(ReplicationUtils.SYNC_REPLICATION_ENABLED, false);
-}
-WALFactory factory = new WALFactory(conf, serverName.toString());
+WALFactory factory =
+new WALFactory(conf, serverName.toString(), 
!isMasterNoTableOrSystemTableOnly);
 if (!isMasterNoTableOrSystemTableOnly) {
   // TODO Replication make assumptions here based on the default 
filesystem impl
   Path oldLogDir = new Path(walRootDir, HConstants.HREGION_OLDLOGDIR_NAME);
@@ -1925,11 +1923,8 @@ public class HRegionServer extends HasThread implements
 }
 this.executorService.startExecutorService(ExecutorType.RS_REFRESH_PEER,
   conf.getInt("hbase.regionserver.executor.refresh.peer.threads", 2));
-
-if (conf.getBoolean(ReplicationUtils.SYNC_REPLICATION_ENABLED, false)) {
-  
this.executorService.startExecutorService(ExecutorType.RS_REPLAY_SYNC_REPLICATION_WAL,
-
conf.getInt("hbase.regionserver.executor.replay.sync.replication.wal.threads", 
2));
-}
+
this.executorService.startExecutorService(ExecutorType.RS_REPLAY_SYNC_REPLICATION_WAL,
+  
conf.getInt("hbase.regionserver.executor.replay.sync.replication.wal.threads", 
1));
 
 Threads.setDaemonThreadRunning(this.walRoller.getThread(), getName() + 
".logRoller",
 uncaughtExceptionHandler);

http://git-wip-us.apache.org/repos/asf/hbase/blob/95f97d84/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/SyncReplicationWALProvider.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/SyncReplicationWALProvider.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/SyncReplicationWALProvider.java
index 282aa21..54287fe 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/SyncReplicationWALProvider.java

[09/50] [abbrv] hbase git commit: HBASE-19475 Extended backporting strategy in documentation

HBASE-19475 Extended backporting strategy in documentation


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/5b24d4ce
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/5b24d4ce
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/5b24d4ce

Branch: refs/heads/HBASE-19064
Commit: 5b24d4ce0cd3a24da4b0ff9e3dc80286b783b11b
Parents: 1b98a96
Author: Jan Hentschel 
Authored: Sun Dec 10 15:17:50 2017 +0100
Committer: Jan Hentschel 
Committed: Sat Jun 2 10:36:01 2018 +0200

--
 src/main/asciidoc/_chapters/developer.adoc | 6 +-
 1 file changed, 5 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/5b24d4ce/src/main/asciidoc/_chapters/developer.adoc
--
diff --git a/src/main/asciidoc/_chapters/developer.adoc 
b/src/main/asciidoc/_chapters/developer.adoc
index 637cdbc..6d0a7d1 100644
--- a/src/main/asciidoc/_chapters/developer.adoc
+++ b/src/main/asciidoc/_chapters/developer.adoc
@@ -2201,7 +2201,11 @@ If the contributor used +git format-patch+ to generate 
the patch, their commit m
 [[committer.amending.author]]
 == Add Amending-Author when a conflict cherrypick backporting
 
-We've established the practice of committing to master and then cherry picking 
back to branches whenever possible.
+We've established the practice of committing to master and then cherry picking 
back to branches whenever possible, unless
+
+* it's breaking compat: In which case, if it can go in minor releases, 
backport to branch-1 and branch-2.
+* it's a new feature: No for maintenance releases, For minor releases, discuss 
and arrive at consensus.
+
 When there is a minor conflict we can fix it up and just proceed with the 
commit.
 The resulting commit retains the original author.
 When the amending author is different from the original committer, add notice 
of this at the end of the commit message as: `Amending-Author: Author



[36/50] [abbrv] hbase git commit: HBASE-20163 Forbid major compaction when standby cluster replay the remote wals

HBASE-20163 Forbid major compaction when standby cluster replay the remote wals


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/4af10f22
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/4af10f22
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/4af10f22

Branch: refs/heads/HBASE-19064
Commit: 4af10f223a4c9f0856c6326665185789eff837b7
Parents: d0187a3
Author: Guanghao Zhang 
Authored: Thu Apr 12 14:44:25 2018 +0800
Committer: zhangduo 
Committed: Tue Jun 5 18:13:58 2018 +0800

--
 .../hadoop/hbase/regionserver/HRegion.java  | 18 
 .../hbase/regionserver/HRegionServer.java   |  2 +-
 .../regionserver/RegionServerServices.java  |  5 +++
 .../ForbidMajorCompactionChecker.java   | 44 
 .../hadoop/hbase/MockRegionServerServices.java  |  6 +++
 .../hadoop/hbase/master/MockRegionServer.java   |  6 +++
 6 files changed, 80 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/4af10f22/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index 05fb036..42a86c4 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -144,6 +144,7 @@ import 
org.apache.hadoop.hbase.regionserver.ScannerContext.LimitScope;
 import org.apache.hadoop.hbase.regionserver.ScannerContext.NextState;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionContext;
 import 
org.apache.hadoop.hbase.regionserver.compactions.CompactionLifeCycleTracker;
+import 
org.apache.hadoop.hbase.regionserver.compactions.ForbidMajorCompactionChecker;
 import 
org.apache.hadoop.hbase.regionserver.throttle.CompactionThroughputControllerFactory;
 import 
org.apache.hadoop.hbase.regionserver.throttle.NoLimitThroughputController;
 import org.apache.hadoop.hbase.regionserver.throttle.StoreHotnessProtector;
@@ -1986,6 +1987,14 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
 return compact(compaction, store, throughputController, null);
   }
 
+  private boolean shouldForbidMajorCompaction() {
+if (rsServices != null && rsServices.getReplicationSourceService() != 
null) {
+  return 
rsServices.getReplicationSourceService().getSyncReplicationPeerInfoProvider()
+  .checkState(getRegionInfo(), ForbidMajorCompactionChecker.get());
+}
+return false;
+  }
+
   public boolean compact(CompactionContext compaction, HStore store,
   ThroughputController throughputController, User user) throws IOException 
{
 assert compaction != null && compaction.hasSelection();
@@ -1995,6 +2004,15 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
   store.cancelRequestedCompaction(compaction);
   return false;
 }
+
+if (compaction.getRequest().isAllFiles() && shouldForbidMajorCompaction()) 
{
+  LOG.warn("Skipping major compaction on " + this
+  + " because this cluster is transiting sync replication state"
+  + " from STANDBY to DOWNGRADE_ACTIVE");
+  store.cancelRequestedCompaction(compaction);
+  return false;
+}
+
 MonitoredTask status = null;
 boolean requestNeedsCancellation = true;
 /*

http://git-wip-us.apache.org/repos/asf/hbase/blob/4af10f22/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index 4b495c0..4ea800d 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -2469,7 +2469,7 @@ public class HRegionServer extends HasThread implements
* @return Return the object that implements the replication
* source executorService.
*/
-  @VisibleForTesting
+  @Override
   public ReplicationSourceService getReplicationSourceService() {
 return replicationSourceHandler;
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/4af10f22/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java
 
b/hbase-server/

[27/50] [abbrv] hbase git commit: HBASE-19781 Add a new cluster state flag for synchronous replication

HBASE-19781 Add a new cluster state flag for synchronous replication


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/7bef8370
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/7bef8370
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/7bef8370

Branch: refs/heads/HBASE-19064
Commit: 7bef83706038d57d6d93f9564db99e91d21192dc
Parents: 500ac83
Author: Guanghao Zhang 
Authored: Mon Jan 22 11:44:49 2018 +0800
Committer: zhangduo 
Committed: Tue Jun 5 17:24:38 2018 +0800

--
 .../org/apache/hadoop/hbase/client/Admin.java   |  39 +
 .../apache/hadoop/hbase/client/AsyncAdmin.java  |  31 
 .../hadoop/hbase/client/AsyncHBaseAdmin.java|   7 +
 .../hbase/client/ConnectionImplementation.java  |   9 ++
 .../apache/hadoop/hbase/client/HBaseAdmin.java  |  26 +++
 .../hadoop/hbase/client/RawAsyncHBaseAdmin.java |  15 ++
 .../client/ShortCircuitMasterConnection.java|   9 ++
 .../replication/ReplicationPeerConfigUtil.java  |  26 +--
 .../replication/ReplicationPeerDescription.java |  10 +-
 .../hbase/replication/SyncReplicationState.java |  48 ++
 .../hbase/shaded/protobuf/RequestConverter.java |  10 ++
 .../src/main/protobuf/Master.proto  |   4 +
 .../src/main/protobuf/MasterProcedure.proto |   4 +
 .../src/main/protobuf/Replication.proto |  20 +++
 .../replication/ReplicationPeerStorage.java |  18 ++-
 .../hbase/replication/ReplicationUtils.java |   1 +
 .../replication/ZKReplicationPeerStorage.java   |  61 +--
 .../replication/TestReplicationStateBasic.java  |  23 ++-
 .../TestZKReplicationPeerStorage.java   |  12 +-
 .../hbase/coprocessor/MasterObserver.java   |  23 +++
 .../org/apache/hadoop/hbase/master/HMaster.java |  12 ++
 .../hbase/master/MasterCoprocessorHost.java |  21 +++
 .../hadoop/hbase/master/MasterRpcServices.java  |  17 ++
 .../hadoop/hbase/master/MasterServices.java |   9 ++
 .../procedure/PeerProcedureInterface.java   |   2 +-
 .../replication/ReplicationPeerManager.java |  51 +-
 ...ransitPeerSyncReplicationStateProcedure.java | 159 +++
 .../hbase/security/access/AccessController.java |   8 +
 .../replication/TestReplicationAdmin.java   |  62 
 .../hbase/master/MockNoopMasterServices.java|   8 +-
 .../cleaner/TestReplicationHFileCleaner.java|   4 +-
 .../TestReplicationTrackerZKImpl.java   |   6 +-
 .../TestReplicationSourceManager.java   |   3 +-
 .../security/access/TestAccessController.java   |  16 ++
 .../hbase/util/TestHBaseFsckReplication.java|   5 +-
 .../src/main/ruby/hbase/replication_admin.rb|  15 ++
 hbase-shell/src/main/ruby/shell.rb  |   1 +
 .../src/main/ruby/shell/commands/list_peers.rb  |   6 +-
 .../transit_peer_sync_replication_state.rb  |  44 +
 .../test/ruby/hbase/replication_admin_test.rb   |  24 +++
 40 files changed, 816 insertions(+), 53 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/7bef8370/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
index 331f2d1..39542e4 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
@@ -51,6 +51,7 @@ import 
org.apache.hadoop.hbase.regionserver.wal.FailedLogCloseException;
 import org.apache.hadoop.hbase.replication.ReplicationException;
 import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
 import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
+import org.apache.hadoop.hbase.replication.SyncReplicationState;
 import org.apache.hadoop.hbase.snapshot.HBaseSnapshotException;
 import org.apache.hadoop.hbase.snapshot.RestoreSnapshotException;
 import org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
@@ -2657,6 +2658,44 @@ public interface Admin extends Abortable, Closeable {
   List listReplicationPeers(Pattern pattern) 
throws IOException;
 
   /**
+   * Transit current cluster to a new state in a synchronous replication peer.
+   * @param peerId a short name that identifies the peer
+   * @param state a new state of current cluster
+   * @throws IOException if a remote or network exception occurs
+   */
+  void transitReplicationPeerSyncReplicationState(String peerId, 
SyncReplicationState state)
+  throws IOException;
+
+  /**
+   * Transit current cluster to a new state in a synchronous replication peer. 
But does not block
+   * and wait for it.
+   * 
+   * You can use Future.get(long, TimeUnit) to wait on the operation to 
complete. It may throw
+   * Execu

[42/50] [abbrv] hbase git commit: HBASE-20432 Cleanup related resources when remove a sync replication peer

HBASE-20432 Cleanup related resources when remove a sync replication peer


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/84b69e2a
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/84b69e2a
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/84b69e2a

Branch: refs/heads/HBASE-19064
Commit: 84b69e2a717e022b986791d2a9755f43ebe93f4d
Parents: 3f1b25e
Author: huzheng 
Authored: Wed Apr 18 20:38:33 2018 +0800
Committer: zhangduo 
Committed: Tue Jun 5 18:13:59 2018 +0800

--
 .../master/replication/RemovePeerProcedure.java | 10 +
 .../ReplaySyncReplicationWALManager.java|  8 
 .../replication/SyncReplicationTestBase.java| 45 +---
 .../replication/TestSyncReplicationActive.java  |  9 ++--
 .../replication/TestSyncReplicationStandBy.java | 31 --
 5 files changed, 89 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/84b69e2a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/RemovePeerProcedure.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/RemovePeerProcedure.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/RemovePeerProcedure.java
index 82dc07e..7335fe0 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/RemovePeerProcedure.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/RemovePeerProcedure.java
@@ -66,9 +66,19 @@ public class RemovePeerProcedure extends ModifyPeerProcedure 
{
 env.getReplicationPeerManager().removePeer(peerId);
   }
 
+  private void removeRemoteWALs(MasterProcedureEnv env) throws IOException {
+ReplaySyncReplicationWALManager remoteWALManager =
+env.getMasterServices().getReplaySyncReplicationWALManager();
+remoteWALManager.removePeerRemoteWALs(peerId);
+remoteWALManager.removePeerReplayWALDir(peerId);
+  }
+
   @Override
   protected void postPeerModification(MasterProcedureEnv env)
   throws IOException, ReplicationException {
+if (peerConfig.isSyncReplication()) {
+  removeRemoteWALs(env);
+}
 env.getReplicationPeerManager().removeAllQueuesAndHFileRefs(peerId);
 if (peerConfig.isSerial()) {
   env.getReplicationPeerManager().removeAllLastPushedSeqIds(peerId);

http://git-wip-us.apache.org/repos/asf/hbase/blob/84b69e2a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplaySyncReplicationWALManager.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplaySyncReplicationWALManager.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplaySyncReplicationWALManager.java
index 72f5c37..eac5aa4 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplaySyncReplicationWALManager.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplaySyncReplicationWALManager.java
@@ -115,6 +115,14 @@ public class ReplaySyncReplicationWALManager {
 }
   }
 
+  public void removePeerRemoteWALs(String peerId) throws IOException {
+Path remoteWALDir = getPeerRemoteWALDir(peerId);
+if (fs.exists(remoteWALDir) && !fs.delete(remoteWALDir, true)) {
+  throw new IOException(
+  "Failed to remove remote WALs dir " + remoteWALDir + " for peer id=" 
+ peerId);
+}
+  }
+
   public void initPeerWorkers(String peerId) {
 BlockingQueue servers = new LinkedBlockingQueue<>();
 services.getServerManager().getOnlineServers().keySet()

http://git-wip-us.apache.org/repos/asf/hbase/blob/84b69e2a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/SyncReplicationTestBase.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/SyncReplicationTestBase.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/SyncReplicationTestBase.java
index 0d5fce8..de679be 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/SyncReplicationTestBase.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/SyncReplicationTestBase.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.replication;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 import java.io.IOException;
 import java.util.ArrayList;
@@ -71,6 +72,10 @@ public class SyncReplicationTestBase {
 
   protected static String PEER_ID = "1";
 
+  protected static Path remoteWALDir1;
+
+  protected static Path remoteWALDir2;
+
   pr

[17/50] [abbrv] hbase git commit: HBASE-20634 Reopen region while server crash can cause the procedure to be stuck; ADDENDUM

HBASE-20634 Reopen region while server crash can cause the procedure to be 
stuck; ADDENDUM


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/d99ba62b
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/d99ba62b
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/d99ba62b

Branch: refs/heads/HBASE-19064
Commit: d99ba62b1202b291bbcc3b7af0cc8a039e1eb32c
Parents: 03c0f7f
Author: Michael Stack 
Authored: Mon Jun 4 12:38:56 2018 -0700
Committer: Michael Stack 
Committed: Mon Jun 4 12:39:39 2018 -0700

--
 .../hbase/master/replication/RefreshPeerProcedure.java  | 9 ++---
 1 file changed, 6 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/d99ba62b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/RefreshPeerProcedure.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/RefreshPeerProcedure.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/RefreshPeerProcedure.java
index ba9bcdc..10e16e9 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/RefreshPeerProcedure.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/RefreshPeerProcedure.java
@@ -22,6 +22,7 @@ import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
 import org.apache.hadoop.hbase.master.procedure.PeerProcedureInterface;
 import 
org.apache.hadoop.hbase.master.procedure.RSProcedureDispatcher.ServerOperation;
+import org.apache.hadoop.hbase.procedure2.FailedRemoteDispatchException;
 import org.apache.hadoop.hbase.procedure2.Procedure;
 import org.apache.hadoop.hbase.procedure2.ProcedureEvent;
 import org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer;
@@ -166,10 +167,12 @@ public class RefreshPeerProcedure extends 
Procedure
   // retry
   dispatched = false;
 }
-if (!env.getRemoteDispatcher().addOperationToNode(targetServer, this)) {
+try {
+  env.getRemoteDispatcher().addOperationToNode(targetServer, this);
+} catch (FailedRemoteDispatchException frde) {
   LOG.info("Can not add remote operation for refreshing peer {} for {} to 
{}, " +
-"this usually because the server is already dead, " +
-"give up and mark the procedure as complete", peerId, type, 
targetServer);
+"this is usually because the server is already dead, " +
+"give up and mark the procedure as complete", peerId, type, 
targetServer, frde);
   return null;
 }
 dispatched = true;



[33/50] [abbrv] hbase git commit: HBASE-19990 Create remote wal directory when transitting to state S

HBASE-19990 Create remote wal directory when transitting to state S


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/84ef1374
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/84ef1374
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/84ef1374

Branch: refs/heads/HBASE-19064
Commit: 84ef13742ef40a0c51d048df4f45ca3cb294d721
Parents: 9b741bd
Author: zhangduo 
Authored: Wed Feb 14 16:01:16 2018 +0800
Committer: zhangduo 
Committed: Tue Jun 5 17:24:38 2018 +0800

--
 .../procedure2/ProcedureYieldException.java |  9 --
 .../hbase/replication/ReplicationUtils.java |  2 ++
 .../hadoop/hbase/master/MasterFileSystem.java   | 19 ++---
 .../master/procedure/MasterProcedureEnv.java|  5 
 ...ransitPeerSyncReplicationStateProcedure.java | 29 
 .../hbase/replication/TestSyncReplication.java  |  8 ++
 6 files changed, 55 insertions(+), 17 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/84ef1374/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureYieldException.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureYieldException.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureYieldException.java
index 0487ac5b..dbb9981 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureYieldException.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureYieldException.java
@@ -15,16 +15,21 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-
 package org.apache.hadoop.hbase.procedure2;
 
 import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.yetus.audience.InterfaceStability;
 
-// TODO: Not used yet
+/**
+ * Indicate that a procedure wants to be rescheduled. Usually because there 
are something wrong but
+ * we do not want to fail the procedure.
+ * 
+ * TODO: need to support scheduling after a delay.
+ */
 @InterfaceAudience.Private
 @InterfaceStability.Stable
 public class ProcedureYieldException extends ProcedureException {
+
   /** default constructor */
   public ProcedureYieldException() {
 super();

http://git-wip-us.apache.org/repos/asf/hbase/blob/84ef1374/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java
--
diff --git 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java
 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java
index d94cb00..e402d0f 100644
--- 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java
+++ 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java
@@ -41,6 +41,8 @@ public final class ReplicationUtils {
 
   public static final String REPLICATION_ATTR_NAME = "__rep__";
 
+  public static final String REMOTE_WAL_DIR_NAME = "remoteWALs";
+
   private ReplicationUtils() {
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/84ef1374/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
index 864be02..7ccbd71 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
@@ -41,6 +41,7 @@ import org.apache.hadoop.hbase.log.HBaseMarkers;
 import org.apache.hadoop.hbase.mob.MobConstants;
 import org.apache.hadoop.hbase.procedure2.store.wal.WALProcedureStore;
 import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.replication.ReplicationUtils;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSTableDescriptors;
 import org.apache.hadoop.hbase.util.FSUtils;
@@ -133,7 +134,6 @@ public class MasterFileSystem {
* Idempotent.
*/
   private void createInitialFileSystemLayout() throws IOException {
-
 final String[] protectedSubDirs = new String[] {
 HConstants.BASE_NAMESPACE_DIR,
 HConstants.HFILE_ARCHIVE_DIRECTORY,
@@ -145,7 +145,8 @@ public class MasterFileSystem {
   HConstants.HREGION_LOGDIR_NAME,
   HConstants.HREGION_OLDLOGDIR_NAME,
   HConstants.CORRUPT_DIR_NAME,
-  WALProcedureStore.MASTER_PROCEDURE_LOGDIR
+  WALProcedureStore.MASTER_PROCEDURE_LOGDIR,
+

  1   2   >