hbase git commit: HBASE-20893 Data loss if splitting region while ServerCrashProcedure executing

2018-07-22 Thread allan163
Repository: hbase
Updated Branches:
  refs/heads/master 37de961f2 -> 4804483f7


HBASE-20893 Data loss if splitting region while ServerCrashProcedure executing


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/4804483f
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/4804483f
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/4804483f

Branch: refs/heads/master
Commit: 4804483f7e55edf91a8e9d7ad30ad8239a96eaf3
Parents: 37de961
Author: Allan Yang 
Authored: Mon Jul 23 14:48:43 2018 +0800
Committer: Allan Yang 
Committed: Mon Jul 23 14:48:43 2018 +0800

--
 .../src/main/protobuf/MasterProcedure.proto |   1 +
 .../assignment/SplitTableRegionProcedure.java   |  23 
 .../apache/hadoop/hbase/wal/WALSplitter.java|  24 
 .../master/TestSplitRegionWhileRSCrash.java | 122 +++
 4 files changed, 170 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/4804483f/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
--
diff --git a/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto b/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
index a062e9a..d651011 100644
--- a/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
@@ -248,6 +248,7 @@ enum SplitTableRegionState {
   SPLIT_TABLE_REGION_PRE_OPERATION_AFTER_META = 8;
   SPLIT_TABLE_REGION_OPEN_CHILD_REGIONS = 9;
   SPLIT_TABLE_REGION_POST_OPERATION = 10;
+  SPLIT_TABLE_REGIONS_CHECK_CLOSED_REGIONS = 11;
 }
 
 message SplitTableRegionStateData {

http://git-wip-us.apache.org/repos/asf/hbase/blob/4804483f/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java
index 2306037..f0ea25b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java
@@ -139,6 +139,21 @@ public class SplitTableRegionProcedure
   }
 
   /**
+   * Check whether there are recovered.edits files in the closed parent region.
+   * If so, the region was not closed properly and we must abort the split
+   * to prevent data loss.
+   * @param env master env
+   * @throws IOException IOException
+   */
+  private void checkClosedRegion(final MasterProcedureEnv env) throws IOException {
+    if (WALSplitter.hasRecoveredEdits(env.getMasterServices().getFileSystem(),
+        env.getMasterConfiguration(), getRegion())) {
+      throw new IOException("Recovered.edits are found in Region: " + getRegion()
+          + ", abort split to prevent data loss");
+    }
+  }
+
+  /**
* Check whether the region is splittable
* @param env MasterProcedureEnv
* @param regionToSplit parent Region to be split
@@ -238,6 +253,10 @@ public class SplitTableRegionProcedure
   break;
 case SPLIT_TABLE_REGION_CLOSE_PARENT_REGION:
          addChildProcedure(createUnassignProcedures(env, getRegionReplication(env)));
+          setNextState(SplitTableRegionState.SPLIT_TABLE_REGIONS_CHECK_CLOSED_REGIONS);
+          break;
+        case SPLIT_TABLE_REGIONS_CHECK_CLOSED_REGIONS:
+          checkClosedRegion(env);
           setNextState(SplitTableRegionState.SPLIT_TABLE_REGION_CREATE_DAUGHTER_REGIONS);
           break;
 case SPLIT_TABLE_REGION_CREATE_DAUGHTER_REGIONS:
@@ -312,6 +331,10 @@ public class SplitTableRegionProcedure
        case SPLIT_TABLE_REGION_WRITE_MAX_SEQUENCE_ID_FILE:
          // Doing nothing, as re-opening the parent region will clean up daughter region directories.
          break;
+        case SPLIT_TABLE_REGIONS_CHECK_CLOSED_REGIONS:
+          // Doing nothing; rolling back SPLIT_TABLE_REGION_CLOSE_PARENT_REGION
+          // will bring the parent region back online.
+          break;
 case SPLIT_TABLE_REGION_CLOSE_PARENT_REGION:
   openParentRegion(env);
   break;

http://git-wip-us.apache.org/repos/asf/hbase/blob/4804483f/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
index f020e7a..65d5fb7 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
+++ b/hbase-
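
The fix hinges on the new WALSplitter.hasRecoveredEdits call above: if the
closed parent still has unreplayed recovered.edits on disk, the split must
abort. A minimal, self-contained sketch of that kind of check using only the
Hadoop FileSystem API; the regionDir parameter and the length > 0 heuristic
are illustrative assumptions, not the committed helper:

import java.io.IOException;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class RecoveredEditsCheck {
  // Returns true if the region directory still contains a non-empty
  // "recovered.edits" directory, i.e. WAL replay has not finished.
  static boolean hasRecoveredEdits(FileSystem fs, Path regionDir) throws IOException {
    Path editsDir = new Path(regionDir, "recovered.edits");
    if (!fs.exists(editsDir)) {
      return false;
    }
    for (FileStatus status : fs.listStatus(editsDir)) {
      if (status.getLen() > 0) {
        return true; // at least one edits file is still waiting to be replayed
      }
    }
    return false;
  }
}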

hbase git commit: HBASE-20893 Data loss if splitting region while ServerCrashProcedure executing

2018-07-22 Thread allan163
Repository: hbase
Updated Branches:
  refs/heads/branch-2 654480326 -> 786fd9137


HBASE-20893 Data loss if splitting region while ServerCrashProcedure executing


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/786fd913
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/786fd913
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/786fd913

Branch: refs/heads/branch-2
Commit: 786fd9137a88854731b097b8a5f882b7d0df99c9
Parents: 6544803
Author: Allan Yang 
Authored: Mon Jul 23 14:41:02 2018 +0800
Committer: Allan Yang 
Committed: Mon Jul 23 14:46:32 2018 +0800

--
 .../src/main/protobuf/MasterProcedure.proto |   1 +
 .../assignment/SplitTableRegionProcedure.java   |  23 
 .../apache/hadoop/hbase/wal/WALSplitter.java|  24 
 .../master/TestSplitRegionWhileRSCrash.java | 122 +++
 4 files changed, 170 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/786fd913/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
--
diff --git a/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto b/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
index 39d2824..1b137c7 100644
--- a/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
@@ -248,6 +248,7 @@ enum SplitTableRegionState {
   SPLIT_TABLE_REGION_PRE_OPERATION_AFTER_META = 8;
   SPLIT_TABLE_REGION_OPEN_CHILD_REGIONS = 9;
   SPLIT_TABLE_REGION_POST_OPERATION = 10;
+  SPLIT_TABLE_REGIONS_CHECK_CLOSED_REGIONS = 11;
 }
 
 message SplitTableRegionStateData {

http://git-wip-us.apache.org/repos/asf/hbase/blob/786fd913/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java
index 2306037..f0ea25b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java
@@ -139,6 +139,21 @@ public class SplitTableRegionProcedure
   }
 
   /**
+   * Check whether there are recovered.edits files in the closed parent region.
+   * If so, the region was not closed properly and we must abort the split
+   * to prevent data loss.
+   * @param env master env
+   * @throws IOException IOException
+   */
+  private void checkClosedRegion(final MasterProcedureEnv env) throws IOException {
+    if (WALSplitter.hasRecoveredEdits(env.getMasterServices().getFileSystem(),
+        env.getMasterConfiguration(), getRegion())) {
+      throw new IOException("Recovered.edits are found in Region: " + getRegion()
+          + ", abort split to prevent data loss");
+    }
+  }
+
+  /**
* Check whether the region is splittable
* @param env MasterProcedureEnv
* @param regionToSplit parent Region to be split
@@ -238,6 +253,10 @@ public class SplitTableRegionProcedure
   break;
 case SPLIT_TABLE_REGION_CLOSE_PARENT_REGION:
          addChildProcedure(createUnassignProcedures(env, getRegionReplication(env)));
+          setNextState(SplitTableRegionState.SPLIT_TABLE_REGIONS_CHECK_CLOSED_REGIONS);
+          break;
+        case SPLIT_TABLE_REGIONS_CHECK_CLOSED_REGIONS:
+          checkClosedRegion(env);
           setNextState(SplitTableRegionState.SPLIT_TABLE_REGION_CREATE_DAUGHTER_REGIONS);
           break;
 case SPLIT_TABLE_REGION_CREATE_DAUGHTER_REGIONS:
@@ -312,6 +331,10 @@ public class SplitTableRegionProcedure
        case SPLIT_TABLE_REGION_WRITE_MAX_SEQUENCE_ID_FILE:
          // Doing nothing, as re-opening the parent region will clean up daughter region directories.
          break;
+        case SPLIT_TABLE_REGIONS_CHECK_CLOSED_REGIONS:
+          // Doing nothing; rolling back SPLIT_TABLE_REGION_CLOSE_PARENT_REGION
+          // will bring the parent region back online.
+          break;
 case SPLIT_TABLE_REGION_CLOSE_PARENT_REGION:
   openParentRegion(env);
   break;

http://git-wip-us.apache.org/repos/asf/hbase/blob/786fd913/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
index a49b96b..634f44f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
+++ b/hb
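
Note the execute/rollback pairing in the hunks above: every new forward state
needs a matching rollback case, and a pure check state rolls back as a no-op
because SPLIT_TABLE_REGION_CLOSE_PARENT_REGION's rollback already re-opens the
parent. A tiny sketch of that pattern in plain Java (illustrative only, not
HBase's procedure framework):

import java.util.ArrayDeque;
import java.util.Deque;

public class RollbackPairingSketch {
  public static void main(String[] args) {
    Deque<Runnable> undo = new ArrayDeque<>();
    try {
      System.out.println("close parent region");
      undo.push(() -> System.out.println("rollback: re-open parent region"));

      // New intermediate state: verify the parent left no recovered.edits behind.
      undo.push(() -> { });               // a pure check has nothing to undo
      boolean hasRecoveredEdits = true;   // pretend the check failed
      if (hasRecoveredEdits) {
        throw new IllegalStateException("recovered.edits found, abort split");
      }

      System.out.println("create daughter regions");
    } catch (RuntimeException e) {
      System.out.println(e.getMessage());
      while (!undo.isEmpty()) {
        undo.pop().run();                 // unwind completed states in reverse
      }
    }
  }
}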

hbase git commit: HBASE-20893 Data loss if splitting region while ServerCrashProcedure executing

2018-07-22 Thread allan163
Repository: hbase
Updated Branches:
  refs/heads/branch-2.1 9d481f1fa -> af2742fcf


HBASE-20893 Data loss if splitting region while ServerCrashProcedure executing


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/af2742fc
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/af2742fc
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/af2742fc

Branch: refs/heads/branch-2.1
Commit: af2742fcf2fc31945c8bdd09f0f6bd52f857b77f
Parents: 9d481f1
Author: Allan Yang 
Authored: Mon Jul 23 14:33:59 2018 +0800
Committer: Allan Yang 
Committed: Mon Jul 23 14:35:27 2018 +0800

--
 .../src/main/protobuf/MasterProcedure.proto |   1 +
 .../assignment/SplitTableRegionProcedure.java   |  23 
 .../apache/hadoop/hbase/wal/WALSplitter.java|  24 
 .../master/TestSplitRegionWhileRSCrash.java | 122 +++
 4 files changed, 170 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/af2742fc/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
--
diff --git a/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto b/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
index 39d2824..1b137c7 100644
--- a/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
@@ -248,6 +248,7 @@ enum SplitTableRegionState {
   SPLIT_TABLE_REGION_PRE_OPERATION_AFTER_META = 8;
   SPLIT_TABLE_REGION_OPEN_CHILD_REGIONS = 9;
   SPLIT_TABLE_REGION_POST_OPERATION = 10;
+  SPLIT_TABLE_REGIONS_CHECK_CLOSED_REGIONS = 11;
 }
 
 message SplitTableRegionStateData {

http://git-wip-us.apache.org/repos/asf/hbase/blob/af2742fc/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java
index 2306037..f0ea25b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java
@@ -139,6 +139,21 @@ public class SplitTableRegionProcedure
   }
 
   /**
+   * Check whether there are recovered.edits files in the closed parent region.
+   * If so, the region was not closed properly and we must abort the split
+   * to prevent data loss.
+   * @param env master env
+   * @throws IOException IOException
+   */
+  private void checkClosedRegion(final MasterProcedureEnv env) throws IOException {
+    if (WALSplitter.hasRecoveredEdits(env.getMasterServices().getFileSystem(),
+        env.getMasterConfiguration(), getRegion())) {
+      throw new IOException("Recovered.edits are found in Region: " + getRegion()
+          + ", abort split to prevent data loss");
+    }
+  }
+
+  /**
* Check whether the region is splittable
* @param env MasterProcedureEnv
* @param regionToSplit parent Region to be split
@@ -238,6 +253,10 @@ public class SplitTableRegionProcedure
   break;
 case SPLIT_TABLE_REGION_CLOSE_PARENT_REGION:
          addChildProcedure(createUnassignProcedures(env, getRegionReplication(env)));
+          setNextState(SplitTableRegionState.SPLIT_TABLE_REGIONS_CHECK_CLOSED_REGIONS);
+          break;
+        case SPLIT_TABLE_REGIONS_CHECK_CLOSED_REGIONS:
+          checkClosedRegion(env);
           setNextState(SplitTableRegionState.SPLIT_TABLE_REGION_CREATE_DAUGHTER_REGIONS);
           break;
 case SPLIT_TABLE_REGION_CREATE_DAUGHTER_REGIONS:
@@ -312,6 +331,10 @@ public class SplitTableRegionProcedure
        case SPLIT_TABLE_REGION_WRITE_MAX_SEQUENCE_ID_FILE:
          // Doing nothing, as re-opening the parent region will clean up daughter region directories.
          break;
+        case SPLIT_TABLE_REGIONS_CHECK_CLOSED_REGIONS:
+          // Doing nothing; rolling back SPLIT_TABLE_REGION_CLOSE_PARENT_REGION
+          // will bring the parent region back online.
+          break;
 case SPLIT_TABLE_REGION_CLOSE_PARENT_REGION:
   openParentRegion(env);
   break;

http://git-wip-us.apache.org/repos/asf/hbase/blob/af2742fc/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
index a49b96b..634f44f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
+++ 

hbase git commit: HBASE-20893 Data loss if splitting region while ServerCrashProcedure executing

2018-07-22 Thread allan163
Repository: hbase
Updated Branches:
  refs/heads/branch-2.0 8363b2f10 -> a90a20b4c


HBASE-20893 Data loss if splitting region while ServerCrashProcedure executing


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/a90a20b4
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/a90a20b4
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/a90a20b4

Branch: refs/heads/branch-2.0
Commit: a90a20b4ce491cef70c1506d4bef1c4cdbf28d76
Parents: 8363b2f
Author: Allan Yang 
Authored: Mon Jul 23 14:30:00 2018 +0800
Committer: Allan Yang 
Committed: Mon Jul 23 14:31:03 2018 +0800

--
 .../src/main/protobuf/MasterProcedure.proto |   1 +
 .../assignment/SplitTableRegionProcedure.java   |  23 
 .../apache/hadoop/hbase/wal/WALSplitter.java|  24 
 .../master/TestSplitRegionWhileRSCrash.java | 122 +++
 4 files changed, 170 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/a90a20b4/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
--
diff --git a/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto b/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
index c2c6dce..b9b78a0 100644
--- a/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
@@ -246,6 +246,7 @@ enum SplitTableRegionState {
   SPLIT_TABLE_REGION_PRE_OPERATION_AFTER_META = 8;
   SPLIT_TABLE_REGION_OPEN_CHILD_REGIONS = 9;
   SPLIT_TABLE_REGION_POST_OPERATION = 10;
+  SPLIT_TABLE_REGIONS_CHECK_CLOSED_REGIONS = 11;
 }
 
 message SplitTableRegionStateData {

http://git-wip-us.apache.org/repos/asf/hbase/blob/a90a20b4/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java
index 994983f..11cfb8f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java
@@ -139,6 +139,21 @@ public class SplitTableRegionProcedure
   }
 
   /**
+   * Check whether there are recovered.edits files in the closed parent region.
+   * If so, the region was not closed properly and we must abort the split
+   * to prevent data loss.
+   * @param env master env
+   * @throws IOException IOException
+   */
+  private void checkClosedRegion(final MasterProcedureEnv env) throws IOException {
+    if (WALSplitter.hasRecoveredEdits(env.getMasterServices().getFileSystem(),
+        env.getMasterConfiguration(), getRegion())) {
+      throw new IOException("Recovered.edits are found in Region: " + getRegion()
+          + ", abort split to prevent data loss");
+    }
+  }
+
+  /**
* Check whether the region is splittable
* @param env MasterProcedureEnv
* @param regionToSplit parent Region to be split
@@ -238,6 +253,10 @@ public class SplitTableRegionProcedure
   break;
 case SPLIT_TABLE_REGION_CLOSE_PARENT_REGION:
          addChildProcedure(createUnassignProcedures(env, getRegionReplication(env)));
+          setNextState(SplitTableRegionState.SPLIT_TABLE_REGIONS_CHECK_CLOSED_REGIONS);
+          break;
+        case SPLIT_TABLE_REGIONS_CHECK_CLOSED_REGIONS:
+          checkClosedRegion(env);
           setNextState(SplitTableRegionState.SPLIT_TABLE_REGION_CREATE_DAUGHTER_REGIONS);
           break;
 case SPLIT_TABLE_REGION_CREATE_DAUGHTER_REGIONS:
@@ -312,6 +331,10 @@ public class SplitTableRegionProcedure
        case SPLIT_TABLE_REGION_WRITE_MAX_SEQUENCE_ID_FILE:
          // Doing nothing, as re-opening the parent region will clean up daughter region directories.
          break;
+        case SPLIT_TABLE_REGIONS_CHECK_CLOSED_REGIONS:
+          // Doing nothing; rolling back SPLIT_TABLE_REGION_CLOSE_PARENT_REGION
+          // will bring the parent region back online.
+          break;
 case SPLIT_TABLE_REGION_CLOSE_PARENT_REGION:
   openParentRegion(env);
   break;

http://git-wip-us.apache.org/repos/asf/hbase/blob/a90a20b4/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
index 254916e..5689a35 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
+++ 
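
The TestSplitRegionWhileRSCrash listed in the diffstat (122 lines) exercises
this race end to end. A rough outline of how such a scenario can be driven
with HBaseTestingUtility; this is not the committed test, timing control is
omitted, and the table name, family, and split key are made up:

import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class SplitWhileCrashOutline {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtility util = new HBaseTestingUtility();
    util.startMiniCluster(2);
    TableName tableName = TableName.valueOf("test");
    Table table = util.createTable(tableName, Bytes.toBytes("cf"));
    util.loadTable(table, Bytes.toBytes("cf")); // unflushed edits live only in the WAL
    RegionInfo parent = util.getAdmin().getRegions(tableName).get(0);
    ServerName server = util.getMiniHBaseCluster()
        .getServerHoldingRegion(tableName, parent.getRegionName());
    // Kill the server holding the parent; the master starts a ServerCrashProcedure.
    util.getMiniHBaseCluster().killRegionServer(server);
    // Request a split while the SCP may still be replaying the dead server's WALs.
    util.getAdmin().splitRegionAsync(parent.getRegionName(), Bytes.toBytes("row-5000"));
    // Without the check the daughters could open before recovered.edits were
    // replayed, silently dropping edits; with it, the split procedure aborts.
    util.shutdownMiniCluster();
  }
}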

hbase git commit: HBASE-20917 MetaTableMetrics#stop references uninitialized requestsMap for non-meta region

2018-07-22 Thread tedyu
Repository: hbase
Updated Branches:
  refs/heads/branch-2 dd0325a74 -> 654480326


HBASE-20917 MetaTableMetrics#stop references uninitialized requestsMap for 
non-meta region


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/65448032
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/65448032
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/65448032

Branch: refs/heads/branch-2
Commit: 654480326b9a8a5fa9d9314d1cfced6a45cfa941
Parents: dd0325a
Author: tedyu 
Authored: Sun Jul 22 20:51:58 2018 -0700
Committer: tedyu 
Committed: Sun Jul 22 20:51:58 2018 -0700

--
 .../hadoop/hbase/coprocessor/MetaTableMetrics.java   | 15 ---
 1 file changed, 8 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/65448032/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MetaTableMetrics.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MetaTableMetrics.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MetaTableMetrics.java
index 9bf35c0..64a6288 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MetaTableMetrics.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MetaTableMetrics.java
@@ -309,9 +309,8 @@ public class MetaTableMetrics implements RegionCoprocessor {
   public void start(CoprocessorEnvironment env) throws IOException {
     if (env instanceof RegionCoprocessorEnvironment
         && ((RegionCoprocessorEnvironment) env).getRegionInfo().getTable() != null
-        && ((RegionCoprocessorEnvironment) env).getRegionInfo().getTable().getName() != null
-        && new String(((RegionCoprocessorEnvironment) env).getRegionInfo().getTable().getName(),
-            StandardCharsets.UTF_8).equals(TableName.META_TABLE_NAME.toString())) {
+        && ((RegionCoprocessorEnvironment) env).getRegionInfo().getTable()
+            .equals(TableName.META_TABLE_NAME)) {
   regionCoprocessorEnv = (RegionCoprocessorEnvironment) env;
   observer = new ExampleRegionObserverMeta();
   requestsMap = new ConcurrentHashMap<>();
@@ -324,11 +323,13 @@ public class MetaTableMetrics implements RegionCoprocessor {
   }
 
   @Override
-  public void stop(CoprocessorEnvironment e) throws IOException {
+  public void stop(CoprocessorEnvironment env) throws IOException {
     // Since the meta region can move around, clear stale metrics when stopping.
-    for (String meterName : requestsMap.keySet()) {
-      MetricRegistry registry = regionCoprocessorEnv.getMetricRegistryForRegionServer();
-      registry.remove(meterName);
+    if (requestsMap != null) {
+      for (String meterName : requestsMap.keySet()) {
+        MetricRegistry registry = regionCoprocessorEnv.getMetricRegistryForRegionServer();
+        registry.remove(meterName);
+      }
 }
   }
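
The root cause is a lifecycle asymmetry: start() initializes requestsMap only
when the coprocessor is attached to hbase:meta, while stop() runs for every
region the coprocessor was loaded on. A minimal sketch of that hazard and the
null guard, independent of the coprocessor API (all names illustrative):

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

class LifecycleSketch {
  private Map<String, Long> requestsMap; // only assigned when start() matches

  void start(boolean isMetaRegion) {
    if (isMetaRegion) {
      requestsMap = new ConcurrentHashMap<>();
    }
  }

  void stop() {
    if (requestsMap != null) { // the guard HBASE-20917 adds
      requestsMap.clear();
    }
  }

  public static void main(String[] args) {
    LifecycleSketch c = new LifecycleSketch();
    c.start(false); // non-meta region: requestsMap stays null
    c.stop();       // without the null check this would throw NullPointerException
  }
}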
 



hbase git commit: HBASE-20917 MetaTableMetrics#stop references uninitialized requestsMap for non-meta region

2018-07-22 Thread tedyu
Repository: hbase
Updated Branches:
  refs/heads/master 4613f3e8c -> 37de961f2


HBASE-20917 MetaTableMetrics#stop references uninitialized requestsMap for 
non-meta region


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/37de961f
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/37de961f
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/37de961f

Branch: refs/heads/master
Commit: 37de961f2041f55e90ff9db5fb0aa83308c92515
Parents: 4613f3e
Author: tedyu 
Authored: Sun Jul 22 20:50:51 2018 -0700
Committer: tedyu 
Committed: Sun Jul 22 20:50:51 2018 -0700

--
 .../hadoop/hbase/coprocessor/MetaTableMetrics.java   | 15 ---
 1 file changed, 8 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/37de961f/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MetaTableMetrics.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MetaTableMetrics.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MetaTableMetrics.java
index 9bf35c0..64a6288 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MetaTableMetrics.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MetaTableMetrics.java
@@ -309,9 +309,8 @@ public class MetaTableMetrics implements RegionCoprocessor {
   public void start(CoprocessorEnvironment env) throws IOException {
     if (env instanceof RegionCoprocessorEnvironment
         && ((RegionCoprocessorEnvironment) env).getRegionInfo().getTable() != null
-        && ((RegionCoprocessorEnvironment) env).getRegionInfo().getTable().getName() != null
-        && new String(((RegionCoprocessorEnvironment) env).getRegionInfo().getTable().getName(),
-            StandardCharsets.UTF_8).equals(TableName.META_TABLE_NAME.toString())) {
+        && ((RegionCoprocessorEnvironment) env).getRegionInfo().getTable()
+            .equals(TableName.META_TABLE_NAME)) {
   regionCoprocessorEnv = (RegionCoprocessorEnvironment) env;
   observer = new ExampleRegionObserverMeta();
   requestsMap = new ConcurrentHashMap<>();
@@ -324,11 +323,13 @@ public class MetaTableMetrics implements RegionCoprocessor {
   }
 
   @Override
-  public void stop(CoprocessorEnvironment e) throws IOException {
+  public void stop(CoprocessorEnvironment env) throws IOException {
     // Since the meta region can move around, clear stale metrics when stopping.
-    for (String meterName : requestsMap.keySet()) {
-      MetricRegistry registry = regionCoprocessorEnv.getMetricRegistryForRegionServer();
-      registry.remove(meterName);
+    if (requestsMap != null) {
+      for (String meterName : requestsMap.keySet()) {
+        MetricRegistry registry = regionCoprocessorEnv.getMetricRegistryForRegionServer();
+        registry.remove(meterName);
+      }
 }
   }
 



hbase git commit: HBASE-20559 Backport HBASE-18083 (Make large/small file clean thread number configurable in HFileCleaner) to branch-1

2018-07-22 Thread liyu
Repository: hbase
Updated Branches:
  refs/heads/branch-1.4 4e30572e7 -> 6d6b5b777


HBASE-20559 Backport HBASE-18083 (Make large/small file clean thread number 
configurable in HFileCleaner) to branch-1

The last port commit of HBASE-20555

Signed-off-by: Yu Li 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/6d6b5b77
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/6d6b5b77
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/6d6b5b77

Branch: refs/heads/branch-1.4
Commit: 6d6b5b7773bbefe6d8136aea4f2bc5e3b8e673a4
Parents: 4e30572
Author: TAK LON WU 
Authored: Mon Jul 23 11:23:57 2018 +0800
Committer: Yu Li 
Committed: Mon Jul 23 11:36:09 2018 +0800

--
 .../hbase/master/cleaner/HFileCleaner.java  | 154 +--
 .../hbase/master/cleaner/TestHFileCleaner.java  |  13 +-
 2 files changed, 120 insertions(+), 47 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/6d6b5b77/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/HFileCleaner.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/HFileCleaner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/HFileCleaner.java
index 70548b4..8f0b4be 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/HFileCleaner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/HFileCleaner.java
@@ -23,6 +23,7 @@ import java.util.ArrayList;
 import java.util.List;
 import java.util.Map;
 import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.atomic.AtomicLong;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -66,6 +67,16 @@ public class HFileCleaner extends CleanerChore impleme
   "hbase.regionserver.hfilecleaner.small.queue.size";
   public final static int DEFAULT_SMALL_HFILE_QUEUE_INIT_SIZE = 10240;
 
+  // Configuration key for large file delete thread number
+  public final static String LARGE_HFILE_DELETE_THREAD_NUMBER =
+  "hbase.regionserver.hfilecleaner.large.thread.count";
+  public final static int DEFAULT_LARGE_HFILE_DELETE_THREAD_NUMBER = 1;
+
+  // Configuration key for small file delete thread number
+  public final static String SMALL_HFILE_DELETE_THREAD_NUMBER =
+  "hbase.regionserver.hfilecleaner.small.thread.count";
+  public final static int DEFAULT_SMALL_HFILE_DELETE_THREAD_NUMBER = 1;
+
   private static final Log LOG = LogFactory.getLog(HFileCleaner.class);
 
   StealJobQueue<HFileDeleteTask> largeFileQueue;
@@ -73,11 +84,13 @@ public class HFileCleaner extends CleanerChore impleme
   private int throttlePoint;
   private int largeQueueInitSize;
   private int smallQueueInitSize;
+  private int largeFileDeleteThreadNumber;
+  private int smallFileDeleteThreadNumber;
   private List<Thread> threads = new ArrayList<Thread>();
   private boolean running;
 
-  private long deletedLargeFiles = 0L;
-  private long deletedSmallFiles = 0L;
+  private AtomicLong deletedLargeFiles = new AtomicLong();
+  private AtomicLong deletedSmallFiles = new AtomicLong();
 
   /**
* @param period the period of time to sleep between each run
@@ -99,6 +112,10 @@ public class HFileCleaner extends CleanerChore impleme
     conf.getInt(SMALL_HFILE_QUEUE_INIT_SIZE, DEFAULT_SMALL_HFILE_QUEUE_INIT_SIZE);
     largeFileQueue = new StealJobQueue<>(largeQueueInitSize, smallQueueInitSize);
     smallFileQueue = largeFileQueue.getStealFromQueue();
+    largeFileDeleteThreadNumber =
+        conf.getInt(LARGE_HFILE_DELETE_THREAD_NUMBER, DEFAULT_LARGE_HFILE_DELETE_THREAD_NUMBER);
+    smallFileDeleteThreadNumber =
+        conf.getInt(SMALL_HFILE_DELETE_THREAD_NUMBER, DEFAULT_SMALL_HFILE_DELETE_THREAD_NUMBER);
     startHFileDeleteThreads();
   }
 
@@ -182,30 +199,34 @@ public class HFileCleaner extends CleanerChore impleme
     final String n = Thread.currentThread().getName();
     running = true;
     // start thread for large file deletion
-    Thread large = new Thread() {
-      @Override
-      public void run() {
-        consumerLoop(largeFileQueue);
-      }
-    };
-    large.setDaemon(true);
-    large.setName(n + "-HFileCleaner.large-" + System.currentTimeMillis());
-    large.start();
-    LOG.debug("Starting hfile cleaner for large files: " + large.getName());
-    threads.add(large);
+    for (int i = 0; i < largeFileDeleteThreadNumber; i++) {
+      Thread large = new Thread() {
+        @Override
+        public void run() {
+          consumerLoop(largeFileQueue);
+        }
+      };
+      large.setDaemon(true);
+      large.setName(n + "-HFileCleaner.large." + i + "-" + System.currentTimeMillis());
+      large.start();
+      LOG.debug("St

hbase git commit: HBASE-20559 Backport HBASE-18083 (Make large/small file clean thread number configurable in HFileCleaner) to branch-1

2018-07-22 Thread liyu
Repository: hbase
Updated Branches:
  refs/heads/branch-1 780724b15 -> 896b69a0f


HBASE-20559 Backport HBASE-18083 (Make large/small file clean thread number 
configurable in HFileCleaner) to branch-1

The last port commit of HBASE-20555

Signed-off-by: Yu Li 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/896b69a0
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/896b69a0
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/896b69a0

Branch: refs/heads/branch-1
Commit: 896b69a0fc6c3164d33a1269e0d7ac87d89b3d90
Parents: 780724b
Author: TAK LON WU 
Authored: Mon Jul 23 11:23:57 2018 +0800
Committer: Yu Li 
Committed: Mon Jul 23 11:36:47 2018 +0800

--
 .../hbase/master/cleaner/HFileCleaner.java  | 154 +--
 .../hbase/master/cleaner/TestHFileCleaner.java  |  13 +-
 2 files changed, 120 insertions(+), 47 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/896b69a0/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/HFileCleaner.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/HFileCleaner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/HFileCleaner.java
index 70548b4..8f0b4be 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/HFileCleaner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/HFileCleaner.java
@@ -23,6 +23,7 @@ import java.util.ArrayList;
 import java.util.List;
 import java.util.Map;
 import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.atomic.AtomicLong;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -66,6 +67,16 @@ public class HFileCleaner extends CleanerChore impleme
   "hbase.regionserver.hfilecleaner.small.queue.size";
   public final static int DEFAULT_SMALL_HFILE_QUEUE_INIT_SIZE = 10240;
 
+  // Configuration key for large file delete thread number
+  public final static String LARGE_HFILE_DELETE_THREAD_NUMBER =
+  "hbase.regionserver.hfilecleaner.large.thread.count";
+  public final static int DEFAULT_LARGE_HFILE_DELETE_THREAD_NUMBER = 1;
+
+  // Configuration key for small file delete thread number
+  public final static String SMALL_HFILE_DELETE_THREAD_NUMBER =
+  "hbase.regionserver.hfilecleaner.small.thread.count";
+  public final static int DEFAULT_SMALL_HFILE_DELETE_THREAD_NUMBER = 1;
+
   private static final Log LOG = LogFactory.getLog(HFileCleaner.class);
 
   StealJobQueue<HFileDeleteTask> largeFileQueue;
@@ -73,11 +84,13 @@ public class HFileCleaner extends CleanerChore impleme
   private int throttlePoint;
   private int largeQueueInitSize;
   private int smallQueueInitSize;
+  private int largeFileDeleteThreadNumber;
+  private int smallFileDeleteThreadNumber;
   private List<Thread> threads = new ArrayList<Thread>();
   private boolean running;
 
-  private long deletedLargeFiles = 0L;
-  private long deletedSmallFiles = 0L;
+  private AtomicLong deletedLargeFiles = new AtomicLong();
+  private AtomicLong deletedSmallFiles = new AtomicLong();
 
   /**
* @param period the period of time to sleep between each run
@@ -99,6 +112,10 @@ public class HFileCleaner extends CleanerChore impleme
     conf.getInt(SMALL_HFILE_QUEUE_INIT_SIZE, DEFAULT_SMALL_HFILE_QUEUE_INIT_SIZE);
     largeFileQueue = new StealJobQueue<>(largeQueueInitSize, smallQueueInitSize);
     smallFileQueue = largeFileQueue.getStealFromQueue();
+    largeFileDeleteThreadNumber =
+        conf.getInt(LARGE_HFILE_DELETE_THREAD_NUMBER, DEFAULT_LARGE_HFILE_DELETE_THREAD_NUMBER);
+    smallFileDeleteThreadNumber =
+        conf.getInt(SMALL_HFILE_DELETE_THREAD_NUMBER, DEFAULT_SMALL_HFILE_DELETE_THREAD_NUMBER);
     startHFileDeleteThreads();
   }
 
@@ -182,30 +199,34 @@ public class HFileCleaner extends CleanerChore impleme
     final String n = Thread.currentThread().getName();
     running = true;
     // start thread for large file deletion
-    Thread large = new Thread() {
-      @Override
-      public void run() {
-        consumerLoop(largeFileQueue);
-      }
-    };
-    large.setDaemon(true);
-    large.setName(n + "-HFileCleaner.large-" + System.currentTimeMillis());
-    large.start();
-    LOG.debug("Starting hfile cleaner for large files: " + large.getName());
-    threads.add(large);
+    for (int i = 0; i < largeFileDeleteThreadNumber; i++) {
+      Thread large = new Thread() {
+        @Override
+        public void run() {
+          consumerLoop(largeFileQueue);
+        }
+      };
+      large.setDaemon(true);
+      large.setName(n + "-HFileCleaner.large." + i + "-" + System.currentTimeMillis());
+      large.start();
+      LOG.debug("Starti

hbase git commit: HBASE-20401 Make MAX_WAIT and waitIfNotFinished in CleanerContext configurable (Contributed by Stephen Wu)

2018-07-22 Thread reidchan
Repository: hbase
Updated Branches:
  refs/heads/branch-2.0 dc19f0b03 -> 8363b2f10


HBASE-20401 Make MAX_WAIT and waitIfNotFinished in CleanerContext configurable 
(Contributed by Stephen Wu)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/8363b2f1
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/8363b2f1
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/8363b2f1

Branch: refs/heads/branch-2.0
Commit: 8363b2f1071947f9899bbc966196ebce06c54f15
Parents: dc19f0b
Author: Reid Chan 
Authored: Mon Jul 23 10:29:38 2018 +0800
Committer: Reid Chan 
Committed: Mon Jul 23 10:34:18 2018 +0800

--
 .../hbase/master/cleaner/HFileCleaner.java  | 65 
 .../hadoop/hbase/master/cleaner/LogCleaner.java | 65 +++-
 .../hbase/master/cleaner/TestHFileCleaner.java  | 12 
 .../hbase/master/cleaner/TestLogsCleaner.java   | 22 ++-
 src/main/asciidoc/_chapters/configuration.adoc  |  4 ++
 5 files changed, 139 insertions(+), 29 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/8363b2f1/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/HFileCleaner.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/HFileCleaner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/HFileCleaner.java
index a5e87ae..47b0228 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/HFileCleaner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/HFileCleaner.java
@@ -23,6 +23,7 @@ import java.util.Comparator;
 import java.util.List;
 import java.util.Map;
 import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicLong;
 
 import org.apache.hadoop.conf.Configuration;
@@ -76,6 +77,16 @@ public class HFileCleaner extends CleanerChore {
   "hbase.regionserver.hfilecleaner.small.thread.count";
   public final static int DEFAULT_SMALL_HFILE_DELETE_THREAD_NUMBER = 1;
 
+  public static final String HFILE_DELETE_THREAD_TIMEOUT_MSEC =
+  "hbase.regionserver.hfilecleaner.thread.timeout.msec";
+  @VisibleForTesting
+  static final long DEFAULT_HFILE_DELETE_THREAD_TIMEOUT_MSEC = 60 * 1000L;
+
+  public static final String HFILE_DELETE_THREAD_CHECK_INTERVAL_MSEC =
+  "hbase.regionserver.hfilecleaner.thread.check.interval.msec";
+  @VisibleForTesting
+  static final long DEFAULT_HFILE_DELETE_THREAD_CHECK_INTERVAL_MSEC = 1000L;
+
   private static final Logger LOG = LoggerFactory.getLogger(HFileCleaner.class);
 
   StealJobQueue<HFileDeleteTask> largeFileQueue;
@@ -85,6 +96,8 @@ public class HFileCleaner extends CleanerChore {
   private int smallQueueInitSize;
   private int largeFileDeleteThreadNumber;
   private int smallFileDeleteThreadNumber;
+  private long cleanerThreadTimeoutMsec;
+  private long cleanerThreadCheckIntervalMsec;
   private List<Thread> threads = new ArrayList<Thread>();
   private boolean running;
 
@@ -115,6 +128,11 @@ public class HFileCleaner extends CleanerChore {
         conf.getInt(LARGE_HFILE_DELETE_THREAD_NUMBER, DEFAULT_LARGE_HFILE_DELETE_THREAD_NUMBER);
     smallFileDeleteThreadNumber =
         conf.getInt(SMALL_HFILE_DELETE_THREAD_NUMBER, DEFAULT_SMALL_HFILE_DELETE_THREAD_NUMBER);
+    cleanerThreadTimeoutMsec =
+        conf.getLong(HFILE_DELETE_THREAD_TIMEOUT_MSEC, DEFAULT_HFILE_DELETE_THREAD_TIMEOUT_MSEC);
+    cleanerThreadCheckIntervalMsec =
+        conf.getLong(HFILE_DELETE_THREAD_CHECK_INTERVAL_MSEC,
+            DEFAULT_HFILE_DELETE_THREAD_CHECK_INTERVAL_MSEC);
     startHFileDeleteThreads();
   }
 
@@ -146,7 +164,7 @@ public class HFileCleaner extends CleanerChore {
     }
     // wait for each submitted task to finish
     for (HFileDeleteTask task : tasks) {
-      if (task.getResult()) {
+      if (task.getResult(cleanerThreadCheckIntervalMsec)) {
        deletedFiles++;
      }
    }
@@ -159,7 +177,7 @@ public class HFileCleaner extends CleanerChore {
    * @return HFileDeleteTask to track progress
    */
   private HFileDeleteTask deleteFile(FileStatus file) {
-    HFileDeleteTask task = new HFileDeleteTask(file);
+    HFileDeleteTask task = new HFileDeleteTask(file, cleanerThreadTimeoutMsec);
     boolean enqueued = dispatch(task);
     return enqueued ? task : null;
   }
@@ -300,17 +318,17 @@ public class HFileCleaner extends CleanerChore {
   };
 
   private static final class HFileDeleteTask {
-    private static final long MAX_WAIT = 60 * 1000L;
-    private static final long WAIT_UNIT = 1000L;
 
     boolean done = false;
     boolean result;
     final Path filePath;
     final long fileLength;
+    final long timeoutMsec;
 
-    public HFileDeleteTask(FileStatus file) {
+    public HFil
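
The getResult body is cut off by the digest, but the fields above tell the
story: the hard-coded MAX_WAIT and WAIT_UNIT become the configurable
timeoutMsec and the check interval passed into getResult. A hedged sketch of
the classic bounded wait this parameterizes, reusing the field names visible
in the diff (not the exact committed body):

final class DeleteTaskSketch {
  boolean done = false;
  boolean result;
  final long timeoutMsec;

  DeleteTaskSketch(long timeoutMsec) {
    this.timeoutMsec = timeoutMsec;
  }

  synchronized void setResult(boolean result) {
    this.done = true;
    this.result = result;
    notifyAll(); // wake any thread blocked in getResult
  }

  synchronized boolean getResult(long waitIfNotFinished) {
    long waitedMsec = 0;
    while (!done) {
      try {
        wait(waitIfNotFinished);          // sleep one check-interval slice
        waitedMsec += waitIfNotFinished;
        if (!done && waitedMsec > timeoutMsec) {
          return false;                   // give up after the configured timeout
        }
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        return false;
      }
    }
    return result;
  }
}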

hbase git commit: HBASE-20401 Make MAX_WAIT and waitIfNotFinished in CleanerContext configurable (Contributed by Stephen Wu)

2018-07-22 Thread reidchan
Repository: hbase
Updated Branches:
  refs/heads/branch-2.1 46e5baf67 -> 9d481f1fa


HBASE-20401 Make MAX_WAIT and waitIfNotFinished in CleanerContext configurable 
(Contributed by Stephen Wu)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/9d481f1f
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/9d481f1f
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/9d481f1f

Branch: refs/heads/branch-2.1
Commit: 9d481f1faa7a40110123af3c915c2b7034d2ca21
Parents: 46e5baf
Author: Reid Chan 
Authored: Mon Jul 23 10:29:38 2018 +0800
Committer: Reid Chan 
Committed: Mon Jul 23 10:33:26 2018 +0800

--
 .../hbase/master/cleaner/HFileCleaner.java  | 65 
 .../hadoop/hbase/master/cleaner/LogCleaner.java | 65 +++-
 .../hbase/master/cleaner/TestHFileCleaner.java  | 12 
 .../hbase/master/cleaner/TestLogsCleaner.java   | 22 ++-
 src/main/asciidoc/_chapters/configuration.adoc  |  4 ++
 5 files changed, 139 insertions(+), 29 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/9d481f1f/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/HFileCleaner.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/HFileCleaner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/HFileCleaner.java
index a5e87ae..47b0228 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/HFileCleaner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/HFileCleaner.java
@@ -23,6 +23,7 @@ import java.util.Comparator;
 import java.util.List;
 import java.util.Map;
 import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicLong;
 
 import org.apache.hadoop.conf.Configuration;
@@ -76,6 +77,16 @@ public class HFileCleaner extends CleanerChore {
   "hbase.regionserver.hfilecleaner.small.thread.count";
   public final static int DEFAULT_SMALL_HFILE_DELETE_THREAD_NUMBER = 1;
 
+  public static final String HFILE_DELETE_THREAD_TIMEOUT_MSEC =
+  "hbase.regionserver.hfilecleaner.thread.timeout.msec";
+  @VisibleForTesting
+  static final long DEFAULT_HFILE_DELETE_THREAD_TIMEOUT_MSEC = 60 * 1000L;
+
+  public static final String HFILE_DELETE_THREAD_CHECK_INTERVAL_MSEC =
+  "hbase.regionserver.hfilecleaner.thread.check.interval.msec";
+  @VisibleForTesting
+  static final long DEFAULT_HFILE_DELETE_THREAD_CHECK_INTERVAL_MSEC = 1000L;
+
   private static final Logger LOG = LoggerFactory.getLogger(HFileCleaner.class);
 
   StealJobQueue<HFileDeleteTask> largeFileQueue;
@@ -85,6 +96,8 @@ public class HFileCleaner extends CleanerChore {
   private int smallQueueInitSize;
   private int largeFileDeleteThreadNumber;
   private int smallFileDeleteThreadNumber;
+  private long cleanerThreadTimeoutMsec;
+  private long cleanerThreadCheckIntervalMsec;
   private List<Thread> threads = new ArrayList<Thread>();
   private boolean running;
 
@@ -115,6 +128,11 @@ public class HFileCleaner extends CleanerChore {
         conf.getInt(LARGE_HFILE_DELETE_THREAD_NUMBER, DEFAULT_LARGE_HFILE_DELETE_THREAD_NUMBER);
     smallFileDeleteThreadNumber =
         conf.getInt(SMALL_HFILE_DELETE_THREAD_NUMBER, DEFAULT_SMALL_HFILE_DELETE_THREAD_NUMBER);
+    cleanerThreadTimeoutMsec =
+        conf.getLong(HFILE_DELETE_THREAD_TIMEOUT_MSEC, DEFAULT_HFILE_DELETE_THREAD_TIMEOUT_MSEC);
+    cleanerThreadCheckIntervalMsec =
+        conf.getLong(HFILE_DELETE_THREAD_CHECK_INTERVAL_MSEC,
+            DEFAULT_HFILE_DELETE_THREAD_CHECK_INTERVAL_MSEC);
     startHFileDeleteThreads();
   }
 
@@ -146,7 +164,7 @@ public class HFileCleaner extends CleanerChore {
     }
     // wait for each submitted task to finish
     for (HFileDeleteTask task : tasks) {
-      if (task.getResult()) {
+      if (task.getResult(cleanerThreadCheckIntervalMsec)) {
        deletedFiles++;
      }
    }
@@ -159,7 +177,7 @@ public class HFileCleaner extends CleanerChore {
    * @return HFileDeleteTask to track progress
    */
   private HFileDeleteTask deleteFile(FileStatus file) {
-    HFileDeleteTask task = new HFileDeleteTask(file);
+    HFileDeleteTask task = new HFileDeleteTask(file, cleanerThreadTimeoutMsec);
     boolean enqueued = dispatch(task);
     return enqueued ? task : null;
   }
@@ -300,17 +318,17 @@ public class HFileCleaner extends CleanerChore {
   };
 
   private static final class HFileDeleteTask {
-    private static final long MAX_WAIT = 60 * 1000L;
-    private static final long WAIT_UNIT = 1000L;
 
     boolean done = false;
     boolean result;
     final Path filePath;
     final long fileLength;
+    final long timeoutMsec;
 
-    public HFileDeleteTask(FileStatus file) {
+    public HFil

hbase git commit: HBASE-20401 Make MAX_WAIT and waitIfNotFinished in CleanerContext configurable (Contributed by Stephen Wu)

2018-07-22 Thread reidchan
Repository: hbase
Updated Branches:
  refs/heads/branch-2 b15efbf8e -> dd0325a74


HBASE-20401 Make MAX_WAIT and waitIfNotFinished in CleanerContext configurable 
(Contributed by Stephen Wu)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/dd0325a7
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/dd0325a7
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/dd0325a7

Branch: refs/heads/branch-2
Commit: dd0325a740cb67df6b1b50c0fc457b7fa1bd23de
Parents: b15efbf
Author: Reid Chan 
Authored: Mon Jul 23 10:29:38 2018 +0800
Committer: Reid Chan 
Committed: Mon Jul 23 10:32:17 2018 +0800

--
 .../hbase/master/cleaner/HFileCleaner.java  | 65 
 .../hadoop/hbase/master/cleaner/LogCleaner.java | 65 +++-
 .../hbase/master/cleaner/TestHFileCleaner.java  | 12 
 .../hbase/master/cleaner/TestLogsCleaner.java   | 22 ++-
 src/main/asciidoc/_chapters/configuration.adoc  |  4 ++
 5 files changed, 139 insertions(+), 29 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/dd0325a7/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/HFileCleaner.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/HFileCleaner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/HFileCleaner.java
index a5e87ae..47b0228 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/HFileCleaner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/HFileCleaner.java
@@ -23,6 +23,7 @@ import java.util.Comparator;
 import java.util.List;
 import java.util.Map;
 import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicLong;
 
 import org.apache.hadoop.conf.Configuration;
@@ -76,6 +77,16 @@ public class HFileCleaner extends CleanerChore {
   "hbase.regionserver.hfilecleaner.small.thread.count";
   public final static int DEFAULT_SMALL_HFILE_DELETE_THREAD_NUMBER = 1;
 
+  public static final String HFILE_DELETE_THREAD_TIMEOUT_MSEC =
+  "hbase.regionserver.hfilecleaner.thread.timeout.msec";
+  @VisibleForTesting
+  static final long DEFAULT_HFILE_DELETE_THREAD_TIMEOUT_MSEC = 60 * 1000L;
+
+  public static final String HFILE_DELETE_THREAD_CHECK_INTERVAL_MSEC =
+  "hbase.regionserver.hfilecleaner.thread.check.interval.msec";
+  @VisibleForTesting
+  static final long DEFAULT_HFILE_DELETE_THREAD_CHECK_INTERVAL_MSEC = 1000L;
+
   private static final Logger LOG = LoggerFactory.getLogger(HFileCleaner.class);
 
   StealJobQueue<HFileDeleteTask> largeFileQueue;
@@ -85,6 +96,8 @@ public class HFileCleaner extends CleanerChore {
   private int smallQueueInitSize;
   private int largeFileDeleteThreadNumber;
  private int smallFileDeleteThreadNumber;
+  private long cleanerThreadTimeoutMsec;
+  private long cleanerThreadCheckIntervalMsec;
   private List<Thread> threads = new ArrayList<Thread>();
   private boolean running;
 
@@ -115,6 +128,11 @@ public class HFileCleaner extends CleanerChore {
         conf.getInt(LARGE_HFILE_DELETE_THREAD_NUMBER, DEFAULT_LARGE_HFILE_DELETE_THREAD_NUMBER);
     smallFileDeleteThreadNumber =
         conf.getInt(SMALL_HFILE_DELETE_THREAD_NUMBER, DEFAULT_SMALL_HFILE_DELETE_THREAD_NUMBER);
+    cleanerThreadTimeoutMsec =
+        conf.getLong(HFILE_DELETE_THREAD_TIMEOUT_MSEC, DEFAULT_HFILE_DELETE_THREAD_TIMEOUT_MSEC);
+    cleanerThreadCheckIntervalMsec =
+        conf.getLong(HFILE_DELETE_THREAD_CHECK_INTERVAL_MSEC,
+            DEFAULT_HFILE_DELETE_THREAD_CHECK_INTERVAL_MSEC);
     startHFileDeleteThreads();
   }
 
@@ -146,7 +164,7 @@ public class HFileCleaner extends CleanerChore {
     }
     // wait for each submitted task to finish
     for (HFileDeleteTask task : tasks) {
-      if (task.getResult()) {
+      if (task.getResult(cleanerThreadCheckIntervalMsec)) {
        deletedFiles++;
      }
    }
@@ -159,7 +177,7 @@ public class HFileCleaner extends CleanerChore {
    * @return HFileDeleteTask to track progress
    */
   private HFileDeleteTask deleteFile(FileStatus file) {
-    HFileDeleteTask task = new HFileDeleteTask(file);
+    HFileDeleteTask task = new HFileDeleteTask(file, cleanerThreadTimeoutMsec);
     boolean enqueued = dispatch(task);
     return enqueued ? task : null;
   }
@@ -300,17 +318,17 @@ public class HFileCleaner extends CleanerChore {
   };
 
   private static final class HFileDeleteTask {
-    private static final long MAX_WAIT = 60 * 1000L;
-    private static final long WAIT_UNIT = 1000L;
 
     boolean done = false;
     boolean result;
     final Path filePath;
     final long fileLength;
+    final long timeoutMsec;
 
-    public HFileDeleteTask(FileStatus file) {
+    public HFileDel

hbase git commit: HBASE-20401 Make MAX_WAIT and waitIfNotFinished in CleanerContext configurable (Contributed by Stephen Wu)

2018-07-22 Thread reidchan
Repository: hbase
Updated Branches:
  refs/heads/master b4759ce6e -> 4613f3e8c


HBASE-20401 Make MAX_WAIT and waitIfNotFinished in CleanerContext configurable 
(Contributed by Stephen Wu)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/4613f3e8
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/4613f3e8
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/4613f3e8

Branch: refs/heads/master
Commit: 4613f3e8c740e881505fcd2c661a0e28462eb459
Parents: b4759ce
Author: Reid Chan 
Authored: Mon Jul 23 10:29:38 2018 +0800
Committer: Reid Chan 
Committed: Mon Jul 23 10:30:01 2018 +0800

--
 .../hbase/master/cleaner/HFileCleaner.java  | 65 
 .../hadoop/hbase/master/cleaner/LogCleaner.java | 65 +++-
 .../hbase/master/cleaner/TestHFileCleaner.java  | 12 
 .../hbase/master/cleaner/TestLogsCleaner.java   | 22 ++-
 src/main/asciidoc/_chapters/configuration.adoc  |  4 ++
 5 files changed, 139 insertions(+), 29 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/4613f3e8/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/HFileCleaner.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/HFileCleaner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/HFileCleaner.java
index a5e87ae..47b0228 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/HFileCleaner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/HFileCleaner.java
@@ -23,6 +23,7 @@ import java.util.Comparator;
 import java.util.List;
 import java.util.Map;
 import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicLong;
 
 import org.apache.hadoop.conf.Configuration;
@@ -76,6 +77,16 @@ public class HFileCleaner extends CleanerChore {
   "hbase.regionserver.hfilecleaner.small.thread.count";
   public final static int DEFAULT_SMALL_HFILE_DELETE_THREAD_NUMBER = 1;
 
+  public static final String HFILE_DELETE_THREAD_TIMEOUT_MSEC =
+  "hbase.regionserver.hfilecleaner.thread.timeout.msec";
+  @VisibleForTesting
+  static final long DEFAULT_HFILE_DELETE_THREAD_TIMEOUT_MSEC = 60 * 1000L;
+
+  public static final String HFILE_DELETE_THREAD_CHECK_INTERVAL_MSEC =
+  "hbase.regionserver.hfilecleaner.thread.check.interval.msec";
+  @VisibleForTesting
+  static final long DEFAULT_HFILE_DELETE_THREAD_CHECK_INTERVAL_MSEC = 1000L;
+
   private static final Logger LOG = LoggerFactory.getLogger(HFileCleaner.class);
 
   StealJobQueue<HFileDeleteTask> largeFileQueue;
@@ -85,6 +96,8 @@ public class HFileCleaner extends CleanerChore {
   private int smallQueueInitSize;
   private int largeFileDeleteThreadNumber;
   private int smallFileDeleteThreadNumber;
+  private long cleanerThreadTimeoutMsec;
+  private long cleanerThreadCheckIntervalMsec;
   private List<Thread> threads = new ArrayList<Thread>();
   private boolean running;
 
@@ -115,6 +128,11 @@ public class HFileCleaner extends CleanerChore {
         conf.getInt(LARGE_HFILE_DELETE_THREAD_NUMBER, DEFAULT_LARGE_HFILE_DELETE_THREAD_NUMBER);
     smallFileDeleteThreadNumber =
         conf.getInt(SMALL_HFILE_DELETE_THREAD_NUMBER, DEFAULT_SMALL_HFILE_DELETE_THREAD_NUMBER);
+    cleanerThreadTimeoutMsec =
+        conf.getLong(HFILE_DELETE_THREAD_TIMEOUT_MSEC, DEFAULT_HFILE_DELETE_THREAD_TIMEOUT_MSEC);
+    cleanerThreadCheckIntervalMsec =
+        conf.getLong(HFILE_DELETE_THREAD_CHECK_INTERVAL_MSEC,
+            DEFAULT_HFILE_DELETE_THREAD_CHECK_INTERVAL_MSEC);
     startHFileDeleteThreads();
   }
 
@@ -146,7 +164,7 @@ public class HFileCleaner extends CleanerChore {
     }
     // wait for each submitted task to finish
     for (HFileDeleteTask task : tasks) {
-      if (task.getResult()) {
+      if (task.getResult(cleanerThreadCheckIntervalMsec)) {
        deletedFiles++;
      }
    }
@@ -159,7 +177,7 @@ public class HFileCleaner extends CleanerChore {
    * @return HFileDeleteTask to track progress
    */
   private HFileDeleteTask deleteFile(FileStatus file) {
-    HFileDeleteTask task = new HFileDeleteTask(file);
+    HFileDeleteTask task = new HFileDeleteTask(file, cleanerThreadTimeoutMsec);
     boolean enqueued = dispatch(task);
     return enqueued ? task : null;
   }
@@ -300,17 +318,17 @@ public class HFileCleaner extends CleanerChore {
   };
 
   private static final class HFileDeleteTask {
-    private static final long MAX_WAIT = 60 * 1000L;
-    private static final long WAIT_UNIT = 1000L;
 
     boolean done = false;
     boolean result;
     final Path filePath;
     final long fileLength;
+    final long timeoutMsec;
 
-    public HFileDeleteTask(FileStatus file) {
+    public HFileDeleT

[46/51] [partial] hbase-site git commit: Published site at b4759ce6e72f50ccd9d410bd5917dc5a515414f1.

2018-07-22 Thread git-site-role


[36/51] [partial] hbase-site git commit: Published site at b4759ce6e72f50ccd9d410bd5917dc5a515414f1.

2018-07-22 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/df8fd1d3/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.RegionStateNode.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.RegionStateNode.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.RegionStateNode.html
index 5f41fe7..c8158b5 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.RegionStateNode.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.RegionStateNode.html
@@ -524,636 +524,639 @@
With the HTML wrapping stripped, the visible part of the hunk adds one bulk
helper and renumbers the methods below it:

+  public void deleteRegions(final List<RegionInfo> regionInfos) {
+    regionInfos.forEach(this::deleteRegion);
+  }

Unchanged apart from renumbering:

  ArrayList<RegionStateNode> getTableRegionStateNodes(final TableName tableName) {
    final ArrayList<RegionStateNode> regions = new ArrayList<RegionStateNode>();
    for (RegionStateNode node: regionsMap.tailMap(tableName.getName()).values()) {
      if (!node.getTable().equals(tableName)) break;
      regions.add(node);
    }
    return regions;
  }

  ArrayList<RegionState> getTableRegionStates(final TableName tableName) {
    final ArrayList<RegionState> regions = new ArrayList<RegionState>();
    for (RegionStateNode node: regionsMap.tailMap(tableName.getName()).values()) {
      if (!node.getTable().equals(tableName)) break;
      regions.add(node.toRegionState());
    }
    return regions;
  }

  ArrayList<RegionInfo> getTableRegionsInfo(final TableName tableName) {
    final ArrayList<RegionInfo> regions = new ArrayList<RegionInfo>();
    for (RegionStateNode node: regionsMap.tailMap(tableName.getName()).values()) {
      if (!node.getTable().equals(tableName)) break;
      regions.add(node.getRegionInfo());
    }
    return regions;
  }

  Collection<RegionStateNode> getRegionStateNodes() {
    return regionsMap.values();
  }

  public ArrayList<RegionState> getRegionStates() {
    final ArrayList<RegionState> regions = new ArrayList<RegionState>(regionsMap.size());
    for (RegionStateNode node: regionsMap.values()) {
      regions.add(node.toRegionState());
    }
    return regions;
  }

  // ==========================================================================
  //  RegionState helpers
  // ==========================================================================
  public RegionState getRegionState(final RegionInfo regionInfo) {
    RegionStateNode regionStateNode = getRegionStateNode(regionInfo);
    return regionStateNode == null ? null : regionStateNode.toRegionState();
  }

  public RegionState getRegionState(final String encodedRegionName) {
    // TODO: Need a map <...> but it is just dispatch merge...
    for (RegionStateNode node: regionsMap.values()) {
      if (node.getRegionInfo().getEncodedName().equals(encodedRegionName)) {
        return node.toRegionState();
      }
    }
    return null;
  }

  // ============================================================================================
  //  TODO: helpers
  // ============================================================================================
  public boolean hasTableRegionStates(final TableName tableName) {
    // TODO
    return !getTableRegionStates(tableName).isEmpty();
  }

  /**
   * @return Return online regions of table; does not include OFFLINE or SPLITTING regions.
   */
  public List<
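The new deleteRegions helper is just a forEach over the existing
single-region delete, keeping both paths consistent. A self-contained sketch
of the same bulk-removal pattern (the map and value types here are
simplified stand-ins, not the HBase originals):

    import java.util.List;
    import java.util.concurrent.ConcurrentSkipListMap;

    // Simplified stand-in for RegionStates: a sorted, concurrent map of
    // region name -> state object, with single and bulk delete.
    class RegionStateMap {
      private final ConcurrentSkipListMap<String, Object> regionsMap =
          new ConcurrentSkipListMap<>();

      void add(String regionName, Object stateNode) {
        regionsMap.put(regionName, stateNode);
      }

      void deleteRegion(String regionName) {
        regionsMap.remove(regionName);
      }

      // Mirrors the new helper: delegate each element to the single-delete
      // path so any bookkeeping done there happens for bulk deletes too.
      void deleteRegions(List<String> regionNames) {
        regionNames.forEach(this::deleteRegion);
      }
    }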

[24/51] [partial] hbase-site git commit: Published site at b4759ce6e72f50ccd9d410bd5917dc5a515414f1.

2018-07-22 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/df8fd1d3/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.LoadCandidateGenerator.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.LoadCandidateGenerator.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.LoadCandidateGenerator.html
index 233dba3..91b9055 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.LoadCandidateGenerator.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.LoadCandidateGenerator.html
@@ -540,1205 +540,1204 @@
The hunk renumbers the generated source page for StochasticLoadBalancer (net
one line shorter); the renumbered replacement side is truncated from this
digest. The removed side, with HTML wrapping stripped, reads:

      sm.getRegionMetrics().forEach((byte[] regionName, RegionMetrics rm) -> {
        Deque<BalancerRegionLoad> rLoads = oldLoads.get(Bytes.toString(regionName));
        if (rLoads == null) {
          // There was nothing there
          rLoads = new ArrayDeque<>();
        } else if (rLoads.size() >= numRegionLoadsToRemember) {
          rLoads.remove();
        }
        rLoads.add(new BalancerRegionLoad(rm));
        loads.put(Bytes.toString(regionName), rLoads);
      });
    });

    for (CostFromRegionLoadFunction cost : regionLoadFunctions) {
      cost.setLoads(loads);
    }
  }

  protected void initCosts(Cluster cluster) {
    for (CostFunction c : costFunctions) {
      c.init(cluster);
    }
  }

  protected void updateCostsWithAction(Cluster cluster, Action action) {
    for (CostFunction c : costFunctions) {
      c.postAction(action);
    }
  }

  /**
   * Get the names of the cost functions
   */
  public String[] getCostFunctionNames() {
    if (costFunctions == null) return null;
    String[] ret = new String[costFunctions.length];
    for (int i = 0; i < costFunctions.length; i++) {
      CostFunction c = costFunctions[i];
      ret[i] = c.getClass().getSimpleName();
    }
    return ret;
  }

  /**
   * This is the main cost function. It will compute a cost associated with a proposed cluster
   * state. All different costs will be combined with their multipliers to produce a double cost.
   *
   * @param cluster The state of the cluster
   * @param previousCost the previous cost. This is used as an early out.
   * @return a double of a cost associated with the proposed cluster state. This cost is an
   *         aggregate of all individual cost functions.
   */
  protected double computeCost(Cluster cluster, double previousCost) {
    double total = 0;

    for (int i = 0; i < costFunctions.length; i++) {
      CostFunction c = costFunctions[i];
      this.tempFunctionCosts[i] = 0.0;

      if (c.getMultiplier() <= 0) {
        continue;
      }

      Float multiplier = c.getMultiplier();
      Double cost = c.cost();

      this.tempFunctionCosts[i] = multiplier * cost;
      total += this.tempFunctionCosts[i];

      if (total > previousCost) {
        break;
      }
    }

    return total;
  }

  /** Generates a candidate action to be applied to the cluster for cost function search */
  abstract static class CandidateGenerator {
    abstract Cluster.Action generate(Cluster cluster);

    /**
     * From a list of regions pick a random one. Null can be returned which
     * {@link StochasticLoadBalancer#balanceCluster(Map)} recognize as signal to try a region move
     * rather than swap.
     *
     * @param cluster        The state of the cluster
     * @param server         index of the server
     * @param chanceOfNoSwap Chance that this will decide to try a move rather
     *                       than a swap.
     * @return a random {@link RegionInfo} or null if an asymmetrical move is
     *         suggested.
     */
    protected int pickRandomRegion(Cluster cluster, int server, double chanceOfNoSwap) {
      // Check to see if this is just a move.
      if (cluster.regionsPerServer[server].length == 0 || RANDOM.nextFloat() < chanceOfNoSwap) {
        // signal a move only.
        return -1;
      }
      int rand = RANDOM.nextInt(cluster.regionsPerServer[server].length);
      return cluster.regionsPerServer[server][rand];
    }

    protected int pickRandomServer(Cluster cluster) {
      if (cluster.numServers < 1) {
        return -1;
      }
      return RANDOM.nextInt(cluster.numServers);
    }

    protected int pickRandomRack(Cluster cluster) {
      if (cluster.numRacks < 1) {
        return -1;
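The early-out in computeCost is the piece worth internalizing: a candidate
cluster state is rejected as soon as its running weighted total exceeds the
cost to beat, so later cost functions are never evaluated. A self-contained
sketch of that pattern with hypothetical cost functions (names and types
here are illustrative, not the HBase API):

    import java.util.function.DoubleSupplier;

    // Weighted-sum cost with an early out, as in computeCost above.
    class WeightedCost {
      private final DoubleSupplier[] costFns;  // each returns a cost, e.g. in [0, 1]
      private final float[] multipliers;       // weight per function; <= 0 disables it

      WeightedCost(DoubleSupplier[] costFns, float[] multipliers) {
        this.costFns = costFns;
        this.multipliers = multipliers;
      }

      double compute(double previousCost) {
        double total = 0;
        for (int i = 0; i < costFns.length; i++) {
          if (multipliers[i] <= 0) {
            continue;                          // disabled: skip the (possibly pricey) cost()
          }
          total += multipliers[i] * costFns[i].getAsDouble();
          if (total > previousCost) {
            break;                             // already worse than the incumbent: stop early
          }
        }
        return total;
      }

      public static void main(String[] args) {
        WeightedCost wc = new WeightedCost(
            new DoubleSupplier[] { () -> 0.2, () -> 0.9 },
            new float[] { 500f, 5f });
        // With previousCost = 50, the first term (100.0) already exceeds it,
        // so the second function is never evaluated.
        System.out.println(wc.compute(50));    // prints 100.0
      }
    }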

[02/51] [partial] hbase-site git commit: Published site at b4759ce6e72f50ccd9d410bd5917dc5a515414f1.

2018-07-22 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/df8fd1d3/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.RegionReplicaReplayCallable.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.RegionReplicaReplayCallable.html b/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.RegionReplicaReplayCallable.html
index 3d7093a..9917ee8 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.RegionReplicaReplayCallable.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.RegionReplicaReplayCallable.html
@@ -39,594 +39,612 @@
The visible part of the hunk reflows the import block of this generated
source page: a blank line after the java.util.concurrent imports is dropped
and the org.apache.hadoop / org.apache.hbase imports are renumbered one line
up, at least through the WALSplitter imports. The page grows by 18 lines
overall, but the remainder of the hunk is truncated from this digest.

[22/51] [partial] hbase-site git commit: Published site at b4759ce6e72f50ccd9d410bd5917dc5a515414f1.

2018-07-22 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/df8fd1d3/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.LocalityBasedCostFunction.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.LocalityBasedCostFunction.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.LocalityBasedCostFunction.html
index 233dba3..91b9055 100644
(same renumbering hunk as the StochasticLoadBalancer.LoadCandidateGenerator page in [24/51] above)

[30/51] [partial] hbase-site git commit: Published site at b4759ce6e72f50ccd9d410bd5917dc5a515414f1.

2018-07-22 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/df8fd1d3/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.Action.Type.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.Action.Type.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.Action.Type.html
index d4bf03c..c372545 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.Action.Type.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.Action.Type.html
@@ -574,7 +574,7 @@
      * and rack have the highest locality for region
      */
     private void computeCachedLocalities() {
-      rackLocalities = new float[numRegions][numServers];
+      rackLocalities = new float[numRegions][numRacks];
       regionsToMostLocalEntities = new int[LocalityType.values().length][numRegions];
 
       // Compute localities and find most local server per region

The same one-line fix repeats verbatim (index d4bf03c..c372545) on the
generated pages for BaseLoadBalancer.Cluster.Action,
BaseLoadBalancer.Cluster.AssignRegionAction,
BaseLoadBalancer.Cluster.LocalityType, and, truncated here,
BaseLoadBalancer.Cluster.MoveRegionAction.
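The fix matters because the second dimension of rackLocalities is indexed by
rack, not by server, so it must be sized by the rack count. A minimal
self-contained sketch of the corrected layout (the field name mirrors the
source; everything else is illustrative):

    // Locality is stored per (region, rack) pair, so the table is
    // regions x racks, not regions x servers.
    class RackLocalityTable {
      private final float[][] rackLocalities;  // [region][rack] -> locality in [0, 1]

      RackLocalityTable(int numRegions, int numRacks) {
        rackLocalities = new float[numRegions][numRacks];
      }

      void set(int region, int rack, float locality) {
        rackLocalities[region][rack] = locality;
      }

      float get(int region, int rack) {
        return rackLocalities[region][rack];
      }

      public static void main(String[] args) {
        // 2 regions across 10 servers in 3 racks: the table is 2x3, not 2x10.
        RackLocalityTable t = new RackLocalityTable(2, 3);
        t.set(1, 2, 0.8f);
        System.out.println(t.get(1, 2));   // 0.8
      }
    }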

[12/51] [partial] hbase-site git commit: Published site at b4759ce6e72f50ccd9d410bd5917dc5a515414f1.

2018-07-22 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/df8fd1d3/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RegionReplicaRackCandidateGenerator.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RegionReplicaRackCandidateGenerator.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RegionReplicaRackCandidateGenerator.html
index 233dba3..91b9055 100644
(same renumbering hunk as the StochasticLoadBalancer.LoadCandidateGenerator page in [24/51] above)

[07/51] [partial] hbase-site git commit: Published site at b4759ce6e72f50ccd9d410bd5917dc5a515414f1.

2018-07-22 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/df8fd1d3/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.WriteRequestCostFunction.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.WriteRequestCostFunction.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.WriteRequestCostFunction.html
index 233dba3..91b9055 100644
(same renumbering hunk as the StochasticLoadBalancer.LoadCandidateGenerator page in [24/51] above)

[29/51] [partial] hbase-site git commit: Published site at b4759ce6e72f50ccd9d410bd5917dc5a515414f1.

2018-07-22 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/df8fd1d3/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.CPRequestCostFunction.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.CPRequestCostFunction.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.CPRequestCostFunction.html
index 233dba3..91b9055 100644
(same renumbering hunk as the StochasticLoadBalancer.LoadCandidateGenerator page in [24/51] above)

[42/51] [partial] hbase-site git commit: Published site at b4759ce6e72f50ccd9d410bd5917dc5a515414f1.

2018-07-22 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/df8fd1d3/devapidocs/org/apache/hadoop/hbase/master/procedure/RestoreSnapshotProcedure.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/master/procedure/RestoreSnapshotProcedure.html b/devapidocs/org/apache/hadoop/hbase/master/procedure/RestoreSnapshotProcedure.html
index 012416e..9d1748f 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/procedure/RestoreSnapshotProcedure.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/procedure/RestoreSnapshotProcedure.html

The generated method table grows from 19 to 21 entries; the two additions
are:

+  private void addRegionsToInMemoryStates(List<RegionInfo> regionInfos,
+      MasterProcedureEnv env, int regionReplication)
+    Add regions to in-memory states
+
+  private void deleteRegionsFromInMemoryStates(List<RegionInfo> regionInfos,
+      MasterProcedureEnv env, int regionReplication)
+    Delete regions from in-memory states

The remaining hunks only re-letter anchors for the unchanged methods:
deserializeStateData, executeFromState, getInitialState, getMonitorStatus,
getState, getStateId, getTableName, getTableOperationType,
isRollbackSupported, isTraceEnabled, prepareRestore, restoreSnapshot,
restoreSnapshotAcl, rollbackState, serializeStateData, toStringClassDetails
(truncated).
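Read with the deleteRegions helper above, the pattern is: a restore first
purges the regions it replaces from the master's in-memory state, then
registers the regions materialized from the snapshot, one entry per replica.
A self-contained sketch under those assumptions (all types are simplified
stand-ins, not the HBase classes):

    import java.util.ArrayList;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    // Simplified restore bookkeeping: region name + replica id -> state.
    class InMemoryStates {
      private final Map<String, String> states = new HashMap<>();

      // Mirrors deleteRegionsFromInMemoryStates: drop every replica of each region.
      void deleteRegions(List<String> regions, int regionReplication) {
        for (String region : regions) {
          for (int replica = 0; replica < regionReplication; replica++) {
            states.remove(region + "_" + replica);
          }
        }
      }

      // Mirrors addRegionsToInMemoryStates: register every replica as assignable.
      void addRegions(List<String> regions, int regionReplication) {
        for (String region : regions) {
          for (int replica = 0; replica < regionReplication; replica++) {
            states.put(region + "_" + replica, "OFFLINE");
          }
        }
      }

      int size() {
        return states.size();
      }

      public static void main(String[] args) {
        InMemoryStates s = new InMemoryStates();
        List<String> old = new ArrayList<>(List.of("r1", "r2"));
        s.addRegions(old, 2);
        s.deleteRegions(old, 2);          // purge what the snapshot replaces
        s.addRegions(List.of("r3"), 2);   // register restored regions
        System.out.println(s.size());     // 2 (one region x two replicas)
      }
    }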

[44/51] [partial] hbase-site git commit: Published site at b4759ce6e72f50ccd9d410bd5917dc5a515414f1.

2018-07-22 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/df8fd1d3/devapidocs/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.CPRequestCostFunction.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.CPRequestCostFunction.html b/devapidocs/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.CPRequestCostFunction.html
index 8967a31..0e9c872 100644

Link-target churn only; the documented API is unchanged:

  static class StochasticLoadBalancer.CPRequestCostFunction
      extends StochasticLoadBalancer.CostFromRegionLoadAsRateFunction
  Compute the cost of total number of coprocessor requests. The more
  unbalanced the higher the computed cost will be. This uses a rolling
  average of regionload.

    private static final String CP_REQUEST_COST_KEY
    private static final float DEFAULT_CP_REQUEST_COST
    CPRequestCostFunction(org.apache.hadoop.conf.Configuration conf)
    protected double getCostFromRl(BalancerRegionLoad rl)

diff --git a/devapidocs/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.CandidateGenerator.html b/devapidocs/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.CandidateGenerator.html
index 5c20e64..b661a94 100644

Link-target churn only; the documented API is unchanged:

  abstract static class StochasticLoadBalancer.CandidateGenerator
      extends java.lang.Object
  Generates a candidate action to be applied to the cluster for cost
  function search.

    CandidateGenerator()
    abstract BaseLoadBalancer.Cluster.Action generate(BaseLoadBalancer.Cluster cluster)
    protected int pickRandomRegion(BaseLoadBalancer.Cluster cluster, int server,
        double chanceOfNoSwap)
    protected int pickRandomServer(BaseLoadBalancer.Cluster cluster)
    protected int pickRandomRack(BaseLoadBalancer.Cluster cluster)
    protected int pickOtherRandomServer(BaseLoadBalancer.Cluster cluster, int serverIndex)
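The CandidateGenerator contract is small: given a cluster snapshot, propose
one random mutation, using the pickRandom* helpers to choose endpoints. A
self-contained sketch of a trivial generator over a toy cluster model (the
Cluster and Action types here are stand-ins, not the HBase classes):

    import java.util.Random;

    // Toy cluster model: regionsPerServer[s] lists region ids hosted by server s.
    class ToyCluster {
      final int[][] regionsPerServer;
      ToyCluster(int[][] regionsPerServer) { this.regionsPerServer = regionsPerServer; }
      int numServers() { return regionsPerServer.length; }
    }

    // Trivial generator in the spirit of CandidateGenerator: pick a source
    // server, maybe a region on it, and a distinct destination server.
    class RandomMoveGenerator {
      private static final Random RANDOM = new Random();

      int pickRandomServer(ToyCluster cluster) {
        return cluster.numServers() < 1 ? -1 : RANDOM.nextInt(cluster.numServers());
      }

      int pickOtherRandomServer(ToyCluster cluster, int serverIndex) {
        if (cluster.numServers() < 2) return -1;
        int other;
        do {
          other = pickRandomServer(cluster);
        } while (other == serverIndex);     // retry until distinct, as the name implies
        return other;
      }

      int pickRandomRegion(ToyCluster cluster, int server, double chanceOfNoSwap) {
        int[] regions = cluster.regionsPerServer[server];
        if (regions.length == 0 || RANDOM.nextFloat() < chanceOfNoSwap) {
          return -1;                        // -1 signals "plain move, no swap"
        }
        return regions[RANDOM.nextInt(regions.length)];
      }

      String generate(ToyCluster cluster) {
        int from = pickRandomServer(cluster);
        int region = pickRandomRegion(cluster, from, 0.5);
        int to = pickOtherRandomServer(cluster, from);
        return "move region " + region + " from server " + from + " to server " + to;
      }

      public static void main(String[] args) {
        ToyCluster c = new ToyCluster(new int[][] { {0, 1}, {2}, {} });
        System.out.println(new RandomMoveGenerator().generate(c));
      }
    }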

[23/51] [partial] hbase-site git commit: Published site at b4759ce6e72f50ccd9d410bd5917dc5a515414f1.

2018-07-22 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/df8fd1d3/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.LocalityBasedCandidateGenerator.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.LocalityBasedCandidateGenerator.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.LocalityBasedCandidateGenerator.html
index 233dba3..91b9055 100644
(same renumbering hunk as the StochasticLoadBalancer.LoadCandidateGenerator page in [24/51] above)

[10/51] [partial] hbase-site git commit: Published site at b4759ce6e72f50ccd9d410bd5917dc5a515414f1.

2018-07-22 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/df8fd1d3/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.ServerLocalityCostFunction.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.ServerLocalityCostFunction.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.ServerLocalityCostFunction.html
index 233dba3..91b9055 100644
(same renumbering hunk as the StochasticLoadBalancer.LoadCandidateGenerator page in [24/51] above)

[47/51] [partial] hbase-site git commit: Published site at b4759ce6e72f50ccd9d410bd5917dc5a515414f1.

2018-07-22 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/df8fd1d3/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionInfo.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionInfo.html 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionInfo.html
index 48c3c8d..0529e65 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionInfo.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionInfo.html
@@ -1760,68 +1760,72 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.
           ServerName tertiaryHost)

+void
+FavoredNodesManager.deleteFavoredNodesForRegion(RegionInfo regionInfo)
+
 List<ServerName>
 FavoredNodeAssignmentHelper.generateFavoredNodes(RegionInfo hri)

 void
 FavoredNodesPromoter.generateFavoredNodesForDaughter(List<ServerName> servers, RegionInfo parent, RegionInfo hriA, RegionInfo hriB)

 void
 FavoredNodeLoadBalancer.generateFavoredNodesForDaughter(List<ServerName> servers, RegionInfo parent, RegionInfo regionA, RegionInfo regionB)

 void
 FavoredNodesPromoter.generateFavoredNodesForMergedRegion(RegionInfo merged, RegionInfo hriA, RegionInfo hriB)

 void
 FavoredNodeLoadBalancer.generateFavoredNodesForMergedRegion(RegionInfo merged, RegionInfo regionA, RegionInfo regionB)

 List<ServerName>
 FavoredNodeLoadBalancer.getFavoredNodes(RegionInfo regionInfo)

 List<ServerName>
 FavoredNodesManager.getFavoredNodes(RegionInfo regionInfo)

 List<ServerName>
 FavoredNodesPlan.getFavoredNodes(RegionInfo region)

 List<ServerName>
 FavoredNodesManager.getFavoredNodesWithDNPort(RegionInfo regionInfo)

 ServerName[]
 FavoredNodeAssignmentHelper.getSecondaryAndTertiary(RegionInfo regionInfo, ServerName primaryRS)

 static boolean
 FavoredNodesManager.isFavoredNodeApplicable(RegionInfo regionInfo)

 private static Put
 FavoredNodeAssignmentHelper.makePutFromRegionInfo(RegionInfo regionInfo, List<ServerName> favoredNodeList)
 Generates and returns a Put containing the region info for the catalog table and the servers

 private ServerName[]
 FavoredNodeAssignmentHelper.multiRackCase(RegionInfo regionInfo, ServerName primaryRS,
@@ -1829,7 +1833,7 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.
 Place secondary and tertiary nodes in a multi rack case.

 private ServerName[]
 FavoredNodeAssignmentHelper.multiRackCaseWithRestrictions(Map<ServerName, Set<RegionInfo>> serverToPrimaries, Map<RegionInfo, ServerName[]> secondaryAndTertiaryMap,
@@ -1837,24 +1841,24 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.
           ServerName primaryRS, RegionInfo regionInfo)

 ServerName
 FavoredNodeLoadBalancer.randomAssignment(RegionInfo regionInfo, List<ServerName> servers)

 List<
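
For readers skimming the archive: the one functional addition in this class-use table is FavoredNodesManager.deleteFavoredNodesForRegion(RegionInfo); everything else is row renumbering. A hedged, self-contained sketch of what a method with that shape typically does, assuming the manager keeps a region-to-favored-nodes map; every type below is an invented stand-in, not the real HBase class:

import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

/**
 * Sketch only: a manager drops its favored-node mapping when a region goes
 * away (for example after a merge). RegionInfo/ServerName here are stubs.
 */
public class FavoredNodesSketch {
  record RegionInfo(String encodedName) {}
  record ServerName(String host) {}

  static final class Manager {
    private final Map<RegionInfo, List<ServerName>> favoredNodes = new ConcurrentHashMap<>();

    void put(RegionInfo region, List<ServerName> nodes) {
      favoredNodes.put(region, nodes);
    }

    // Mirrors the signature added in the diff: remove the mapping for one region.
    void deleteFavoredNodesForRegion(RegionInfo region) {
      favoredNodes.remove(region);
    }

    int size() {
      return favoredNodes.size();
    }
  }

  public static void main(String[] args) {
    Manager m = new Manager();
    RegionInfo r = new RegionInfo("abc123");
    m.put(r, List.of(new ServerName("rs1"), new ServerName("rs2")));
    m.deleteFavoredNodesForRegion(r); // mapping is gone once the region is deleted
    System.out.println(m.size());     // prints 0
  }
}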

[15/51] [partial] hbase-site git commit: Published site at b4759ce6e72f50ccd9d410bd5917dc5a515414f1.

2018-07-22 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/df8fd1d3/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RegionCountSkewCostFunction.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RegionCountSkewCostFunction.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RegionCountSkewCostFunction.html
index 233dba3..91b9055 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RegionCountSkewCostFunction.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RegionCountSkewCostFunction.html
@@ -540,1205 +540,1204 @@
 532      sm.getRegionMetrics().forEach((byte[] regionName, RegionMetrics rm) -> {
 533        Deque<BalancerRegionLoad> rLoads = oldLoads.get(Bytes.toString(regionName));
 534        if (rLoads == null) {
-535          // There was nothing there
-536          rLoads = new ArrayDeque<>();
-537        } else if (rLoads.size() >= numRegionLoadsToRemember) {
-538          rLoads.remove();
-539        }
-540        rLoads.add(new BalancerRegionLoad(rm));
-541        loads.put(Bytes.toString(regionName), rLoads);
-542      });
-543    });
-544
-545    for(CostFromRegionLoadFunction cost : regionLoadFunctions) {
-546      cost.setLoads(loads);
-547    }
-548  }
-549
-550  protected void initCosts(Cluster cluster) {
-551    for (CostFunction c:costFunctions) {
-552      c.init(cluster);
-553    }
-554  }
-555
-556  protected void updateCostsWithAction(Cluster cluster, Action action) {
-557    for (CostFunction c : costFunctions) {
-558      c.postAction(action);
-559    }
-560  }
-561
-562  /**
-563   * Get the names of the cost functions
-564   */
-565  public String[] getCostFunctionNames() {
-566    if (costFunctions == null) return null;
-567    String[] ret = new String[costFunctions.length];
-568    for (int i = 0; i < costFunctions.length; i++) {
-569      CostFunction c = costFunctions[i];
-570      ret[i] = c.getClass().getSimpleName();
-571    }
-572
-573    return ret;
-574  }
-575
-576  /**
-577   * This is the main cost function.  It will compute a cost associated with a proposed cluster
-578   * state.  All different costs will be combined with their multipliers to produce a double cost.
-579   *
-580   * @param cluster The state of the cluster
-581   * @param previousCost the previous cost. This is used as an early out.
-582   * @return a double of a cost associated with the proposed cluster state.  This cost is an
-583   *         aggregate of all individual cost functions.
-584   */
-585  protected double computeCost(Cluster cluster, double previousCost) {
-586    double total = 0;
-587
-588    for (int i = 0; i < costFunctions.length; i++) {
-589      CostFunction c = costFunctions[i];
-590      this.tempFunctionCosts[i] = 0.0;
-591
-592      if (c.getMultiplier() <= 0) {
-593        continue;
-594      }
-595
-596      Float multiplier = c.getMultiplier();
-597      Double cost = c.cost();
-598
-599      this.tempFunctionCosts[i] = multiplier*cost;
-600      total += this.tempFunctionCosts[i];
-601
-602      if (total > previousCost) {
-603        break;
-604      }
-605    }
-606
-607    return total;
-608  }
-609
-610  /** Generates a candidate action to be applied to the cluster for cost function search */
-611  abstract static class CandidateGenerator {
-612    abstract Cluster.Action generate(Cluster cluster);
-613
-614    /**
-615     * From a list of regions pick a random one. Null can be returned which
-616     * {@link StochasticLoadBalancer#balanceCluster(Map)} recognize as signal to try a region move
-617     * rather than swap.
-618     *
-619     * @param cluster        The state of the cluster
-620     * @param server         index of the server
-621     * @param chanceOfNoSwap Chance that this will decide to try a move rather
-622     *                       than a swap.
-623     * @return a random {@link RegionInfo} or null if an asymmetrical move is
-624     *         suggested.
-625     */
-626    protected int pickRandomRegion(Cluster cluster, int server, double chanceOfNoSwap) {
-627      // Check to see if this is just a move.
-628      if (cluster.regionsPerServer[server].length == 0 || RANDOM.nextFloat() < chanceOfNoSwap) {
-629        // signal a move only.
-630        return -1;
-631      }
-632      int rand = RANDOM.nextInt(cluster.regionsPerServer[server].length);
-633      return cluster.regionsPerServer[server][rand];
-634
-635    }
-636    protected int pickRandomServer(Cluster cluster) {
-637      if (cluster.numServers < 1) {
-638        return -1;
-639      }
-640
-641      return RANDOM.nextInt(cluster.numServers);
-642    }
-643
-644    protected int pickRandomRack(Cluster cluster) {
-645      if (cluster.numRacks < 1) {
-
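
The computeCost loop above is the scoring step of the balancer's search: every enabled cost function contributes multiplier * cost to a running total, and the loop breaks as soon as the total exceeds previousCost, because a proposal already costlier than the incumbent can never be accepted. A minimal, self-contained sketch of the same pattern; CostTerm and the weights are hypothetical stand-ins for illustration, not the HBase API:

import java.util.function.DoubleSupplier;

/** Weighted cost terms with an early out, in the style of computeCost above. */
public class EarlyOutCost {
  static final class CostTerm {
    final float multiplier;    // weight; terms with weight <= 0 are skipped
    final DoubleSupplier cost; // normalized cost, typically in [0, 1]
    CostTerm(float multiplier, DoubleSupplier cost) {
      this.multiplier = multiplier;
      this.cost = cost;
    }
  }

  static double computeCost(CostTerm[] terms, double previousCost) {
    double total = 0;
    for (CostTerm t : terms) {
      if (t.multiplier <= 0) {
        continue; // disabled term, like getMultiplier() <= 0 above
      }
      total += t.multiplier * t.cost.getAsDouble();
      if (total > previousCost) {
        break;    // already worse than the incumbent state; stop evaluating
      }
    }
    return total;
  }

  public static void main(String[] args) {
    CostTerm[] terms = {
      new CostTerm(500f, () -> 0.002), // e.g. a region-count skew term
      new CostTerm(0f,   () -> 0.9),   // disabled term, never evaluated
      new CostTerm(5f,   () -> 0.4),
    };
    // 500*0.002 = 1.0, then 5*0.4 pushes the total to 3.0 > 1.5, so the
    // loop exits early; prints 3.0.
    System.out.println(computeCost(terms, 1.5));
  }
}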

[32/51] [partial] hbase-site git commit: Published site at b4759ce6e72f50ccd9d410bd5917dc5a515414f1.

2018-07-22 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/df8fd1d3/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.ServerStateNode.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.ServerStateNode.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.ServerStateNode.html
index 5f41fe7..c8158b5 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.ServerStateNode.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.ServerStateNode.html
@@ -524,636 +524,639 @@
 516    }
 517  }
 518
-519  ArrayList<RegionStateNode> getTableRegionStateNodes(final TableName tableName) {
-520    final ArrayList<RegionStateNode> regions = new ArrayList<RegionStateNode>();
-521    for (RegionStateNode node: regionsMap.tailMap(tableName.getName()).values()) {
-522      if (!node.getTable().equals(tableName)) break;
-523      regions.add(node);
-524    }
-525    return regions;
-526  }
-527
-528  ArrayList<RegionState> getTableRegionStates(final TableName tableName) {
-529    final ArrayList<RegionState> regions = new ArrayList<RegionState>();
-530    for (RegionStateNode node: regionsMap.tailMap(tableName.getName()).values()) {
-531      if (!node.getTable().equals(tableName)) break;
-532      regions.add(node.toRegionState());
-533    }
-534    return regions;
-535  }
-536
-537  ArrayList<RegionInfo> getTableRegionsInfo(final TableName tableName) {
-538    final ArrayList<RegionInfo> regions = new ArrayList<RegionInfo>();
-539    for (RegionStateNode node: regionsMap.tailMap(tableName.getName()).values()) {
-540      if (!node.getTable().equals(tableName)) break;
-541      regions.add(node.getRegionInfo());
-542    }
-543    return regions;
-544  }
-545
-546  Collection<RegionStateNode> getRegionStateNodes() {
-547    return regionsMap.values();
+519  public void deleteRegions(final List<RegionInfo> regionInfos) {
+520    regionInfos.forEach(this::deleteRegion);
+521  }
+522
+523  ArrayList<RegionStateNode> getTableRegionStateNodes(final TableName tableName) {
+524    final ArrayList<RegionStateNode> regions = new ArrayList<RegionStateNode>();
+525    for (RegionStateNode node: regionsMap.tailMap(tableName.getName()).values()) {
+526      if (!node.getTable().equals(tableName)) break;
+527      regions.add(node);
+528    }
+529    return regions;
+530  }
+531
+532  ArrayList<RegionState> getTableRegionStates(final TableName tableName) {
+533    final ArrayList<RegionState> regions = new ArrayList<RegionState>();
+534    for (RegionStateNode node: regionsMap.tailMap(tableName.getName()).values()) {
+535      if (!node.getTable().equals(tableName)) break;
+536      regions.add(node.toRegionState());
+537    }
+538    return regions;
+539  }
+540
+541  ArrayList<RegionInfo> getTableRegionsInfo(final TableName tableName) {
+542    final ArrayList<RegionInfo> regions = new ArrayList<RegionInfo>();
+543    for (RegionStateNode node: regionsMap.tailMap(tableName.getName()).values()) {
+544      if (!node.getTable().equals(tableName)) break;
+545      regions.add(node.getRegionInfo());
+546    }
+547    return regions;
 548  }
 549
-550  public ArrayList<RegionState> getRegionStates() {
-551    final ArrayList<RegionState> regions = new ArrayList<RegionState>(regionsMap.size());
-552    for (RegionStateNode node: regionsMap.values()) {
-553      regions.add(node.toRegionState());
-554    }
-555    return regions;
-556  }
-557
-558  // ==========================================================================
-559  //  RegionState helpers
-560  // ==========================================================================
-561  public RegionState getRegionState(final RegionInfo regionInfo) {
-562    RegionStateNode regionStateNode = getRegionStateNode(regionInfo);
-563    return regionStateNode == null ? null : regionStateNode.toRegionState();
-564  }
-565
-566  public RegionState getRegionState(final String encodedRegionName) {
-567    // TODO: Need a map  but it is just dispatch merge...
-568    for (RegionStateNode node: regionsMap.values()) {
-569      if (node.getRegionInfo().getEncodedName().equals(encodedRegionName)) {
-570        return node.toRegionState();
-571      }
-572    }
-573    return null;
-574  }
-575
-576  // ============================================================================================
-577  //  TODO: helpers
-578  // ============================================================================================
-579  public boolean hasTableRegionStates(final TableName tableName) {
-580    // TODO
-581    return !getTableRegionStates(tableName).isEmpty();
-582  }
-583
-584  /**
-585   * @return Return online regions of table; does not include OFFLINE or SPLITTING regions.
-586   */
-587  public List
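
The only functional change in this hunk is the new bulk helper at +519 through +521: deleteRegions applies the existing single-region deleteRegion over a list via a method reference. A small self-contained sketch of the same shape; Registry and the RegionInfo stub below are invented stand-ins, not the real HBase types:

import java.util.ArrayList;
import java.util.List;

/** Sketch of a forEach + method-reference bulk delete, as in RegionStates.deleteRegions. */
public class BulkDelete {
  static final class RegionInfo {
    final String encodedName;
    RegionInfo(String encodedName) {
      this.encodedName = encodedName;
    }
  }

  static final class Registry {
    private final List<RegionInfo> regions = new ArrayList<>();

    void add(RegionInfo r) {
      regions.add(r);
    }

    void deleteRegion(RegionInfo r) {
      regions.remove(r);
    }

    // Same shape as the added method: delegate element-wise to deleteRegion.
    void deleteRegions(List<RegionInfo> regionInfos) {
      regionInfos.forEach(this::deleteRegion);
    }

    int size() {
      return regions.size();
    }
  }

  public static void main(String[] args) {
    Registry registry = new Registry();
    RegionInfo a = new RegionInfo("a");
    RegionInfo b = new RegionInfo("b");
    registry.add(a);
    registry.add(b);
    registry.deleteRegions(List.of(a, b)); // both entries removed in one call
    System.out.println(registry.size());   // prints 0
  }
}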

[34/51] [partial] hbase-site git commit: Published site at b4759ce6e72f50ccd9d410bd5917dc5a515414f1.

2018-07-22 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/df8fd1d3/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.ServerReportEvent.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.ServerReportEvent.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.ServerReportEvent.html
index 5f41fe7..c8158b5 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.ServerReportEvent.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.ServerReportEvent.html

[49/51] [partial] hbase-site git commit: Published site at b4759ce6e72f50ccd9d410bd5917dc5a515414f1.

2018-07-22 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/df8fd1d3/checkstyle-aggregate.html
--
diff --git a/checkstyle-aggregate.html b/checkstyle-aggregate.html
index 0072320..8e84ec6 100644
--- a/checkstyle-aggregate.html
+++ b/checkstyle-aggregate.html
@@ -7,7 +7,7 @@
 Apache HBase – Checkstyle Results

@@ -284,7 +284,7 @@
 Files: 3690 | Info: 0 | Warnings: 0 | Errors: 15669 -> 15670
 Files

@@ -677,7 +677,7 @@
 org/apache/hadoop/hbase/ServerName.java | 0 | 0 | 24 -> 25
 org/apache/hadoop/hbase/SplitLogCounters.java | 0

@@ -9893,7 +9893,7 @@
 sortStaticImportsAlphabetically: "true"
 groups: "*,org.apache.hbase.thirdparty,org.apache.hadoop.hbase.shaded"
 option: "top"
 1162 -> 1163
 Error

@@ -12732,385 +12732,385 @@
 Severity | Category    | Rule                              | Message                                                                         | Line
 Error    | javadoc     | NonEmptyAtclauseDescription       | At-clause should have a non-empty description.                                  | 2437 -> 2447
 Error    | javadoc     | NonEmptyAtclauseDescription       | At-clause should have a non-empty description.                                  | 2438 -> 2448
 Error    | javadoc     | NonEmptyAtclauseDescription       | At-clause should have a non-empty description.                                  | 2641 -> 2657
 Error    | indentation | Indentation                       | 'method def' child has incorrect indentation level 3, expected level should be 4. | 2679 -> 2695
 Error    | indentation | Indentation                       | 'method def modifier' has incorrect indentation level 6, expected level should be one of the following: 8, 10. | 2710 -> 2726
 Error    | indentation | Indentation                       | 'method def' child has incorrect indentation level 8, expected level should be one of the following: 10, 12. | 2712 -> 2728
 Error    | indentation | Indentation                       | 'method def rcurly' has incorrect indentation level 6, expected level should be one of the following: 8, 10. | 2713 -> 2729
 Error    | indentation | Indentation                       | 'object def rcurly' has incorrect indentation level 4, expected level should be one of the following: 6, 8. | 2714 -> 2730
 Error    | indentation | Indentation                       | 'while' child has incorrect indentation level 7, expected level should be 6.    | 2725 -> 2741
 Error    | javadoc     | NonEmptyAtclauseDescription       | At-clause should have a non-empty description.                                  | 2766 -> 2782
 Error    | indentation | Indentation                       | 'throws' has incorrect indentation level 2, expected level should be 4.         | 2787 -> 2803
 Error    | javadoc     | NonEmptyAtclauseDescription       | At-clause should have a non-empty description.                                  | 2842 -> 2858
 Error    | blocks      | NeedBraces                        | 'if' construct must use '{}'s.                                                  | 2874 -> 2890
 Error    | javadoc     | JavadocTagContinuationIndentation | Line continuation have incorrect indentation level, expected level should be 2. | 2901 -> 2917
 Error    | javadoc     | NonEmptyAtclauseDescription       | At-clause should have a non-empty description.                                  | 2922 -> 2938
 Error    | javadoc     | NonEmptyAtclauseDescription       | At-clause should have a non-empty description.                                  | 2923 -> 2939
 Error    | javadoc     | NonEmptyAtclauseDescription       | At-clause should have a non-empty description.                                  | 2939 -> 2955
 Error    | javadoc     | NonEmptyAtclauseDescription       | At-clause should have a non-empty description.                                  | 2940 -> 2956
 Error    | indentation | Indentation                       | 'throws' has incorrect indentation level 2, expected level should be 4.         | 2943 -> 2959
 Error    | blocks      | NeedBraces                        | 'if' construct must use '{}'s.                                                  | 2994 -> 3010
 Error    | blocks      | NeedBraces                        | 'if' construct must use '{}'s.                                                  | 2997 -> 3013
 Error    | javadoc     | NonEmptyAtclauseDescription       | At-clause should have a non-empty description.                                  | 3014 -> 3030
 Error    | javadoc     | NonEmptyAtclauseDescription       | At-clause should have a non-empty description.                                  | 3015 -> 3031
 Error    | javadoc     | NonEmptyAtclauseDescription       | At-clause should have a non-empty description.                                  | 3016 -> 3032
 Error    | javadoc     | NonEmptyAtclauseDescription       | At-clause should have a non-empty description.                                  | 3029 -> 3045
 Error    | javadoc     | NonEmptyAtclauseDescription       | At-clause should have a non-empty description.                                  | 3030 -> 3046
 Error    | indentation | Indentation                       | 'throws' has incorrect indentation level 2, expected level should be 4.         | 3033 -> 3049
 Error    | indentation | Indentation                       | 'throws' has incorrect indentation level 2, expected level should be 4.         | 3038 -> 3054
 Error    | javadoc     | NonEmptyAtclauseDescription       | At-clause should have a non-empty description.                                  | 3046 -> 3062
 Error    | javadoc     | NonEmptyAtclauseDescription       | At-clause should have a non-empty description.                                  | 3047 -> 3063
 Error    | javadoc     | NonEmptyAtclauseDescription       | At-clause should have a non-empty description.                                  | 3063 -> 3079
 Error    | javadoc     | NonEmptyAtclauseDescription       | At-clause should have a non-empty description.                                  | 3064 -> 3080
 Error    | javadoc     | NonEmptyAtclauseDescription       | At-clause should have a non-empty description.                                  | 3076 -> 3092
 Error    | javadoc     | NonEmptyAtclauseDescription       | At-clause should have a non-empty description.                                  | 3097 -> 3113
 Error    | javadoc     | NonEmptyAtclauseDescription       | At-clause should have a non-empty description.                                  | 3129 -> 3145
 Error    | indentation | Indentation                       | 'throws' has incorrect indentation level 2, expected level should be 4.         | 3133 -> 3149
 Error    | javadoc     | NonEmptyAtclauseDescription       | At-clause sh

[08/51] [partial] hbase-site git commit: Published site at b4759ce6e72f50ccd9d410bd5917dc5a515414f1.

2018-07-22 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/df8fd1d3/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.TableSkewCostFunction.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.TableSkewCostFunction.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.TableSkewCostFunction.html
index 233dba3..91b9055 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.TableSkewCostFunction.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.TableSkewCostFunction.html

[41/51] [partial] hbase-site git commit: Published site at b4759ce6e72f50ccd9d410bd5917dc5a515414f1.

2018-07-22 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/df8fd1d3/devapidocs/org/apache/hadoop/hbase/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/package-tree.html b/devapidocs/org/apache/hadoop/hbase/package-tree.html
index db92bf5..b409b75 100644
--- a/devapidocs/org/apache/hadoop/hbase/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/package-tree.html
@@ -437,19 +437,19 @@
 java.lang.Enum (implements java.lang.Comparable, java.io.Serializable)
-org.apache.hadoop.hbase.KeepDeletedCells
 org.apache.hadoop.hbase.Size.Unit
-org.apache.hadoop.hbase.KeyValue.Type
-org.apache.hadoop.hbase.MetaTableAccessor.QueryType
-org.apache.hadoop.hbase.Coprocessor.State
-org.apache.hadoop.hbase.ClusterMetrics.Option
-org.apache.hadoop.hbase.CompareOperator
+org.apache.hadoop.hbase.MemoryCompactionPolicy
 org.apache.hadoop.hbase.HealthChecker.HealthCheckerExitStatus
-org.apache.hadoop.hbase.CellBuilderType
-org.apache.hadoop.hbase.CompatibilitySingletonFactory.SingletonStorage
+org.apache.hadoop.hbase.Coprocessor.State
 org.apache.hadoop.hbase.HConstants.OperationStatusCode
-org.apache.hadoop.hbase.MemoryCompactionPolicy
+org.apache.hadoop.hbase.MetaTableAccessor.QueryType
 org.apache.hadoop.hbase.Cell.Type
+org.apache.hadoop.hbase.CompatibilitySingletonFactory.SingletonStorage
+org.apache.hadoop.hbase.CellBuilderType
+org.apache.hadoop.hbase.ClusterMetrics.Option
+org.apache.hadoop.hbase.KeyValue.Type
+org.apache.hadoop.hbase.CompareOperator
+org.apache.hadoop.hbase.KeepDeletedCells
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/df8fd1d3/devapidocs/org/apache/hadoop/hbase/procedure2/ProcedureDeque.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/procedure2/ProcedureDeque.html b/devapidocs/org/apache/hadoop/hbase/procedure2/ProcedureDeque.html
index 7ce9548..4f64733 100644
--- a/devapidocs/org/apache/hadoop/hbase/procedure2/ProcedureDeque.html
+++ b/devapidocs/org/apache/hadoop/hbase/procedure2/ProcedureDeque.html
@@ -211,7 +211,7 @@ extends ArrayDeque
 ProcedureDeque
-public ProcedureDeque()
+public ProcedureDeque()
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/df8fd1d3/devapidocs/org/apache/hadoop/hbase/procedure2/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/procedure2/package-tree.html b/devapidocs/org/apache/hadoop/hbase/procedure2/package-tree.html
index aac3557..fb1f489 100644
--- a/devapidocs/org/apache/hadoop/hbase/procedure2/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/procedure2/package-tree.html
@@ -216,11 +216,11 @@
 java.lang.Enum (implements java.lang.Comparable, java.io.Serializable)
+org.apache.hadoop.hbase.procedure2.LockedResourceType
+org.apache.hadoop.hbase.procedure2.Procedure.LockState
 org.apache.hadoop.hbase.procedure2.RootProcedureState.State
-org.apache.hadoop.hbase.procedure2.LockType
 org.apache.hadoop.hbase.procedure2.StateMachineProcedure.Flow
-org.apache.hadoop.hbase.procedure2.Procedure.LockState
-org.apache.hadoop.hbase.procedure2.LockedResourceType
+org.apache.hadoop.hbase.procedure2.LockType
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/df8fd1d3/devapidocs/org/apache/hadoop/hbase/quotas/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/quotas/package-tree.html b/devapidocs/org/apache/hadoop/hbase/quotas/package-tree.html
index 7e246b8..fe1b911 100644
--- a/devapidocs/org/apache/hadoop/hbase/quotas/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/quotas/package-tree.html
@@ -230,12 +230,12 @@
 java.lang.Enum (implements java.lang.Comparable, java.io.Serial

[18/51] [partial] hbase-site git commit: Published site at b4759ce6e72f50ccd9d410bd5917dc5a515414f1.

2018-07-22 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/df8fd1d3/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RackLocalityCostFunction.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RackLocalityCostFunction.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RackLocalityCostFunction.html
index 233dba3..91b9055 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RackLocalityCostFunction.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RackLocalityCostFunction.html

hbase-site git commit: INFRA-10751 Empty commit

2018-07-22 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site df8fd1d31 -> 0700ce287


INFRA-10751 Empty commit


Project: http://git-wip-us.apache.org/repos/asf/hbase-site/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase-site/commit/0700ce28
Tree: http://git-wip-us.apache.org/repos/asf/hbase-site/tree/0700ce28
Diff: http://git-wip-us.apache.org/repos/asf/hbase-site/diff/0700ce28

Branch: refs/heads/asf-site
Commit: 0700ce287c6a07afc00aa0fb901c4c9d3756f43c
Parents: df8fd1d
Author: jenkins 
Authored: Sun Jul 22 09:30:57 2018 +
Committer: jenkins 
Committed: Sun Jul 22 09:30:57 2018 +

--

--




[51/51] [partial] hbase-site git commit: Published site at b4759ce6e72f50ccd9d410bd5917dc5a515414f1.

2018-07-22 Thread git-site-role
Published site at b4759ce6e72f50ccd9d410bd5917dc5a515414f1.


Project: http://git-wip-us.apache.org/repos/asf/hbase-site/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase-site/commit/df8fd1d3
Tree: http://git-wip-us.apache.org/repos/asf/hbase-site/tree/df8fd1d3
Diff: http://git-wip-us.apache.org/repos/asf/hbase-site/diff/df8fd1d3

Branch: refs/heads/asf-site
Commit: df8fd1d3184376cae6648bfecdf3d5725a8ff0b1
Parents: 972a3c8
Author: jenkins 
Authored: Sun Jul 22 09:30:13 2018 +
Committer: jenkins 
Committed: Sun Jul 22 09:30:13 2018 +

--
 acid-semantics.html | 4 +-
 apache_hbase_reference_guide.pdf| 4 +-
 apidocs/org/apache/hadoop/hbase/ServerName.html |66 +-
 .../org/apache/hadoop/hbase/ServerName.html |   747 +-
 book.html   | 2 +-
 bulk-loads.html | 4 +-
 checkstyle-aggregate.html   | 32944 +
 checkstyle.rss  | 4 +-
 coc.html| 4 +-
 dependencies.html   | 4 +-
 dependency-convergence.html | 4 +-
 dependency-info.html| 4 +-
 dependency-management.html  | 4 +-
 devapidocs/constant-values.html | 6 +-
 devapidocs/index-all.html   |20 +-
 .../org/apache/hadoop/hbase/ServerName.html |   126 +-
 .../hadoop/hbase/backup/package-tree.html   | 2 +-
 .../hadoop/hbase/class-use/ServerName.html  | 8 +-
 .../hbase/class-use/TableDescriptors.html   |11 +
 .../client/class-use/ClusterConnection.html | 5 +-
 .../hbase/client/class-use/RegionInfo.html  |86 +-
 .../hadoop/hbase/client/package-tree.html   |26 +-
 .../hadoop/hbase/coprocessor/package-tree.html  | 2 +-
 .../hbase/favored/FavoredNodesManager.html  |41 +-
 .../hadoop/hbase/filter/package-tree.html   |10 +-
 .../hadoop/hbase/io/hfile/package-tree.html | 6 +-
 .../apache/hadoop/hbase/ipc/package-tree.html   | 2 +-
 .../hadoop/hbase/mapreduce/package-tree.html| 2 +-
 .../RegionStates.RegionFailedOpen.html  |22 +-
 .../hbase/master/assignment/RegionStates.html   |   229 +-
 ...asticLoadBalancer.CPRequestCostFunction.html |10 +-
 ...ochasticLoadBalancer.CandidateGenerator.html |22 +-
 ...lancer.CostFromRegionLoadAsRateFunction.html | 6 +-
 ...LoadBalancer.CostFromRegionLoadFunction.html |20 +-
 .../StochasticLoadBalancer.CostFunction.html|28 +-
 ...sticLoadBalancer.LoadCandidateGenerator.html |10 +-
 ...alancer.LocalityBasedCandidateGenerator.html |14 +-
 ...cLoadBalancer.LocalityBasedCostFunction.html |26 +-
 ...icLoadBalancer.MemStoreSizeCostFunction.html |10 +-
 ...StochasticLoadBalancer.MoveCostFunction.html |18 +-
 ...ncer.PrimaryRegionCountSkewCostFunction.html |12 +-
 ...icLoadBalancer.RackLocalityCostFunction.html |10 +-
 ...icLoadBalancer.RandomCandidateGenerator.html | 6 +-
 ...ticLoadBalancer.ReadRequestCostFunction.html |10 +-
 ...oadBalancer.RegionCountSkewCostFunction.html |12 +-
 ...alancer.RegionReplicaCandidateGenerator.html |10 +-
 ...dBalancer.RegionReplicaHostCostFunction.html |26 +-
 ...cer.RegionReplicaRackCandidateGenerator.html | 6 +-
 ...dBalancer.RegionReplicaRackCostFunction.html |12 +-
 ...LoadBalancer.ServerLocalityCostFunction.html |10 +-
 ...asticLoadBalancer.StoreFileCostFunction.html |10 +-
 ...asticLoadBalancer.TableSkewCostFunction.html |10 +-
 ...icLoadBalancer.WriteRequestCostFunction.html |10 +-
 .../master/balancer/StochasticLoadBalancer.html |10 +-
 .../hadoop/hbase/master/package-tree.html   | 6 +-
 .../procedure/RestoreSnapshotProcedure.html |   154 +-
 .../procedure/class-use/MasterProcedureEnv.html |70 +-
 .../hbase/master/procedure/package-tree.html| 2 +-
 .../org/apache/hadoop/hbase/package-tree.html   |18 +-
 .../hadoop/hbase/procedure2/ProcedureDeque.html | 2 +-
 .../hadoop/hbase/procedure2/package-tree.html   | 6 +-
 .../hadoop/hbase/quotas/package-tree.html   | 6 +-
 .../hadoop/hbase/regionserver/package-tree.html |16 +-
 .../regionserver/querymatcher/package-tree.html | 2 +-
 .../hbase/regionserver/wal/package-tree.html| 2 +-
 ...icationEndpoint.RegionReplicaOutputSink.html |26 +-
 ...ionEndpoint.RegionReplicaReplayCallable.html |12 +-
 ...icationEndpoint.RegionReplicaSinkWriter.html |45 +-
 ...ReplicationEndpoint.RetryingRpcCallable.html |12 +-
 .../RegionReplicaReplicationEndpoint.html   |40 +-
 ...icationEndpoint.RegionReplicaOutputSink.html | 5 +-
 .../hbase/security/access/package-tree.html | 4 +-
 .../hadoop/hbase/security/package-tre

[19/51] [partial] hbase-site git commit: Published site at b4759ce6e72f50ccd9d410bd5917dc5a515414f1.

2018-07-22 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/df8fd1d3/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.PrimaryRegionCountSkewCostFunction.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.PrimaryRegionCountSkewCostFunction.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.PrimaryRegionCountSkewCostFunction.html
index 233dba3..91b9055 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.PrimaryRegionCountSkewCostFunction.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.PrimaryRegionCountSkewCostFunction.html

[14/51] [partial] hbase-site git commit: Published site at b4759ce6e72f50ccd9d410bd5917dc5a515414f1.

2018-07-22 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/df8fd1d3/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RegionReplicaCandidateGenerator.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RegionReplicaCandidateGenerator.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RegionReplicaCandidateGenerator.html
index 233dba3..91b9055 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RegionReplicaCandidateGenerator.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RegionReplicaCandidateGenerator.html

[35/51] [partial] hbase-site git commit: Published site at b4759ce6e72f50ccd9d410bd5917dc5a515414f1.

2018-07-22 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/df8fd1d3/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.RegionStateStampComparator.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.RegionStateStampComparator.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.RegionStateStampComparator.html
index 5f41fe7..c8158b5 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.RegionStateStampComparator.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.RegionStateStampComparator.html

[04/51] [partial] hbase-site git commit: Published site at b4759ce6e72f50ccd9d410bd5917dc5a515414f1.

2018-07-22 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/df8fd1d3/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureDeque.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureDeque.html b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureDeque.html
index 7766509..1b983c2 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureDeque.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureDeque.html
@@ -39,7 +39,11 @@
 031 */
 032@InterfaceAudience.Private
 033public class ProcedureDeque extends ArrayDeque<Procedure> {
-034}
+034  public ProcedureDeque() {
+035    // Default is 16 for a list that is rarely used; elements will resize if too small.
+036    super(2);
+037  }
+038}
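
java.util.ArrayDeque's no-arg constructor reserves room for 16 elements, so for deques that are created in large numbers but usually stay empty, asking for a small initial capacity (the super(2) above) can trim the per-instance footprint while the backing array still grows on demand. A minimal sketch of the idea; the numbers are illustrative, not measurements from HBase:

import java.util.ArrayDeque;
import java.util.Deque;

/** Small initial capacity for a rarely-used deque; growth stays transparent. */
public class SmallDeque {
  public static void main(String[] args) {
    // Start small instead of the default 16 slots.
    Deque<String> deque = new ArrayDeque<>(2);

    // Pushing past the initial capacity simply resizes the backing array.
    for (int i = 0; i < 100; i++) {
      deque.addLast("proc-" + i);
    }
    System.out.println(deque.size()); // prints 100
  }
}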
 
 
 



[31/51] [partial] hbase-site git commit: Published site at b4759ce6e72f50ccd9d410bd5917dc5a515414f1.

2018-07-22 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/df8fd1d3/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.html
index 5f41fe7..c8158b5 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.html

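The only behavioral addition in this hunk is the bulk deleteRegions helper, which fans a list out to the existing per-region deleteRegion via a method reference; the rest of the hunk is line renumbering. A minimal sketch of the same pattern in isolation (hypothetical Registry class, not the HBase type; only forEach with a method reference is being illustrated):

    import java.util.ArrayList;
    import java.util.List;

    // Hypothetical stand-in for RegionStates: a container with a single-item
    // delete that gains a bulk variant delegating via a method reference.
    class Registry {
      private final List<String> entries = new ArrayList<>(List.of("r1", "r2", "r3"));

      void deleteEntry(String name) {
        entries.remove(name);
      }

      // Bulk form, mirroring deleteRegions(List<RegionInfo>) above.
      void deleteEntries(List<String> names) {
        names.forEach(this::deleteEntry);
      }

      public static void main(String[] args) {
        Registry r = new Registry();
        r.deleteEntries(List.of("r1", "r3"));
        System.out.println(r.entries); // [r2]
      }
    }
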
[09/51] [partial] hbase-site git commit: Published site at b4759ce6e72f50ccd9d410bd5917dc5a515414f1.

2018-07-22 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/df8fd1d3/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.StoreFileCostFunction.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.StoreFileCostFunction.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.StoreFileCostFunction.html
index 233dba3..91b9055 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.StoreFileCostFunction.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.StoreFileCostFunction.html
@@ -540,1205 +540,1204 @@
 532      sm.getRegionMetrics().forEach((byte[] regionName, RegionMetrics rm) -> {
 533        Deque<BalancerRegionLoad> rLoads = oldLoads.get(Bytes.toString(regionName));
 534        if (rLoads == null) {
-535          // There was nothing there
-536          rLoads = new ArrayDeque<>();
-537        } else if (rLoads.size() >= numRegionLoadsToRemember) {
-538          rLoads.remove();
-539        }
-540        rLoads.add(new BalancerRegionLoad(rm));
-541        loads.put(Bytes.toString(regionName), rLoads);
-542      });
-543    });
-544
-545    for(CostFromRegionLoadFunction cost : regionLoadFunctions) {
-546      cost.setLoads(loads);
-547    }
-548  }
-549
-550  protected void initCosts(Cluster cluster) {
-551    for (CostFunction c:costFunctions) {
-552      c.init(cluster);
-553    }
-554  }
-555
-556  protected void updateCostsWithAction(Cluster cluster, Action action) {
-557    for (CostFunction c : costFunctions) {
-558      c.postAction(action);
-559    }
-560  }
-561
-562  /**
-563   * Get the names of the cost functions
-564   */
-565  public String[] getCostFunctionNames() {
-566    if (costFunctions == null) return null;
-567    String[] ret = new String[costFunctions.length];
-568    for (int i = 0; i < costFunctions.length; i++) {
-569      CostFunction c = costFunctions[i];
-570      ret[i] = c.getClass().getSimpleName();
-571    }
-572
-573    return ret;
-574  }
-575
-576  /**
-577   * This is the main cost function.  It will compute a cost associated with a proposed cluster
-578   * state.  All different costs will be combined with their multipliers to produce a double cost.
-579   *
-580   * @param cluster The state of the cluster
-581   * @param previousCost the previous cost. This is used as an early out.
-582   * @return a double of a cost associated with the proposed cluster state.  This cost is an
-583   *         aggregate of all individual cost functions.
-584   */
-585  protected double computeCost(Cluster cluster, double previousCost) {
-586    double total = 0;
-587
-588    for (int i = 0; i < costFunctions.length; i++) {
-589      CostFunction c = costFunctions[i];
-590      this.tempFunctionCosts[i] = 0.0;
-591
-592      if (c.getMultiplier() <= 0) {
-593        continue;
-594      }
-595
-596      Float multiplier = c.getMultiplier();
-597      Double cost = c.cost();
-598
-599      this.tempFunctionCosts[i] = multiplier*cost;
-600      total += this.tempFunctionCosts[i];
-601
-602      if (total > previousCost) {
-603        break;
-604      }
-605    }
-606
-607    return total;
-608  }
-609
-610  /** Generates a candidate action to be applied to the cluster for cost function search */
-611  abstract static class CandidateGenerator {
-612    abstract Cluster.Action generate(Cluster cluster);
-613
-614    /**
-615     * From a list of regions pick a random one. Null can be returned which
-616     * {@link StochasticLoadBalancer#balanceCluster(Map)} recognize as signal to try a region move
-617     * rather than swap.
-618     *
-619     * @param cluster        The state of the cluster
-620     * @param server         index of the server
-621     * @param chanceOfNoSwap Chance that this will decide to try a move rather
-622     *                       than a swap.
-623     * @return a random {@link RegionInfo} or null if an asymmetrical move is
-624     *         suggested.
-625     */
-626    protected int pickRandomRegion(Cluster cluster, int server, double chanceOfNoSwap) {
-627      // Check to see if this is just a move.
-628      if (cluster.regionsPerServer[server].length == 0 || RANDOM.nextFloat() < chanceOfNoSwap) {
-629        // signal a move only.
-630        return -1;
-631      }
-632      int rand = RANDOM.nextInt(cluster.regionsPerServer[server].length);
-633      return cluster.regionsPerServer[server][rand];
-634
-635    }
-636    protected int pickRandomServer(Cluster cluster) {
-637      if (cluster.numServers < 1) {
-638        return -1;
-639      }
-640
-641      return RANDOM.nextInt(cluster.numServers);
-642    }
-643
-644    protected int pickRandomRack(Cluster cluster) {
-645      if (cluster.numRacks < 1) {
-646        return -1;
-647

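The computeCost loop in the rendered source above is a plain weighted sum with an early exit: each function's cost is scaled by its multiplier, disabled functions (multiplier <= 0) are skipped, and the walk stops as soon as the running total exceeds the previous cost, since such a candidate state can never win. A minimal standalone sketch of that idea, not the HBase API (the WeightedCost record and the numbers are illustrative):

    import java.util.List;
    import java.util.function.DoubleSupplier;

    public class EarlyOutCostDemo {
      // Hypothetical pairing of a cost source with its configured weight.
      record WeightedCost(double multiplier, DoubleSupplier cost) {}

      // Weighted sum with an early out, mirroring computeCost(cluster, previousCost).
      static double computeCost(List<WeightedCost> functions, double previousCost) {
        double total = 0;
        for (WeightedCost f : functions) {
          if (f.multiplier() <= 0) {
            continue; // disabled functions are skipped entirely
          }
          total += f.multiplier() * f.cost().getAsDouble();
          if (total > previousCost) {
            break; // already worse than the best known state; stop evaluating
          }
        }
        return total;
      }

      public static void main(String[] args) {
        List<WeightedCost> fns = List.of(
            new WeightedCost(500, () -> 0.2),
            new WeightedCost(0, () -> 9.9),   // multiplier 0: never evaluated
            new WeightedCost(5, () -> 0.4));
        System.out.println(computeCost(fns, 1000.0)); // 102.0
      }
    }
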
[39/51] [partial] hbase-site git commit: Published site at b4759ce6e72f50ccd9d410bd5917dc5a515414f1.

2018-07-22 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/df8fd1d3/devapidocs/src-html/org/apache/hadoop/hbase/ServerName.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/ServerName.html b/devapidocs/src-html/org/apache/hadoop/hbase/ServerName.html
index f9b4f3e..88e84b4 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/ServerName.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/ServerName.html
@@ -35,156 +35,156 @@
 027import org.apache.hadoop.hbase.net.Address;
 028import org.apache.hadoop.hbase.util.Addressing;
 029import org.apache.hadoop.hbase.util.Bytes;
-030import org.apache.yetus.audience.InterfaceAudience;
-031
+030import org.apache.hbase.thirdparty.com.google.common.collect.Interner;
+031import org.apache.hbase.thirdparty.com.google.common.collect.Interners;
 032import org.apache.hbase.thirdparty.com.google.common.net.InetAddresses;
-033
+033import org.apache.yetus.audience.InterfaceAudience;
 034
-035/**
-036 * Name of a particular incarnation of an HBase Server.
-037 * A {@link ServerName} is used uniquely identifying a server instance in a cluster and is made
-038 * of the combination of hostname, port, and startcode.  The startcode distinguishes restarted
-039 * servers on same hostname and port (startcode is usually timestamp of server startup). The
-040 * {@link #toString()} format of ServerName is safe to use in the  filesystem and as znode name
-041 * up in ZooKeeper.  Its format is:
-042 * <hostname> '{@link #SERVERNAME_SEPARATOR}' <port>
-043 * '{@link #SERVERNAME_SEPARATOR}' <startcode>.
-044 * For example, if hostname is www.example.org, port is 1234,
-045 * and the startcode for the regionserver is 1212121212, then
-046 * the {@link #toString()} would be www.example.org,1234,1212121212.
-047 *
-048 * <p>You can obtain a versioned serialized form of this class by calling
-049 * {@link #getVersionedBytes()}.  To deserialize, call
-050 * {@link #parseVersionedServerName(byte[])}.
-051 *
-052 * <p>Use {@link #getAddress()} to obtain the Server hostname + port
-053 * (Endpoint/Socket Address).
-054 *
-055 * <p>Immutable.
-056 */
-057@InterfaceAudience.Public
-058public class ServerName implements Comparable<ServerName>, Serializable {
-059  private static final long serialVersionUID = 1367463982557264981L;
-060
-061  /**
-062   * Version for this class.
-063   * Its a short rather than a byte so I can for sure distinguish between this
-064   * version of this class and the version previous to this which did not have
-065   * a version.
-066   */
-067  private static final short VERSION = 0;
-068  static final byte [] VERSION_BYTES = Bytes.toBytes(VERSION);
-069
-070  /**
-071   * What to use if no startcode supplied.
-072   */
-073  public static final int NON_STARTCODE = -1;
-074
-075  /**
-076   * This character is used as separator between server hostname, port and
-077   * startcode.
-078   */
-079  public static final String SERVERNAME_SEPARATOR = ",";
-080
-081  public static final Pattern SERVERNAME_PATTERN =
-082    Pattern.compile("[^" + SERVERNAME_SEPARATOR + "]+" +
-083      SERVERNAME_SEPARATOR + Addressing.VALID_PORT_REGEX +
-084      SERVERNAME_SEPARATOR + Addressing.VALID_PORT_REGEX + "$");
-085
-086  /**
-087   * What to use if server name is unknown.
-088   */
-089  public static final String UNKNOWN_SERVERNAME = "#unknown#";
-090
-091  private final String servername;
-092  private final long startcode;
-093  private transient Address address;
-094
-095  /**
-096   * Cached versioned bytes of this ServerName instance.
-097   * @see #getVersionedBytes()
-098   */
-099  private byte [] bytes;
-100  public static final List<ServerName> EMPTY_SERVER_LIST = new ArrayList<>(0);
-101
-102  protected ServerName(final String hostname, final int port, final long startcode) {
-103    this(Address.fromParts(hostname, port), startcode);
-104  }
-105
-106  private ServerName(final Address address, final long startcode) {
-107    // Use HostAndPort to host port and hostname. Does validation and can do ipv6
-108    this.address = address;
-109    this.startcode = startcode;
-110    this.servername = getServerName(this.address.getHostname(),
-111      this.address.getPort(), startcode);
-112  }
-113
-114  private ServerName(final String serverName) {
-115    this(parseHostname(serverName), parsePort(serverName),
-116      parseStartcode(serverName));
-117  }
-118
-119  private ServerName(final String hostAndPort, final long startCode) {
-120    this(Address.fromString(hostAndPort), startCode);
+035
+036
+037/**
+038 * Name of a particular incarnation of an HBase Server.
+039 * A {@link ServerName} is used uniquely identifying a server instance in a cluster and is made
+040 * of the combination of hostname, port, and startcode.  The startcode distinguishes re

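Beyond the javadoc reshuffle, the import change pulls Guava's Interner/Interners into ServerName, which suggests the published revision dedupes equal instances (the rendered source is cut off before the usage). A small usage sketch against the public surface the class javadoc above describes; it assumes only the documented hostname,port,startcode format and the valueOf factory:

    import org.apache.hadoop.hbase.ServerName;

    public class ServerNameDemo {
      public static void main(String[] args) {
        // Format documented above: <hostname>,<port>,<startcode>
        ServerName sn = ServerName.valueOf("www.example.org,1234,1212121212");
        System.out.println(sn.getHostname());  // www.example.org
        System.out.println(sn.getPort());      // 1234
        System.out.println(sn.getStartcode()); // 1212121212
        // Round-trips through toString(), which is safe for filesystem/znode names.
        System.out.println(sn);                // www.example.org,1234,1212121212
      }
    }
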
[50/51] [partial] hbase-site git commit: Published site at b4759ce6e72f50ccd9d410bd5917dc5a515414f1.

2018-07-22 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/df8fd1d3/apidocs/src-html/org/apache/hadoop/hbase/ServerName.html
--
diff --git a/apidocs/src-html/org/apache/hadoop/hbase/ServerName.html b/apidocs/src-html/org/apache/hadoop/hbase/ServerName.html
index f9b4f3e..88e84b4 100644
--- a/apidocs/src-html/org/apache/hadoop/hbase/ServerName.html
+++ b/apidocs/src-html/org/apache/hadoop/hbase/ServerName.html
@@ -35,156 +35,156 @@

[13/51] [partial] hbase-site git commit: Published site at b4759ce6e72f50ccd9d410bd5917dc5a515414f1.

2018-07-22 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/df8fd1d3/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RegionReplicaHostCostFunction.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RegionReplicaHostCostFunction.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RegionReplicaHostCostFunction.html
index 233dba3..91b9055 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RegionReplicaHostCostFunction.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RegionReplicaHostCostFunction.html
@@ -540,1205 +540,1204 @@

[06/51] [partial] hbase-site git commit: Published site at b4759ce6e72f50ccd9d410bd5917dc5a515414f1.

2018-07-22 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/df8fd1d3/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.html
index 233dba3..91b9055 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.html
@@ -540,1205 +540,1204 @@

[17/51] [partial] hbase-site git commit: Published site at b4759ce6e72f50ccd9d410bd5917dc5a515414f1.

2018-07-22 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/df8fd1d3/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RandomCandidateGenerator.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RandomCandidateGenerator.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RandomCandidateGenerator.html
index 233dba3..91b9055 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RandomCandidateGenerator.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RandomCandidateGenerator.html
@@ -540,1205 +540,1204 @@

[43/51] [partial] hbase-site git commit: Published site at b4759ce6e72f50ccd9d410bd5917dc5a515414f1.

2018-07-22 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/df8fd1d3/devapidocs/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.PrimaryRegionCountSkewCostFunction.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.PrimaryRegionCountSkewCostFunction.html b/devapidocs/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.PrimaryRegionCountSkewCostFunction.html
index 386162a..6e644bf 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.PrimaryRegionCountSkewCostFunction.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.PrimaryRegionCountSkewCostFunction.html
@@ -118,7 +118,7 @@ var activeTableTab = "activeTableTab";
-static class StochasticLoadBalancer.PrimaryRegionCountSkewCostFunction
+static class StochasticLoadBalancer.PrimaryRegionCountSkewCostFunction
 extends StochasticLoadBalancer.CostFunction
 Compute the cost of a potential cluster state from skew in number of
 primary regions on a cluster.
@@ -230,7 +230,7 @@
 PRIMARY_REGION_COUNT_SKEW_COST_KEY
-private static final String PRIMARY_REGION_COUNT_SKEW_COST_KEY
+private static final String PRIMARY_REGION_COUNT_SKEW_COST_KEY
 See Also:
 Constant Field Values
@@ -243,7 +243,7 @@
 DEFAULT_PRIMARY_REGION_COUNT_SKEW_COST
-private static final float DEFAULT_PRIMARY_REGION_COUNT_SKEW_COST
+private static final float DEFAULT_PRIMARY_REGION_COUNT_SKEW_COST
 See Also:
 Constant Field Values
@@ -256,7 +256,7 @@
 stats
-private double[] stats
+private double[] stats
@@ -273,7 +273,7 @@
 PrimaryRegionCountSkewCostFunction
-PrimaryRegionCountSkewCostFunction(org.apache.hadoop.conf.Configuration conf)
+PrimaryRegionCountSkewCostFunction(org.apache.hadoop.conf.Configuration conf)
@@ -290,7 +290,7 @@
 cost
-double cost()
+double cost()
 Specified by:
 cost in class StochasticLoadBalancer.CostFunction

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/df8fd1d3/devapidocs/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RackLocalityCostFunction.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RackLocalityCostFunction.html b/devapidocs/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RackLocalityCostFunction.html
index 1030aa6..cd4a941 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RackLocalityCostFunction.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RackLocalityCostFunction.html
@@ -123,7 +123,7 @@ var activeTableTab = "activeTableTab";
-static class StochasticLoadBalancer.RackLocalityCostFunction
+static class StochasticLoadBalancer.RackLocalityCostFunction
 extends StochasticLoadBalancer.LocalityBasedCostFunction
@@ -239,7 +239,7 @@
 RACK_LOCALITY_COST_KEY
-private static final String RACK_LOCALITY_COST_KEY
+private static final String RACK_LOCALITY_COST_KEY
 See Also:
 Constant Field Values
@@ -252,7 +252,7 @@
 DEFAULT_RACK_LOCALITY_COST
-private static final float DEFAULT_RACK_LOCALITY_COST
+private static final float DEFAULT_RACK_LOCALITY_COST
 See Also:
 Constant Field Values
@@ -273,7 +273,7 @@
 RackLocalityCostFunction
-public RackLocalityCostFunction(org.apache.hadoop.conf.Configuration conf,
+public RackLocalityCostFunction(org.apache.hadoop.conf.Configuration conf,
                                 MasterServices services)
@@ -291,7 +291,7 @@
 regionIndexToEntityIndex
-int regionIndexToEntityIndex(int region)
+int regionIndexToEntityIndex(int region)
 Description copied from class: StochasticLoadBalancer.LocalityBasedCostFunction
 Maps region to the current entity (server or rack) on which it is stored

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/df8fd1d3/devapidocs/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RandomCandidateGenerator.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RandomCandidateGenerator.html b/devapidocs/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RandomCandidateGenerator.html
index 5b1d26a..aedf61e 100644
--- a/d

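These pages document the stochastic balancer's pluggable cost functions: each one is constructed from a Configuration, reads its weight from a key (such as PRIMARY_REGION_COUNT_SKEW_COST_KEY, with a float default), and exposes a cost() that the balancer scales by that weight. A simplified sketch of the pattern, not the HBase classes themselves; the key string, default, and returned value below are illustrative assumptions:

    import org.apache.hadoop.conf.Configuration;

    // Simplified stand-in for StochasticLoadBalancer.CostFunction subclasses:
    // the weight comes from a Configuration key, cost() returns a normalized value.
    abstract class SimpleCostFunction {
      private final float multiplier;

      SimpleCostFunction(Configuration conf, String key, float defaultWeight) {
        // Illustrative lookup; the real keys live under hbase.master.balancer.stochastic.*
        this.multiplier = conf.getFloat(key, defaultWeight);
      }

      float getMultiplier() {
        return multiplier;
      }

      /** Cost of the current candidate state, normalized to [0, 1]. */
      abstract double cost();
    }

    class SkewCostFunction extends SimpleCostFunction {
      SkewCostFunction(Configuration conf) {
        super(conf, "example.balancer.skewCost", 500f); // hypothetical key and default
      }

      @Override
      double cost() {
        return 0.25; // placeholder: a real implementation measures per-server skew
      }
    }
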
[38/51] [partial] hbase-site git commit: Published site at b4759ce6e72f50ccd9d410bd5917dc5a515414f1.

2018-07-22 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/df8fd1d3/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.AssignmentProcedureEvent.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.AssignmentProcedureEvent.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.AssignmentProcedureEvent.html
index 5f41fe7..c8158b5 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.AssignmentProcedureEvent.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.AssignmentProcedureEvent.html
@@ -524,636 +524,639 @@

[26/51] [partial] hbase-site git commit: Published site at b4759ce6e72f50ccd9d410bd5917dc5a515414f1.

2018-07-22 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/df8fd1d3/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.CostFromRegionLoadFunction.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.CostFromRegionLoadFunction.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.CostFromRegionLoadFunction.html
index 233dba3..91b9055 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.CostFromRegionLoadFunction.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.CostFromRegionLoadFunction.html
@@ -540,1205 +540,1204 @@

[37/51] [partial] hbase-site git commit: Published site at b4759ce6e72f50ccd9d410bd5917dc5a515414f1.

2018-07-22 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/df8fd1d3/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.RegionFailedOpen.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.RegionFailedOpen.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.RegionFailedOpen.html
index 5f41fe7..c8158b5 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.RegionFailedOpen.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.RegionFailedOpen.html
@@ -524,636 +524,639 @@

[05/51] [partial] hbase-site git commit: Published site at b4759ce6e72f50ccd9d410bd5917dc5a515414f1.

2018-07-22 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/df8fd1d3/devapidocs/src-html/org/apache/hadoop/hbase/master/procedure/RestoreSnapshotProcedure.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/procedure/RestoreSnapshotProcedure.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/procedure/RestoreSnapshotProcedure.html
index b341b0d..10ab3d1 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/procedure/RestoreSnapshotProcedure.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/procedure/RestoreSnapshotProcedure.html
@@ -42,473 +42,534 @@
 034import org.apache.hadoop.hbase.TableNotFoundException;
 035import org.apache.hadoop.hbase.client.Connection;
 036import org.apache.hadoop.hbase.client.RegionInfo;
-037import org.apache.hadoop.hbase.client.TableDescriptor;
-038import org.apache.hadoop.hbase.errorhandling.ForeignException;
-039import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
-040import org.apache.hadoop.hbase.master.MasterFileSystem;
-041import org.apache.hadoop.hbase.master.MetricsSnapshot;
-042import org.apache.hadoop.hbase.monitoring.MonitoredTask;
-043import org.apache.hadoop.hbase.monitoring.TaskMonitor;
-044import org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer;
-045import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
-046import org.apache.hadoop.hbase.snapshot.RestoreSnapshotHelper;
-047import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
-048import org.apache.hadoop.hbase.snapshot.SnapshotManifest;
-049import org.apache.hadoop.hbase.util.Pair;
-050import org.apache.yetus.audience.InterfaceAudience;
-051import org.slf4j.Logger;
-052import org.slf4j.LoggerFactory;
-053import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-054import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
-055import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos;
-056import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RestoreSnapshotState;
-057import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;
-058
-059@InterfaceAudience.Private
-060public class RestoreSnapshotProcedure
-061    extends AbstractStateMachineTableProcedure<RestoreSnapshotState> {
-062  private static final Logger LOG = LoggerFactory.getLogger(RestoreSnapshotProcedure.class);
-063
-064  private TableDescriptor modifiedTableDescriptor;
-065  private List<RegionInfo> regionsToRestore = null;
-066  private List<RegionInfo> regionsToRemove = null;
-067  private List<RegionInfo> regionsToAdd = null;
-068  private Map<String, Pair<String, String>> parentsToChildrenPairMap = new HashMap<>();
-069
-070  private SnapshotDescription snapshot;
-071  private boolean restoreAcl;
-072
-073  // Monitor
-074  private MonitoredTask monitorStatus = null;
-075
-076  private Boolean traceEnabled = null;
-077
-078  /**
-079   * Constructor (for failover)
-080   */
-081  public RestoreSnapshotProcedure() {
-082  }
-083
-084  public RestoreSnapshotProcedure(final MasterProcedureEnv env,
-085      final TableDescriptor tableDescriptor, final SnapshotDescription snapshot)
-086      throws HBaseIOException {
-087    this(env, tableDescriptor, snapshot, false);
-088  }
-089  /**
-090   * Constructor
-091   * @param env MasterProcedureEnv
-092   * @param tableDescriptor the table to operate on
-093   * @param snapshot snapshot to restore from
-094   * @throws IOException
-095   */
-096  public RestoreSnapshotProcedure(
-097      final MasterProcedureEnv env,
-098      final TableDescriptor tableDescriptor,
-099      final SnapshotDescription snapshot,
-100      final boolean restoreAcl)
-101      throws HBaseIOException {
-102    super(env);
-103    // This is the new schema we are going to write out as this modification.
-104    this.modifiedTableDescriptor = tableDescriptor;
-105    preflightChecks(env, null/*Table can be online when restore is called?*/);
-106    // Snapshot information
-107    this.snapshot = snapshot;
-108    this.restoreAcl = restoreAcl;
-109
-110    // Monitor
-111    getMonitorStatus();
-112  }
+037import org.apache.hadoop.hbase.client.RegionReplicaUtil;
+038import org.apache.hadoop.hbase.client.TableDescriptor;
+039import org.apache.hadoop.hbase.errorhandling.ForeignException;
+040import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
+041import org.apache.hadoop.hbase.favored.FavoredNodesManager;
+042import org.apache.hadoop.hbase.master.MasterFileSystem;
+043import org.apache.hadoop.hbase.master.MetricsSnapshot;
+044import org.apache.hadoop.hbase.master.RegionState;
+045import org.apache.hadoop.hbase.master.assignment.AssignmentManager;
+046import org.apache.hadoop.hbase.monitoring.MonitoredTask;
+047import org.apache.hadoop.hbase.monitoring.TaskMonitor;
+048import org.apache.hadoop.hbase.proc

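The removed block shows the procedure's delegating constructors: the no-argument form exists only for failover deserialization, and the three-argument form simply forwards with restoreAcl=false. A minimal sketch of that telescoping-constructor pattern in isolation (hypothetical RestoreRequest class, not the HBase type):

    // Hypothetical illustration of the delegating-constructor pattern used above.
    class RestoreRequest {
      private final String table;
      private final String snapshot;
      private final boolean restoreAcl;

      // Convenience form: ACLs are not restored unless explicitly requested.
      RestoreRequest(String table, String snapshot) {
        this(table, snapshot, false);
      }

      RestoreRequest(String table, String snapshot, boolean restoreAcl) {
        this.table = table;
        this.snapshot = snapshot;
        this.restoreAcl = restoreAcl;
      }

      @Override
      public String toString() {
        return table + " <- " + snapshot + (restoreAcl ? " (with ACLs)" : "");
      }
    }
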
[16/51] [partial] hbase-site git commit: Published site at b4759ce6e72f50ccd9d410bd5917dc5a515414f1.

2018-07-22 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/df8fd1d3/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.ReadRequestCostFunction.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.ReadRequestCostFunction.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.ReadRequestCostFunction.html
index 233dba3..91b9055 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.ReadRequestCostFunction.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.ReadRequestCostFunction.html
@@ -540,1205 +540,1204 @@

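The early-out in computeCost() above is what keeps the stochastic search cheap: each cost function is scaled by its multiplier, and evaluation stops as soon as the running total exceeds the cost of the incumbent cluster state, since such a candidate can never win. Note this is only sound because the individual costs are non-negative. A minimal, self-contained sketch of the same pattern, using plain DoubleSupplier stand-ins rather than the real CostFunction class:

import java.util.function.DoubleSupplier;

// Sketch (not HBase code) of the weighted-sum early-out used by computeCost():
// skip disabled terms, accumulate multiplier * cost, and bail out as soon as
// the running total is already worse than the previous best.
public class EarlyOutCostDemo {
  static double computeCost(double[] multipliers, DoubleSupplier[] costs, double previousCost) {
    double total = 0;
    for (int i = 0; i < costs.length; i++) {
      if (multipliers[i] <= 0) {
        continue; // disabled cost functions are skipped entirely
      }
      total += multipliers[i] * costs[i].getAsDouble();
      if (total > previousCost) {
        break; // early out: already worse than the incumbent state
      }
    }
    return total;
  }

  public static void main(String[] args) {
    double[] multipliers = {100, 25, 0.5};
    DoubleSupplier[] costs = {() -> 0.2, () -> 0.1, () -> 0.9};
    System.out.println(computeCost(multipliers, costs, 30.0)); // 22.95 (all terms evaluated)
    System.out.println(computeCost(multipliers, costs, 10.0)); // 20.0 (early out after first term)
  }
}
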
[45/51] [partial] hbase-site git commit: Published site at b4759ce6e72f50ccd9d410bd5917dc5a515414f1.

2018-07-22 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/df8fd1d3/devapidocs/org/apache/hadoop/hbase/master/assignment/RegionStates.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/assignment/RegionStates.html 
b/devapidocs/org/apache/hadoop/hbase/master/assignment/RegionStates.html
index aa9649d..4b73973 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/assignment/RegionStates.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/assignment/RegionStates.html
@@ javadoc page re-generated: method count goes from 62 to 63; the single static method moves from index 54 to 55 @@

 Method summary (the added method is marked +):

+void deleteRegions(List<RegionInfo> regionInfos)
 List<RegionInfo> getAssignedRegions()
 Map<TableName,Map<ServerName,List<RegionInfo>>> getAssignmentsByTable()
 Map<TableName,Map<ServerName,List<RegionInfo>>> getAssignmentsByTable(boolean forceByCluster)
   This is an EXPENSIVE clone.
 double getAverageLoad()
 RegionStates.RegionFailedOpen getFailedOpen(RegionInfo regionInfo)
 protected RegionStates.RegionStateNode getOrCreateRegionStateNode(RegionInfo regionInfo)
 RegionStates.ServerStateNode getOrCreateServer(ServerName serverName)
   Be judicious calling this method.
 Map<RegionInfo,ServerName> getRegionAssignments()
 Map<RegionState.State,List<RegionInfo>> getRegionByStateOfTable(TableName tableName)
 List<RegionState> getRegionFailedOpen()
 ServerName getRegionServerOfRegion(RegionInfo regionInfo)
 List<RegionStateNode> getRegionsInTransition()
 int getRegionsInTransitionCount()
   Get the number of regions in transition.
 SortedSet<RegionState> getRegionsInTransitionOrderedByTimestamp()
 (remainder of the method table truncated in the archive)

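The only substantive change on this page is the new bulk helper deleteRegions(List<RegionInfo>); everything else is anchor re-numbering. A hypothetical, simplified sketch of how such a bulk delete layers over the existing per-region delete (the String keys and Object values here are stand-ins, not the real HBase types):

import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

// Hypothetical stand-in for RegionStates: the point is only how a bulk
// deleteRegions(List) layers over an existing single-region delete, which is
// exactly what the new method does via regionInfos.forEach(this::deleteRegion).
class RegionStatesSketch {
  private final Map<String, Object> regionsMap = new ConcurrentHashMap<>();

  void addRegion(String encodedName) {
    regionsMap.put(encodedName, new Object());
  }

  void deleteRegion(String encodedName) {
    regionsMap.remove(encodedName);
  }

  void deleteRegions(List<String> encodedNames) {
    // One call per region; reuses whatever locking the single delete already has.
    encodedNames.forEach(this::deleteRegion);
  }

  public static void main(String[] args) {
    RegionStatesSketch states = new RegionStatesSketch();
    states.addRegion("r1");
    states.addRegion("r2");
    states.deleteRegions(Arrays.asList("r1", "r2"));
    System.out.println(states.regionsMap.isEmpty()); // true
  }
}
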
[25/51] [partial] hbase-site git commit: Published site at b4759ce6e72f50ccd9d410bd5917dc5a515414f1.

2018-07-22 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/df8fd1d3/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.CostFunction.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.CostFunction.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.CostFunction.html
index 233dba3..91b9055 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.CostFunction.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.CostFunction.html

[48/51] [partial] hbase-site git commit: Published site at b4759ce6e72f50ccd9d410bd5917dc5a515414f1.

2018-07-22 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/df8fd1d3/checkstyle.rss
--
diff --git a/checkstyle.rss b/checkstyle.rss
index a80572d..bc1d57c 100644
--- a/checkstyle.rss
+++ b/checkstyle.rss
@@ -26,7 +26,7 @@ under the License.
 ©2007 - 2018 The Apache Software Foundation
 
   File: 3690,
- Errors: 15669,
+ Errors: 15670,
  Warnings: 0,
  Infos: 0
   
@@ -45289,7 +45289,7 @@ under the License.
   0
 
 
-  24
+  25
 
   
   

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/df8fd1d3/coc.html
--
diff --git a/coc.html b/coc.html
index 750ea56..84c9704 100644
--- a/coc.html
+++ b/coc.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase – 
   Code of Conduct Policy
@@ -375,7 +375,7 @@ email to mailto:priv...@hbase.apache.org";>the priv
 https://www.apache.org/";>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2018-07-20
+  Last Published: 
2018-07-22
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/df8fd1d3/dependencies.html
--
diff --git a/dependencies.html b/dependencies.html
index 630d8a4..402c4d2 100644
--- a/dependencies.html
+++ b/dependencies.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase – Project Dependencies
 
@@ -440,7 +440,7 @@
 https://www.apache.org/";>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2018-07-20
+  Last Published: 
2018-07-22
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/df8fd1d3/dependency-convergence.html
--
diff --git a/dependency-convergence.html b/dependency-convergence.html
index 0d93acf..e6f4edb 100644
--- a/dependency-convergence.html
+++ b/dependency-convergence.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase – Reactor Dependency Convergence
 
@@ -905,7 +905,7 @@
 https://www.apache.org/";>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2018-07-20
+  Last Published: 
2018-07-22
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/df8fd1d3/dependency-info.html
--
diff --git a/dependency-info.html b/dependency-info.html
index 998d393..f64dc68 100644
--- a/dependency-info.html
+++ b/dependency-info.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase – Dependency Information
 
@@ -313,7 +313,7 @@
 https://www.apache.org/";>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2018-07-20
+  Last Published: 
2018-07-22
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/df8fd1d3/dependency-management.html
--
diff --git a/dependency-management.html b/dependency-management.html
index 3212f22..1bcbcfd 100644
--- a/dependency-management.html
+++ b/dependency-management.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase – Project Dependency Management
 
@@ -1005,7 +1005,7 @@
 https://www.apache.org/";>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2018-07-20
+  Last Published: 
2018-07-22
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/df8fd1d3/devapidocs/constant-values.html
--
diff --git a/devapidocs/constant-values.html b/devapidocs/constant-values.html
index 4b37b51..15c4f42 100644
--- a/devapidocs/constant-values.html
+++ b/devapidocs/constant-values.html
@@ -3789,21 +3789,21 @@
 
 public static final https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String
 date
-"Fri Jul 20 14:39:51 UTC 2018"
+"Sun Jul 22 09:20:24 UTC 2018"
 
 
 
 
 public static final https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String
 revision
-"03e596c6694a

[33/51] [partial] hbase-site git commit: Published site at b4759ce6e72f50ccd9d410bd5917dc5a515414f1.

2018-07-22 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/df8fd1d3/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.ServerState.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.ServerState.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.ServerState.html
index 5f41fe7..c8158b5 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.ServerState.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.ServerState.html
@@ -524,636 +524,639 @@
     }
   }

+  public void deleteRegions(final List<RegionInfo> regionInfos) {
+    regionInfos.forEach(this::deleteRegion);
+  }
+
   ArrayList<RegionStateNode> getTableRegionStateNodes(final TableName tableName) {
     final ArrayList<RegionStateNode> regions = new ArrayList<RegionStateNode>();
     for (RegionStateNode node: regionsMap.tailMap(tableName.getName()).values()) {
       if (!node.getTable().equals(tableName)) break;
       regions.add(node);
     }
     return regions;
   }

   ArrayList<RegionState> getTableRegionStates(final TableName tableName) {
     final ArrayList<RegionState> regions = new ArrayList<RegionState>();
     for (RegionStateNode node: regionsMap.tailMap(tableName.getName()).values()) {
       if (!node.getTable().equals(tableName)) break;
       regions.add(node.toRegionState());
     }
     return regions;
   }

   ArrayList<RegionInfo> getTableRegionsInfo(final TableName tableName) {
     final ArrayList<RegionInfo> regions = new ArrayList<RegionInfo>();
     for (RegionStateNode node: regionsMap.tailMap(tableName.getName()).values()) {
       if (!node.getTable().equals(tableName)) break;
       regions.add(node.getRegionInfo());
     }
     return regions;
   }

   Collection<RegionStateNode> getRegionStateNodes() {
     return regionsMap.values();
   }

   public ArrayList<RegionState> getRegionStates() {
     final ArrayList<RegionState> regions = new ArrayList<RegionState>(regionsMap.size());
     for (RegionStateNode node: regionsMap.values()) {
       regions.add(node.toRegionState());
     }
     return regions;
   }

   // ==========================================================================
   //  RegionState helpers
   // ==========================================================================
   public RegionState getRegionState(final RegionInfo regionInfo) {
     RegionStateNode regionStateNode = getRegionStateNode(regionInfo);
     return regionStateNode == null ? null : regionStateNode.toRegionState();
   }

   public RegionState getRegionState(final String encodedRegionName) {
     // TODO: Need a map  but it is just dispatch merge...
     for (RegionStateNode node: regionsMap.values()) {
       if (node.getRegionInfo().getEncodedName().equals(encodedRegionName)) {
         return node.toRegionState();
       }
     }
     return null;
   }

   // ==========================================================================
   //  TODO: helpers
   // ==========================================================================
   public boolean hasTableRegionStates(final TableName tableName) {
     // TODO
     return !getTableRegionStates(tableName).isEmpty();
   }

   /**
    * @return Return online regions of table; does not include OFFLINE or SPLITTING regions.
    */
   public List<RegionInfo> getRegionsOfT

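The getTableRegion* helpers above all rely on the same idiom: regionsMap is sorted by region name, region names are prefixed by the table name, so all of a table's regions form one contiguous run that tailMap(...) plus an early break can collect without scanning the whole map. A standalone sketch of the idiom with plain strings (the key/value layout is illustrative only):

import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;

// Sketch of the tailMap scan used by getTableRegionStateNodes() and friends:
// start at the first key >= the table name, collect until the table changes.
public class TailMapScanDemo {
  public static void main(String[] args) {
    TreeMap<String, String> regionsMap = new TreeMap<>();
    regionsMap.put("t1,a", "t1"); // key = region name, value = its table
    regionsMap.put("t1,m", "t1");
    regionsMap.put("t2,a", "t2");

    String table = "t1";
    List<String> regions = new ArrayList<>();
    for (Map.Entry<String, String> e : regionsMap.tailMap(table).entrySet()) {
      if (!e.getValue().equals(table)) {
        break; // walked past the end of this table's contiguous run
      }
      regions.add(e.getKey());
    }
    System.out.println(regions); // [t1,a, t1,m]
  }
}
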
[28/51] [partial] hbase-site git commit: Published site at b4759ce6e72f50ccd9d410bd5917dc5a515414f1.

2018-07-22 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/df8fd1d3/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.CandidateGenerator.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.CandidateGenerator.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.CandidateGenerator.html
index 233dba3..91b9055 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.CandidateGenerator.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.CandidateGenerator.html

[21/51] [partial] hbase-site git commit: Published site at b4759ce6e72f50ccd9d410bd5917dc5a515414f1.

2018-07-22 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/df8fd1d3/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.MemStoreSizeCostFunction.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.MemStoreSizeCostFunction.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.MemStoreSizeCostFunction.html
index 233dba3..91b9055 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.MemStoreSizeCostFunction.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.MemStoreSizeCostFunction.html

[27/51] [partial] hbase-site git commit: Published site at b4759ce6e72f50ccd9d410bd5917dc5a515414f1.

2018-07-22 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/df8fd1d3/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.CostFromRegionLoadAsRateFunction.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.CostFromRegionLoadAsRateFunction.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.CostFromRegionLoadAsRateFunction.html
index 233dba3..91b9055 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.CostFromRegionLoadAsRateFunction.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.CostFromRegionLoadAsRateFunction.html

[20/51] [partial] hbase-site git commit: Published site at b4759ce6e72f50ccd9d410bd5917dc5a515414f1.

2018-07-22 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/df8fd1d3/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.MoveCostFunction.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.MoveCostFunction.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.MoveCostFunction.html
index 233dba3..91b9055 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.MoveCostFunction.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.MoveCostFunction.html

[40/51] [partial] hbase-site git commit: Published site at b4759ce6e72f50ccd9d410bd5917dc5a515414f1.

2018-07-22 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/df8fd1d3/devapidocs/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.html
 
b/devapidocs/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.html
index 0a50aef..e72eea3 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.html
@@ javadoc page re-generated; all anchors shift, declarations unchanged @@

 @InterfaceAudience.Private
 public class RegionReplicaReplicationEndpoint extends HBaseReplicationEndpoint
   A ReplicationEndpoint which receives the WAL edits from the WAL, and sends the edits to replicas

 Fields: LOG (org.slf4j.Logger), CLIENT_RETRIES_NUMBER (String), conf (Configuration),
   connection (ClusterConnection), tableDescriptors (TableDescriptors),
   controller (WALSplitter.PipelineController), outputSink (RegionReplicaOutputSink),
   entryBuffers (WALSplitter.EntryBuffers), numWriterThreads (int), operationTimeout (int),
   pool (ExecutorService)

 Constructor: RegionReplicaReplicationEndpoint()

 init(ReplicationEndpoint.Context context) throws IOException
   Initialize the replication endpoint with the given context.
 doStart(), doStop()
   Override doStart/doStop in class HBaseReplicationEndpoint.
 getDefaultThreadPool(org.apache.hadoop.conf.Configuration conf)
   Returns a Thread pool for the RPC's to region replicas. Similar to Connection's thread pool.
 replicate(ReplicationEndpoint.ReplicateContext replicateContext)
   Replicate the given set of entries (in the context) to the other cluster.
   Can block until all the given entries are replica

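For readers following the replication side, the life cycle visible in this javadoc is: init(Context) wires configuration and connections, doStart()/doStop() bracket the running service, and replicate(ReplicateContext) blocks until a batch of WAL entries has been handled, returning a boolean so the caller can retry. A minimal sketch of that shape, using stand-in types rather than the real org.apache.hadoop.hbase.replication API (the at-least-once reading of the retry contract is my assumption):

import java.util.Arrays;
import java.util.List;

// Stand-in types only; not the real HBase ReplicationEndpoint API.
public class EndpointLifecycleDemo {
  interface ReplicateContextSketch {
    List<String> entries(); // stand-in for the batch of WAL entries
  }

  abstract static class EndpointSketch {
    void init(Object context) { /* capture config and connections */ }
    protected void doStart() { /* mark the service running */ }
    protected void doStop() { /* release pools and connections */ }
    abstract boolean replicate(ReplicateContextSketch ctx);
  }

  static class LoggingEndpoint extends EndpointSketch {
    @Override
    boolean replicate(ReplicateContextSketch ctx) {
      // Blocks until every entry is handled; returning false asks the caller
      // to re-submit the batch (assumed at-least-once delivery).
      ctx.entries().forEach(System.out::println);
      return true;
    }
  }

  public static void main(String[] args) {
    EndpointSketch ep = new LoggingEndpoint();
    ep.init(new Object());
    ep.doStart();
    boolean ok = ep.replicate(() -> Arrays.asList("edit-1", "edit-2"));
    System.out.println("replicated: " + ok);
    ep.doStop();
  }
}
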
[03/51] [partial] hbase-site git commit: Published site at b4759ce6e72f50ccd9d410bd5917dc5a515414f1.

2018-07-22 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/df8fd1d3/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.RegionReplicaOutputSink.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.RegionReplicaOutputSink.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.RegionReplicaOutputSink.html
index 3d7093a..9917ee8 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.RegionReplicaOutputSink.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.RegionReplicaOutputSink.html
@@ -39,594 +39,612 @@ (leading blank line dropped from the import block; the rest of the hunk is truncated in the archive)
 import java.util.concurrent.ThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicLong;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.CellScanner;
 import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HBaseIOException;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.RegionLocations;
 import org.apache.hadoop.hbase.TableDescriptors;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.TableNotFoundException;
 import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.RegionAdminServiceCallable;
 import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.RegionReplicaUtil;
 import org.apache.hadoop.hbase.client.RetryingCallable;
 import org.apache.hadoop.hbase.client.RpcRetryingCallerFactory;
 import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.ipc.HBaseRpcController;
 import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
 import org.apache.hadoop.hbase.protobuf.ReplicationProtbufUtil;
 import org.apache.hadoop.hbase.replication.HBaseReplicationEndpoint;
 import org.apache.hadoop.hbase.replication.WALEntryFilter;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.util.Threads;
 import org.apache.hadoop.hbase.wal.WAL.Entry;
 import org.apache.hadoop.hbase.wal.WALSplitter.EntryBuffers;
 import org.apache.hadoop.hbase.wal.WALSplitter.OutputSink;
 import org.apache.hadoop.hbase.wal.WALSplitter.PipelineController;
 import org.apache.hadoop.hbase.wal.WALSplitter.RegionEntryBuffer;
 import org.apache.hadoop.hbase.wal.WALSplitter.SinkWriter;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;

[11/51] [partial] hbase-site git commit: Published site at b4759ce6e72f50ccd9d410bd5917dc5a515414f1.

2018-07-22 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/df8fd1d3/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RegionReplicaRackCostFunction.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RegionReplicaRackCostFunction.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RegionReplicaRackCostFunction.html
index 233dba3..91b9055 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RegionReplicaRackCostFunction.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RegionReplicaRackCostFunction.html

[01/51] [partial] hbase-site git commit: Published site at b4759ce6e72f50ccd9d410bd5917dc5a515414f1.

2018-07-22 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 972a3c890 -> df8fd1d31


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/df8fd1d3/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.RegionReplicaSinkWriter.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.RegionReplicaSinkWriter.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.RegionReplicaSinkWriter.html
index 3d7093a..9917ee8 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.RegionReplicaSinkWriter.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.RegionReplicaSinkWriter.html

hbase git commit: HBASE-20915 Remove the commit column on our download page

2018-07-22 Thread zhangduo
Repository: hbase
Updated Branches:
  refs/heads/master 682aa6967 -> b4759ce6e


HBASE-20915 Remove the commit column on our download page


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/b4759ce6
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/b4759ce6
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/b4759ce6

Branch: refs/heads/master
Commit: b4759ce6e72f50ccd9d410bd5917dc5a515414f1
Parents: 682aa69
Author: zhangduo 
Authored: Sun Jul 22 15:35:37 2018 +0800
Committer: zhangduo 
Committed: Sun Jul 22 17:08:27 2018 +0800

--
 src/site/xdoc/downloads.xml | 10 --
 1 file changed, 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/b4759ce6/src/site/xdoc/downloads.xml
--
diff --git a/src/site/xdoc/downloads.xml b/src/site/xdoc/downloads.xml
index 8b35ac6..3f3f293 100644
--- a/src/site/xdoc/downloads.xml
+++ b/src/site/xdoc/downloads.xml
@@ -40,7 +40,6 @@ under the License.
   Compatiblity Report
   Changes
   Release Notes
-  Commit
   Download
 
 
@@ -60,9 +59,6 @@ under the License.
 https://apache.org/dist/hbase/2.1.0/RELEASENOTES.md";>Release 
Notes
   
   
-https://git-wip-us.apache.org/repos/asf?p=hbase.git;a=commit;h=e1673bb0bbfea21d6e5dba73e013b09b8b49b89b";>e1673bb0bbfea21d6e5dba73e013b09b8b49b89b
-  
-  
 https://www.apache.org/dyn/closer.lua/hbase/2.1.0/hbase-2.1.0-src.tar.gz";>src
 (https://apache.org/dist/hbase/2.1.0/hbase-2.1.0-src.tar.gz.sha512";>sha512
 https://apache.org/dist/hbase/2.1.0/hbase-2.1.0-src.tar.gz.asc";>asc) 

 https://www.apache.org/dyn/closer.lua/hbase/2.1.0/hbase-2.1.0-bin.tar.gz";>bin
 (https://apache.org/dist/hbase/2.1.0/hbase-2.1.0-bin.tar.gz.sha512";>sha512
 https://apache.org/dist/hbase/2.1.0/hbase-2.1.0-bin.tar.gz.asc";>asc) 

 https://www.apache.org/dyn/closer.lua/hbase/2.1.0/hbase-2.1.0-client-bin.tar.gz";>client-bin
 (https://apache.org/dist/hbase/2.1.0/hbase-2.1.0-client-bin.tar.gz.sha512";>sha512
 https://apache.org/dist/hbase/2.1.0/hbase-2.1.0-client-bin.tar.gz.asc";>asc)
@@ -85,9 +81,6 @@ under the License.
 https://apache.org/dist/hbase/2.0.1/RELEASENOTES.md";>Release 
Notes
   
   
-https://git-wip-us.apache.org/repos/asf?p=hbase.git;a=commit;h=987f7b6d37c2fcacc942cc66e5c5122aba8fdfbe";>987f7b6d37c2fcacc942cc66e5c5122aba8fdfbe
-  
-  
 https://www.apache.org/dyn/closer.lua/hbase/2.0.1/hbase-2.0.1-src.tar.gz";>src
 (https://apache.org/dist/hbase/2.0.1/hbase-2.0.1-src.tar.gz.sha512";>sha512
 https://apache.org/dist/hbase/2.0.1/hbase-2.0.1-src.tar.gz.asc";>asc) 

 https://www.apache.org/dyn/closer.lua/hbase/2.0.1/hbase-2.0.1-bin.tar.gz";>bin
 (https://apache.org/dist/hbase/2.0.1/hbase-2.0.1-bin.tar.gz.sha512";>sha512
 https://apache.org/dist/hbase/2.0.1/hbase-2.0.1-bin.tar.gz.asc";>asc)
   
@@ -109,9 +102,6 @@ under the License.
 https://apache.org/dist/hbase/2.0.0/RELEASENOTES.md";>Release 
Notes
   
   
-https://git-wip-us.apache.org/repos/asf?p=hbase.git;a=commit;h=7483b111e4da77adbfc8062b3b22cbe7c2cb91c1";>7483b111e4da77adbfc8062b3b22cbe7c2cb91c1
-  
-  
 https://www.apache.org/dyn/closer.lua/hbase/2.0.0/hbase-2.0.0-src.tar.gz";>src
 (https://apache.org/dist/hbase/2.0.0/hbase-2.0.0-src.tar.gz.sha512";>sha512
 https://apache.org/dist/hbase/2.0.0/hbase-2.0.0-src.tar.gz.asc";>asc) 

 https://www.apache.org/dyn/closer.lua/hbase/2.0.0/hbase-2.0.0-bin.tar.gz";>bin
 (https://apache.org/dist/hbase/2.0.0/hbase-2.0.0-bin.tar.gz.sha512";>sha512
 https://apache.org/dist/hbase/2.0.0/hbase-2.0.0-bin.tar.gz.asc";>asc)