hbase git commit: HBASE-21445 CopyTable by bulkload will write hfile into yarn's HDFS

2018-11-09 Thread openinx
Repository: hbase
Updated Branches:
  refs/heads/branch-1 68c939668 -> 814c3690d


HBASE-21445 CopyTable by bulkload will write hfile into yarn's HDFS


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/814c3690
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/814c3690
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/814c3690

Branch: refs/heads/branch-1
Commit: 814c3690dc6f1aac91bcc18f379bc58265409c0d
Parents: 68c9396
Author: huzheng 
Authored: Sat Nov 10 10:28:49 2018 +0800
Committer: huzheng 
Committed: Sat Nov 10 10:41:38 2018 +0800

--
 .../main/java/org/apache/hadoop/hbase/mapreduce/CopyTable.java  | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/814c3690/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/CopyTable.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/CopyTable.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/CopyTable.java
index 867b7df..f63ed3e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/CopyTable.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/CopyTable.java
@@ -40,6 +40,7 @@ import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.mapreduce.Job;
 import org.apache.hadoop.util.GenericOptionsParser;
 import org.apache.hadoop.util.Tool;
@@ -151,7 +152,7 @@ public class CopyTable extends Configured implements Tool {
   // We need to split the inputs by destination tables so that output of Map can be bulk-loaded.
   TableInputFormat.configureSplitTable(job, TableName.valueOf(dstTableName));
 
-  FileSystem fs = FileSystem.get(getConf());
+  FileSystem fs = FSUtils.getCurrentFileSystem(getConf());
   Random rand = new Random();
   Path root = new Path(fs.getWorkingDirectory(), "copytable");
   fs.mkdirs(root);
@@ -380,7 +381,7 @@ public class CopyTable extends Configured implements Tool {
   if (code == 0) {
 // bulkloadDir is deleted only if LoadIncrementalHFiles was successful so that one can rerun
 // LoadIncrementalHFiles.
-FileSystem fs = FileSystem.get(this.getConf());
+FileSystem fs = FSUtils.getCurrentFileSystem(getConf());
 if (!fs.delete(this.bulkloadDir, true)) {
   LOG.error("Deleting folder " + bulkloadDir + " failed!");
   code = 1;
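The gist of the change above: when CopyTable runs with --bulkload inside a YARN container, FileSystem.get(conf) resolves fs.defaultFS, which the MapReduce runtime may point at YARN's own HDFS, so the staged HFiles never reach the cluster hosting HBase. FSUtils.getCurrentFileSystem(conf) resolves the filesystem backing hbase.rootdir instead. A minimal sketch of the difference (the helper class below is illustrative, not part of the patch):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HConstants;

// Illustrative helper, not part of the patch: contrasts the two ways the
// job's FileSystem can be resolved.
public final class FsResolutionSketch {
  private FsResolutionSketch() {
  }

  // What the old code did: resolve fs.defaultFS. Inside a YARN container this
  // can be YARN's staging HDFS, which is where the stray HFiles ended up.
  public static FileSystem defaultFs(Configuration conf) throws Exception {
    return FileSystem.get(conf);
  }

  // Roughly what FSUtils.getCurrentFileSystem does: resolve the filesystem
  // backing hbase.rootdir, so the "copytable" staging dir lands beside HBase.
  public static FileSystem hbaseRootDirFs(Configuration conf) throws Exception {
    Path rootDir = new Path(conf.get(HConstants.HBASE_DIR));
    return rootDir.getFileSystem(conf);
  }
}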



hbase git commit: HBASE-21445 CopyTable by bulkload will write hfile into yarn's HDFS

2018-11-09 Thread openinx
Repository: hbase
Updated Branches:
  refs/heads/branch-1.4 58e4731ac -> fa6a6aff4


HBASE-21445 CopyTable by bulkload will write hfile into yarn's HDFS


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/fa6a6aff
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/fa6a6aff
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/fa6a6aff

Branch: refs/heads/branch-1.4
Commit: fa6a6aff4f64607df3cc47ae0a1eab482afd88e8
Parents: 58e4731
Author: huzheng 
Authored: Sat Nov 10 10:28:49 2018 +0800
Committer: huzheng 
Committed: Sat Nov 10 10:38:44 2018 +0800

--
 .../main/java/org/apache/hadoop/hbase/mapreduce/CopyTable.java  | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/fa6a6aff/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/CopyTable.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/CopyTable.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/CopyTable.java
index 867b7df..f63ed3e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/CopyTable.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/CopyTable.java
@@ -40,6 +40,7 @@ import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.mapreduce.Job;
 import org.apache.hadoop.util.GenericOptionsParser;
 import org.apache.hadoop.util.Tool;
@@ -151,7 +152,7 @@ public class CopyTable extends Configured implements Tool {
   // We need to split the inputs by destination tables so that output of Map can be bulk-loaded.
   TableInputFormat.configureSplitTable(job, TableName.valueOf(dstTableName));
 
-  FileSystem fs = FileSystem.get(getConf());
+  FileSystem fs = FSUtils.getCurrentFileSystem(getConf());
   Random rand = new Random();
   Path root = new Path(fs.getWorkingDirectory(), "copytable");
   fs.mkdirs(root);
@@ -380,7 +381,7 @@ public class CopyTable extends Configured implements Tool {
   if (code == 0) {
 // bulkloadDir is deleted only if LoadIncrementalHFiles was successful so that one can rerun
 // LoadIncrementalHFiles.
-FileSystem fs = FileSystem.get(this.getConf());
+FileSystem fs = FSUtils.getCurrentFileSystem(getConf());
 if (!fs.delete(this.bulkloadDir, true)) {
   LOG.error("Deleting folder " + bulkloadDir + " failed!");
   code = 1;



hbase git commit: HBASE-21445 CopyTable by bulkload will write hfile into yarn's HDFS

2018-11-09 Thread openinx
Repository: hbase
Updated Branches:
  refs/heads/branch-1.3 6618a40ff -> f94b81a2d


HBASE-21445 CopyTable by bulkload will write hfile into yarn's HDFS


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/f94b81a2
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/f94b81a2
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/f94b81a2

Branch: refs/heads/branch-1.3
Commit: f94b81a2db64b6fd7b40db680ea8834a5401ed22
Parents: 6618a40
Author: huzheng 
Authored: Sat Nov 10 10:28:49 2018 +0800
Committer: huzheng 
Committed: Sat Nov 10 10:32:58 2018 +0800

--
 .../main/java/org/apache/hadoop/hbase/mapreduce/CopyTable.java  | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/f94b81a2/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/CopyTable.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/CopyTable.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/CopyTable.java
index c31f42c..eb3227c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/CopyTable.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/CopyTable.java
@@ -40,6 +40,7 @@ import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.mapreduce.Job;
 import org.apache.hadoop.util.GenericOptionsParser;
 import org.apache.hadoop.util.Tool;
@@ -144,7 +145,7 @@ public class CopyTable extends Configured implements Tool {
   // We need to split the inputs by destination tables so that output of Map can be bulk-loaded.
   TableInputFormat.configureSplitTable(job, TableName.valueOf(dstTableName));
 
-  FileSystem fs = FileSystem.get(getConf());
+  FileSystem fs = FSUtils.getCurrentFileSystem(getConf());
   Random rand = new Random();
   Path root = new Path(fs.getWorkingDirectory(), "copytable");
   fs.mkdirs(root);
@@ -361,7 +362,7 @@ public class CopyTable extends Configured implements Tool {
   if (code == 0) {
 // bulkloadDir is deleted only if LoadIncrementalHFiles was successful so that one can rerun
 // LoadIncrementalHFiles.
-FileSystem fs = FileSystem.get(this.getConf());
+FileSystem fs = FSUtils.getCurrentFileSystem(getConf());
 if (!fs.delete(this.bulkloadDir, true)) {
   LOG.error("Deleting folder " + bulkloadDir + " failed!");
   code = 1;



hbase git commit: HBASE-21445 CopyTable by bulkload will write hfile into yarn's HDFS

2018-11-09 Thread openinx
Repository: hbase
Updated Branches:
  refs/heads/branch-1.2 3d9b0409c -> d5fb0a585


HBASE-21445 CopyTable by bulkload will write hfile into yarn's HDFS


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/d5fb0a58
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/d5fb0a58
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/d5fb0a58

Branch: refs/heads/branch-1.2
Commit: d5fb0a58502e9880d9d2f8558bd8062f4fe79c37
Parents: 3d9b040
Author: huzheng 
Authored: Sat Nov 10 10:28:49 2018 +0800
Committer: huzheng 
Committed: Sat Nov 10 10:31:38 2018 +0800

--
 .../main/java/org/apache/hadoop/hbase/mapreduce/CopyTable.java  | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/d5fb0a58/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/CopyTable.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/CopyTable.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/CopyTable.java
index 8d930d1..0432b3a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/CopyTable.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/CopyTable.java
@@ -40,6 +40,7 @@ import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.mapreduce.Job;
 import org.apache.hadoop.util.GenericOptionsParser;
 import org.apache.hadoop.util.Tool;
@@ -144,7 +145,7 @@ public class CopyTable extends Configured implements Tool {
   // We need to split the inputs by destination tables so that output of Map can be bulk-loaded.
   TableInputFormat.configureSplitTable(job, TableName.valueOf(dstTableName));
 
-  FileSystem fs = FileSystem.get(getConf());
+  FileSystem fs = FSUtils.getCurrentFileSystem(getConf());
   Random rand = new Random();
   Path root = new Path(fs.getWorkingDirectory(), "copytable");
   fs.mkdirs(root);
@@ -361,7 +362,7 @@ public class CopyTable extends Configured implements Tool {
   if (code == 0) {
 // bulkloadDir is deleted only if LoadIncrementalHFiles was successful so that one can rerun
 // LoadIncrementalHFiles.
-FileSystem fs = FileSystem.get(this.getConf());
+FileSystem fs = FSUtils.getCurrentFileSystem(getConf());
 if (!fs.delete(this.bulkloadDir, true)) {
   LOG.error("Deleting folder " + bulkloadDir + " failed!");
   code = 1;



hbase git commit: HBASE-21445 CopyTable by bulkload will write hfile into yarn's HDFS

2018-11-09 Thread openinx
Repository: hbase
Updated Branches:
  refs/heads/branch-2.1 c6090d4f0 -> 189122f3f


HBASE-21445 CopyTable by bulkload will write hfile into yarn's HDFS


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/189122f3
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/189122f3
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/189122f3

Branch: refs/heads/branch-2.1
Commit: 189122f3fe086e06d81a95ed352a9a6838fc3b67
Parents: c6090d4
Author: huzheng 
Authored: Wed Nov 7 11:25:43 2018 +0800
Committer: huzheng 
Committed: Sat Nov 10 10:21:53 2018 +0800

--
 .../org/apache/hadoop/hbase/mapreduce/CopyTable.java | 15 ++++++++-------
 1 file changed, 8 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/189122f3/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CopyTable.java
--
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CopyTable.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CopyTable.java
index 2e9e62c..22540fe 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CopyTable.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CopyTable.java
@@ -29,6 +29,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -139,13 +140,13 @@ public class CopyTable extends Configured implements Tool {
 job.setNumReduceTasks(0);
 
 if (bulkload) {
-      TableMapReduceUtil.initTableMapperJob(tableName, scan, Import.CellImporter.class, null,
-        null, job);
+      TableMapReduceUtil.initTableMapperJob(tableName, scan, Import.CellImporter.class, null, null,
+        job);
 
   // We need to split the inputs by destination tables so that output of Map can be bulk-loaded.
   TableInputFormat.configureSplitTable(job, TableName.valueOf(dstTableName));
 
-  FileSystem fs = FileSystem.get(getConf());
+  FileSystem fs = FSUtils.getCurrentFileSystem(getConf());
   Random rand = new Random();
   Path root = new Path(fs.getWorkingDirectory(), "copytable");
   fs.mkdirs(root);
@@ -161,7 +162,7 @@ public class CopyTable extends Configured implements Tool {
   try (Connection conn = ConnectionFactory.createConnection(getConf());
   Admin admin = conn.getAdmin()) {
         HFileOutputFormat2.configureIncrementalLoadMap(job,
-            admin.getDescriptor((TableName.valueOf(dstTableName))));
+          admin.getDescriptor((TableName.valueOf(dstTableName))));
   }
 } else {
   TableMapReduceUtil.initTableMapperJob(tableName, scan,
@@ -370,12 +371,12 @@ public class CopyTable extends Configured implements Tool {
 }
 int code = 0;
 if (bulkload) {
-      code = new LoadIncrementalHFiles(this.getConf()).run(new String[]{this.bulkloadDir.toString(),
-          this.dstTableName});
+      code = new LoadIncrementalHFiles(this.getConf())
+          .run(new String[] { this.bulkloadDir.toString(), this.dstTableName });
   if (code == 0) {
 // bulkloadDir is deleted only if LoadIncrementalHFiles was successful so that one can rerun
 // LoadIncrementalHFiles.
-FileSystem fs = FileSystem.get(this.getConf());
+FileSystem fs = FSUtils.getCurrentFileSystem(getConf());
 if (!fs.delete(this.bulkloadDir, true)) {
   LOG.error("Deleting folder " + bulkloadDir + " failed!");
   code = 1;



hbase git commit: HBASE-21445 CopyTable by bulkload will write hfile into yarn's HDFS

2018-11-09 Thread openinx
Repository: hbase
Updated Branches:
  refs/heads/branch-2.0 e8404c7c2 -> 5a2db03f9


HBASE-21445 CopyTable by bulkload will write hfile into yarn's HDFS


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/5a2db03f
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/5a2db03f
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/5a2db03f

Branch: refs/heads/branch-2.0
Commit: 5a2db03f93ae048182ba5f10df6bb84384738d7e
Parents: e8404c7
Author: huzheng 
Authored: Wed Nov 7 11:25:43 2018 +0800
Committer: huzheng 
Committed: Sat Nov 10 10:20:47 2018 +0800

--
 .../org/apache/hadoop/hbase/mapreduce/CopyTable.java | 15 ++++++++-------
 1 file changed, 8 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/5a2db03f/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CopyTable.java
--
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CopyTable.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CopyTable.java
index 2e9e62c..22540fe 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CopyTable.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CopyTable.java
@@ -29,6 +29,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -139,13 +140,13 @@ public class CopyTable extends Configured implements Tool {
 job.setNumReduceTasks(0);
 
 if (bulkload) {
-      TableMapReduceUtil.initTableMapperJob(tableName, scan, Import.CellImporter.class, null,
-        null, job);
+      TableMapReduceUtil.initTableMapperJob(tableName, scan, Import.CellImporter.class, null, null,
+        job);
 
   // We need to split the inputs by destination tables so that output of Map can be bulk-loaded.
   TableInputFormat.configureSplitTable(job, TableName.valueOf(dstTableName));
 
-  FileSystem fs = FileSystem.get(getConf());
+  FileSystem fs = FSUtils.getCurrentFileSystem(getConf());
   Random rand = new Random();
   Path root = new Path(fs.getWorkingDirectory(), "copytable");
   fs.mkdirs(root);
@@ -161,7 +162,7 @@ public class CopyTable extends Configured implements Tool {
   try (Connection conn = ConnectionFactory.createConnection(getConf());
   Admin admin = conn.getAdmin()) {
         HFileOutputFormat2.configureIncrementalLoadMap(job,
-            admin.getDescriptor((TableName.valueOf(dstTableName))));
+          admin.getDescriptor((TableName.valueOf(dstTableName))));
   }
 } else {
   TableMapReduceUtil.initTableMapperJob(tableName, scan,
@@ -370,12 +371,12 @@ public class CopyTable extends Configured implements Tool {
 }
 int code = 0;
 if (bulkload) {
-      code = new LoadIncrementalHFiles(this.getConf()).run(new String[]{this.bulkloadDir.toString(),
-          this.dstTableName});
+      code = new LoadIncrementalHFiles(this.getConf())
+          .run(new String[] { this.bulkloadDir.toString(), this.dstTableName });
   if (code == 0) {
 // bulkloadDir is deleted only if LoadIncrementalHFiles was successful so that one can rerun
 // LoadIncrementalHFiles.
-FileSystem fs = FileSystem.get(this.getConf());
+FileSystem fs = FSUtils.getCurrentFileSystem(getConf());
 if (!fs.delete(this.bulkloadDir, true)) {
   LOG.error("Deleting folder " + bulkloadDir + " failed!");
   code = 1;



hbase git commit: HBASE-21445 CopyTable by bulkload will write hfile into yarn's HDFS

2018-11-09 Thread openinx
Repository: hbase
Updated Branches:
  refs/heads/branch-2 320d657fb -> ee19f2b36


HBASE-21445 CopyTable by bulkload will write hfile into yarn's HDFS


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/ee19f2b3
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/ee19f2b3
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/ee19f2b3

Branch: refs/heads/branch-2
Commit: ee19f2b36f2399f5303cf5d7a5c2c44e44555cf8
Parents: 320d657
Author: huzheng 
Authored: Wed Nov 7 11:25:43 2018 +0800
Committer: huzheng 
Committed: Sat Nov 10 10:19:45 2018 +0800

--
 .../org/apache/hadoop/hbase/mapreduce/CopyTable.java | 15 ++++++++-------
 1 file changed, 8 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/ee19f2b3/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CopyTable.java
--
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CopyTable.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CopyTable.java
index 2e9e62c..22540fe 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CopyTable.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CopyTable.java
@@ -29,6 +29,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -139,13 +140,13 @@ public class CopyTable extends Configured implements Tool {
 job.setNumReduceTasks(0);
 
 if (bulkload) {
-      TableMapReduceUtil.initTableMapperJob(tableName, scan, Import.CellImporter.class, null,
-        null, job);
+      TableMapReduceUtil.initTableMapperJob(tableName, scan, Import.CellImporter.class, null, null,
+        job);
 
   // We need to split the inputs by destination tables so that output of Map can be bulk-loaded.
   TableInputFormat.configureSplitTable(job, TableName.valueOf(dstTableName));
 
-  FileSystem fs = FileSystem.get(getConf());
+  FileSystem fs = FSUtils.getCurrentFileSystem(getConf());
   Random rand = new Random();
   Path root = new Path(fs.getWorkingDirectory(), "copytable");
   fs.mkdirs(root);
@@ -161,7 +162,7 @@ public class CopyTable extends Configured implements Tool {
   try (Connection conn = ConnectionFactory.createConnection(getConf());
   Admin admin = conn.getAdmin()) {
         HFileOutputFormat2.configureIncrementalLoadMap(job,
-            admin.getDescriptor((TableName.valueOf(dstTableName))));
+          admin.getDescriptor((TableName.valueOf(dstTableName))));
   }
 } else {
   TableMapReduceUtil.initTableMapperJob(tableName, scan,
@@ -370,12 +371,12 @@ public class CopyTable extends Configured implements Tool {
 }
 int code = 0;
 if (bulkload) {
-      code = new LoadIncrementalHFiles(this.getConf()).run(new String[]{this.bulkloadDir.toString(),
-          this.dstTableName});
+      code = new LoadIncrementalHFiles(this.getConf())
+          .run(new String[] { this.bulkloadDir.toString(), this.dstTableName });
   if (code == 0) {
 // bulkloadDir is deleted only if LoadIncrementalHFiles was successful so that one can rerun
 // LoadIncrementalHFiles.
-FileSystem fs = FileSystem.get(this.getConf());
+FileSystem fs = FSUtils.getCurrentFileSystem(getConf());
 if (!fs.delete(this.bulkloadDir, true)) {
   LOG.error("Deleting folder " + bulkloadDir + " failed!");
   code = 1;



hbase git commit: HBASE-21445 CopyTable by bulkload will write hfile into yarn's HDFS

2018-11-09 Thread openinx
Repository: hbase
Updated Branches:
  refs/heads/master ccabf7310 -> 813528550


HBASE-21445 CopyTable by bulkload will write hfile into yarn's HDFS


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/81352855
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/81352855
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/81352855

Branch: refs/heads/master
Commit: 81352855068d2f81fe1f08967ed0de6d2873136d
Parents: ccabf73
Author: huzheng 
Authored: Wed Nov 7 11:25:43 2018 +0800
Committer: huzheng 
Committed: Sat Nov 10 10:18:01 2018 +0800

--
 .../org/apache/hadoop/hbase/mapreduce/CopyTable.java | 15 ++++++++-------
 1 file changed, 8 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/81352855/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CopyTable.java
--
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CopyTable.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CopyTable.java
index b20f07f..4e57f54 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CopyTable.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CopyTable.java
@@ -29,6 +29,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -139,13 +140,13 @@ public class CopyTable extends Configured implements Tool {
 job.setNumReduceTasks(0);
 
 if (bulkload) {
-      TableMapReduceUtil.initTableMapperJob(tableName, scan, Import.CellImporter.class, null,
-        null, job);
+      TableMapReduceUtil.initTableMapperJob(tableName, scan, Import.CellImporter.class, null, null,
+        job);
 
   // We need to split the inputs by destination tables so that output of Map can be bulk-loaded.
   TableInputFormat.configureSplitTable(job, TableName.valueOf(dstTableName));
 
-  FileSystem fs = FileSystem.get(getConf());
+  FileSystem fs = FSUtils.getCurrentFileSystem(getConf());
   Random rand = new Random();
   Path root = new Path(fs.getWorkingDirectory(), "copytable");
   fs.mkdirs(root);
@@ -161,7 +162,7 @@ public class CopyTable extends Configured implements Tool {
   try (Connection conn = ConnectionFactory.createConnection(getConf());
   Admin admin = conn.getAdmin()) {
         HFileOutputFormat2.configureIncrementalLoadMap(job,
-            admin.getDescriptor((TableName.valueOf(dstTableName))));
+          admin.getDescriptor((TableName.valueOf(dstTableName))));
   }
 } else {
   TableMapReduceUtil.initTableMapperJob(tableName, scan,
@@ -370,12 +371,12 @@ public class CopyTable extends Configured implements Tool {
 }
 int code = 0;
 if (bulkload) {
-      code = new LoadIncrementalHFiles(this.getConf()).run(new String[]{this.bulkloadDir.toString(),
-          this.dstTableName});
+      code = new LoadIncrementalHFiles(this.getConf())
+          .run(new String[] { this.bulkloadDir.toString(), this.dstTableName });
   if (code == 0) {
 // bulkloadDir is deleted only if LoadIncrementalHFiles was successful so that one can rerun
 // LoadIncrementalHFiles.
-FileSystem fs = FileSystem.get(this.getConf());
+FileSystem fs = FSUtils.getCurrentFileSystem(getConf());
 if (!fs.delete(this.bulkloadDir, true)) {
   LOG.error("Deleting folder " + bulkloadDir + " failed!");
   code = 1;
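For context, a hedged usage sketch of the code path this patch fixes: driving CopyTable with --bulkload through ToolRunner, assuming the same no-argument constructor that CopyTable's own main() uses on master. Table names are placeholders; the option spellings follow CopyTable's usage text.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.CopyTable;
import org.apache.hadoop.util.ToolRunner;

public class CopyTableBulkloadExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // --bulkload makes CopyTable write HFiles and bulk load them into the
    // destination table; with this fix the intermediate HFiles are staged on
    // the filesystem backing hbase.rootdir rather than YARN's fs.defaultFS.
    int code = ToolRunner.run(conf, new CopyTable(),
        new String[] { "--bulkload", "--new.name=destTable", "sourceTable" });
    System.exit(code);
  }
}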



hbase git commit: HBASE-21437 Bypassed procedure throw IllegalArgumentException when its state is WAITING_TIMEOUT

2018-11-09 Thread allan163
Repository: hbase
Updated Branches:
  refs/heads/master fe2265fa4 -> ccabf7310


HBASE-21437 Bypassed procedure throw IllegalArgumentException when its state is WAITING_TIMEOUT

Signed-off-by: Allan Yang 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/ccabf731
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/ccabf731
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/ccabf731

Branch: refs/heads/master
Commit: ccabf7310d3fdab3b4a24ea60b391995367256a6
Parents: fe2265f
Author: jingyuntian 
Authored: Fri Nov 9 23:03:19 2018 +0800
Committer: Allan Yang 
Committed: Fri Nov 9 23:03:19 2018 +0800

--
 .../hbase/procedure2/ProcedureExecutor.java     | 22 +++++++++++++++-------
 .../hbase/procedure2/TimeoutExecutorThread.java |  2 +-
 .../hbase/procedure2/TestProcedureBypass.java   | 36 ++++++++++++++++++++++++
 3 files changed, 52 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/ccabf731/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
--
diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
index d02ca6e..c18ca32 100644
--- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
+++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
@@ -967,17 +967,25 @@ public class ProcedureExecutor<TEnvironment> {
 store.update(procedure);
   }
 
-  // If we don't have the lock, we can't re-submit the queue,
-  // since it is already executing. To get rid of the stuck situation, we
-  // need to restart the master. With the procedure set to bypass, the procedureExecutor
-  // will bypass it and won't get stuck again.
-  if (lockEntry != null) {
-// add the procedure to run queue,
+  // If the state of the procedure is WAITING_TIMEOUT, we can not directly submit it to the
+  // scheduler. Instead we should remove it from the timeout executor queue and transfer its
+  // state to RUNNABLE.
+  if (procedure.getState() == ProcedureState.WAITING_TIMEOUT) {
+LOG.debug("transform procedure {} from WAITING_TIMEOUT to RUNNABLE", procedure);
+if (timeoutExecutor.remove(procedure)) {
+  LOG.debug("removed procedure {} from timeoutExecutor", procedure);
+  timeoutExecutor.executeTimedoutProcedure(procedure);
+}
+  } else if (lockEntry != null) {
 scheduler.addFront(procedure);
 LOG.debug("Bypassing {} and its ancestors successfully, adding to 
queue", procedure);
   } else {
+// If we don't have the lock, we can't re-submit the queue,
+// since it is already executing. To get rid of the stuck situation, we
+// need to restart the master. With the procedure set to bypass, the procedureExecutor
+// will bypass it and won't get stuck again.
 LOG.debug("Bypassing {} and its ancestors successfully, but since it 
is already running, "
-+ "skipping add to queue", procedure);
++ "skipping add to queue",
+  procedure);
   }
   return true;
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/ccabf731/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/TimeoutExecutorThread.java
--
diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/TimeoutExecutorThread.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/TimeoutExecutorThread.java
index 9e050a2..4416177 100644
--- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/TimeoutExecutorThread.java
+++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/TimeoutExecutorThread.java
@@ -126,7 +126,7 @@ class TimeoutExecutorThread<TEnvironment> extends StoppableThread {
 }
   }
 
-  private void executeTimedoutProcedure(Procedure<TEnvironment> proc) {
+  protected void executeTimedoutProcedure(Procedure<TEnvironment> proc) {
 // The procedure received a timeout. if the procedure itself does not handle it,
 // call abort() and add the procedure back in the queue for rollback.
 if (proc.setTimeoutFailure(executor.getEnvironment())) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/ccabf731/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureBypass.java
--
diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureBypass.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureBypass.java
index 7d587fd.
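To make the new WAITING_TIMEOUT branch concrete: a procedure parked in WAITING_TIMEOUT sits in the timeout executor's queue, not the scheduler, so pushing it straight onto the run queue is what the issue reports as raising IllegalArgumentException; the patch dequeues it from the timeout executor and re-drives it through the timeout path into RUNNABLE. A self-contained toy model of that decision follows (every name below is illustrative; none of it is HBase API):

import java.util.ArrayDeque;
import java.util.Deque;

// Toy model of the bypass flow this patch adds; illustrative only.
public class BypassSketch {
  enum State { RUNNABLE, WAITING_TIMEOUT }

  private final Deque<String> timeoutQueue = new ArrayDeque<>();
  private final Deque<String> runQueue = new ArrayDeque<>();

  void bypass(String proc, State state, boolean holdsLock) {
    if (state == State.WAITING_TIMEOUT) {
      // New branch: pull the procedure out of the timeout executor first,
      // then let the timeout path move it to RUNNABLE (here: the run queue).
      if (timeoutQueue.remove(proc)) {
        runQueue.addFirst(proc); // stands in for executeTimedoutProcedure()
      }
    } else if (holdsLock) {
      runQueue.addFirst(proc); // stands in for scheduler.addFront(procedure)
    }
    // else: already executing; the bypass flag is honored on its next step
  }

  public static void main(String[] args) {
    BypassSketch ps = new BypassSketch();
    ps.timeoutQueue.add("pid-42");
    ps.bypass("pid-42", State.WAITING_TIMEOUT, false);
    System.out.println(ps.runQueue); // prints [pid-42]
  }
}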

hbase git commit: HBASE-21437 Bypassed procedure throw IllegalArgumentException when its state is WAITING_TIMEOUT

2018-11-09 Thread allan163
Repository: hbase
Updated Branches:
  refs/heads/branch-2 6f602db5f -> 320d657fb


HBASE-21437 Bypassed procedure throw IllegalArgumentException when its state is WAITING_TIMEOUT

Signed-off-by: Allan Yang 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/320d657f
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/320d657f
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/320d657f

Branch: refs/heads/branch-2
Commit: 320d657fb4caa86fdd8bfbfda02ae6e26ae1877f
Parents: 6f602db
Author: jingyuntian 
Authored: Fri Nov 9 22:59:30 2018 +0800
Committer: Allan Yang 
Committed: Fri Nov 9 22:59:30 2018 +0800

--
 .../hbase/procedure2/ProcedureExecutor.java     | 22 +++++++++++++++-------
 .../hbase/procedure2/TimeoutExecutorThread.java |  2 +-
 .../hbase/procedure2/TestProcedureBypass.java   | 36 ++++++++++++++++++++++++
 3 files changed, 52 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/320d657f/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
--
diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
index 084da21..6c05492 100644
--- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
+++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
@@ -967,17 +967,25 @@ public class ProcedureExecutor<TEnvironment> {
 store.update(procedure);
   }
 
-  // If we don't have the lock, we can't re-submit the queue,
-  // since it is already executing. To get rid of the stuck situation, we
-  // need to restart the master. With the procedure set to bypass, the procedureExecutor
-  // will bypass it and won't get stuck again.
-  if (lockEntry != null) {
-// add the procedure to run queue,
+  // If the state of the procedure is WAITING_TIMEOUT, we can not directly submit it to the
+  // scheduler. Instead we should remove it from the timeout executor queue and transfer its
+  // state to RUNNABLE.
+  if (procedure.getState() == ProcedureState.WAITING_TIMEOUT) {
+LOG.debug("transform procedure {} from WAITING_TIMEOUT to RUNNABLE", procedure);
+if (timeoutExecutor.remove(procedure)) {
+  LOG.debug("removed procedure {} from timeoutExecutor", procedure);
+  timeoutExecutor.executeTimedoutProcedure(procedure);
+}
+  } else if (lockEntry != null) {
 scheduler.addFront(procedure);
 LOG.debug("Bypassing {} and its ancestors successfully, adding to 
queue", procedure);
   } else {
+// If we don't have the lock, we can't re-submit the queue,
+// since it is already executing. To get rid of the stuck situation, we
+// need to restart the master. With the procedure set to bypass, the procedureExecutor
+// will bypass it and won't get stuck again.
 LOG.debug("Bypassing {} and its ancestors successfully, but since it 
is already running, "
-+ "skipping add to queue", procedure);
++ "skipping add to queue",
+  procedure);
   }
   return true;
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/320d657f/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/TimeoutExecutorThread.java
--
diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/TimeoutExecutorThread.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/TimeoutExecutorThread.java
index 9e050a2..4416177 100644
--- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/TimeoutExecutorThread.java
+++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/TimeoutExecutorThread.java
@@ -126,7 +126,7 @@ class TimeoutExecutorThread<TEnvironment> extends StoppableThread {
 }
   }
 
-  private void executeTimedoutProcedure(Procedure<TEnvironment> proc) {
+  protected void executeTimedoutProcedure(Procedure<TEnvironment> proc) {
 // The procedure received a timeout. if the procedure itself does not handle it,
 // call abort() and add the procedure back in the queue for rollback.
 if (proc.setTimeoutFailure(executor.getEnvironment())) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/320d657f/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureBypass.java
--
diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureBypass.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureBypass.java
index 7d58

hbase-site git commit: INFRA-10751 Empty commit

2018-11-09 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 9592fdb58 -> a19d669e4


INFRA-10751 Empty commit


Project: http://git-wip-us.apache.org/repos/asf/hbase-site/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase-site/commit/a19d669e
Tree: http://git-wip-us.apache.org/repos/asf/hbase-site/tree/a19d669e
Diff: http://git-wip-us.apache.org/repos/asf/hbase-site/diff/a19d669e

Branch: refs/heads/asf-site
Commit: a19d669e458d23b77aa337ad67c31541eecfbcc5
Parents: 9592fdb
Author: jenkins 
Authored: Fri Nov 9 14:54:34 2018 +
Committer: jenkins 
Committed: Fri Nov 9 14:54:34 2018 +

--

--




[23/47] hbase-site git commit: Published site at fe2265fa4a1e828b2e68ff8e42639c5942dccb1b.

2018-11-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9592fdb5/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.ReadRequestCostFunction.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.ReadRequestCostFunction.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.ReadRequestCostFunction.html
index 9d1542c..ec5f688 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.ReadRequestCostFunction.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.ReadRequestCostFunction.html

[31/47] hbase-site git commit: Published site at fe2265fa4a1e828b2e68ff8e42639c5942dccb1b.

2018-11-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9592fdb5/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.LoadCandidateGenerator.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.LoadCandidateGenerator.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.LoadCandidateGenerator.html
index 9d1542c..ec5f688 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.LoadCandidateGenerator.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.LoadCandidateGenerator.html

[42/47] hbase-site git commit: Published site at fe2265fa4a1e828b2e68ff8e42639c5942dccb1b.

2018-11-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9592fdb5/devapidocs/org/apache/hadoop/hbase/client/RegionInfo.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/RegionInfo.html b/devapidocs/org/apache/hadoop/hbase/client/RegionInfo.html
index dff70dc..8ea34c4 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/RegionInfo.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/RegionInfo.html

[38/47] hbase-site git commit: Published site at fe2265fa4a1e828b2e68ff8e42639c5942dccb1b.

2018-11-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9592fdb5/devapidocs/src-html/org/apache/hadoop/hbase/client/RegionInfo.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/RegionInfo.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/RegionInfo.html
index 6df10fd..96d95ad 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/RegionInfo.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/RegionInfo.html
[40/47] hbase-site git commit: Published site at fe2265fa4a1e828b2e68ff8e42639c5942dccb1b.

2018-11-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9592fdb5/devapidocs/org/apache/hadoop/hbase/master/procedure/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/master/procedure/package-tree.html b/devapidocs/org/apache/hadoop/hbase/master/procedure/package-tree.html
index 86fe2b3..4a6979a 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/procedure/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/procedure/package-tree.html
@@ -215,9 +215,9 @@
 
 java.lang.https://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true";
 title="class or interface in java.lang">Enum (implements java.lang.https://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true";
 title="class or interface in java.lang">Comparable, java.io.https://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true";
 title="class or interface in java.io">Serializable)
 
-org.apache.hadoop.hbase.master.procedure.PeerProcedureInterface.PeerOperationType
-org.apache.hadoop.hbase.master.procedure.MetaProcedureInterface.MetaOperationType
 org.apache.hadoop.hbase.master.procedure.ServerProcedureInterface.ServerOperationType
+org.apache.hadoop.hbase.master.procedure.MetaProcedureInterface.MetaOperationType
+org.apache.hadoop.hbase.master.procedure.PeerProcedureInterface.PeerOperationType
 org.apache.hadoop.hbase.master.procedure.TableProcedureInterface.TableOperationType
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9592fdb5/devapidocs/org/apache/hadoop/hbase/monitoring/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/monitoring/package-tree.html 
b/devapidocs/org/apache/hadoop/hbase/monitoring/package-tree.html
index 4b549da..7b435d2 100644
--- a/devapidocs/org/apache/hadoop/hbase/monitoring/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/monitoring/package-tree.html
@@ -127,8 +127,8 @@
 
 java.lang.Enum (implements java.lang.Comparable, java.io.Serializable)
 
-org.apache.hadoop.hbase.monitoring.TaskMonitor.TaskFilter.TaskType
 org.apache.hadoop.hbase.monitoring.MonitoredTask.State
+org.apache.hadoop.hbase.monitoring.TaskMonitor.TaskFilter.TaskType
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9592fdb5/devapidocs/org/apache/hadoop/hbase/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/package-tree.html 
b/devapidocs/org/apache/hadoop/hbase/package-tree.html
index 7bd4d55..73f1f8d 100644
--- a/devapidocs/org/apache/hadoop/hbase/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/package-tree.html
@@ -437,19 +437,19 @@
 
 java.lang.Enum (implements java.lang.Comparable, java.io.Serializable)
 
+org.apache.hadoop.hbase.Cell.Type
+org.apache.hadoop.hbase.CompatibilitySingletonFactory.SingletonStorage
+org.apache.hadoop.hbase.HealthChecker.HealthCheckerExitStatus
+org.apache.hadoop.hbase.CellBuilderType
+org.apache.hadoop.hbase.MetaTableAccessor.QueryType
+org.apache.hadoop.hbase.Size.Unit
+org.apache.hadoop.hbase.MemoryCompactionPolicy
 org.apache.hadoop.hbase.CompareOperator
-org.apache.hadoop.hbase.Coprocessor.State
 org.apache.hadoop.hbase.KeyValue.Type
 org.apache.hadoop.hbase.HConstants.OperationStatusCode
-org.apache.hadoop.hbase.CellBuilderType
 org.apache.hadoop.hbase.KeepDeletedCells
-org.apache.hadoop.hbase.MetaTableAccessor.QueryType
-org.apache.hadoop.hbase.Size.Unit
-org.apache.hadoop.hbase.HealthChecker.HealthCheckerExitStatus
 org.apache.hadoop.hbase.ClusterMetrics.Option
-org.apache.hadoop.hbase.CompatibilitySingletonFactory.SingletonStorage
-org.apache.hadoop.hbase.Cell.Type
-org.apache.hadoop.hbase.MemoryCompactionPolicy
+org.apache.hadoop.hbase.Coprocessor.State
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9592fdb5/devapidocs/org/apache/hadoop/hbase/quotas/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/quotas/package-tree.html 
b/devapidocs/org/apache/hadoop/hbase/quotas/package-tree.ht

[41/47] hbase-site git commit: Published site at fe2265fa4a1e828b2e68ff8e42639c5942dccb1b.

2018-11-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9592fdb5/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionInfo.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionInfo.html 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionInfo.html
index 911cf94..557aa17 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionInfo.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionInfo.html
@@ -1081,100 +1081,105 @@ Input/OutputFormats, a table indexing MapReduce job, 
and utility methods.
 
 
(One new row is added to the method-use table; the remaining -/+ pairs in this hunk only re-alternate row colors. Cleaned of markup residue, the rows read:)

+ static String           RegionInfo.getRegionNameAsString(RegionInfo ri, byte[] regionName)
  static String           RegionInfoDisplay.getRegionNameAsStringForDisplay(RegionInfo ri, org.apache.hadoop.conf.Configuration conf)
                            Get the region name for display.
  static byte[]           RegionInfoDisplay.getRegionNameForDisplay(RegionInfo ri, org.apache.hadoop.conf.Configuration conf)
                            Get the region name for display.
  static String           RegionInfo.getShortNameToLog(RegionInfo... hris)
  static byte[]           RegionInfoDisplay.getStartKeyForDisplay(RegionInfo ri, org.apache.hadoop.conf.Configuration conf)
                            Get the start key for display.
  static boolean          RegionReplicaUtil.isDefaultReplica(RegionInfo hri)
  static boolean          RegionReplicaUtil.isReplicasForSameRegion(RegionInfo regionInfoA, RegionInfo regionInfoB)
  private boolean         TableSnapshotScanner.isValidRegion(RegionInfo hri)
  private boolean         RawAsyncTableImpl.locateFinished(RegionInfo region, byte[] endKey, boolean endKeyInclusive)
  static RegionInfoBuilder  RegionInfoBuilder.newBuilder(RegionInfo regionInfo)
  (package private) static boolean  ConnectionUtils.noMoreResultsForReverseScan(Scan scan, RegionInfo info)
  (package private) static boolean  ConnectionUtils.noMoreResultsForScan(Scan scan, RegionInfo info)
  void                    AsyncTable.CoprocessorCallback.onRegionComplete(RegionInfo region, R resp)
  void                    AsyncTable.CoprocessorCallback.onRegionError(RegionInfo region, Throwable error)
  private CompletableFuture<Void>   RawAsyncHBaseAdmin.split(RegionInfo hri, byte[] splitPoint)
  (package private) Future<Void>    HBaseAdmin.splitRegionAsync(RegionInfo hri, byte[] splitPoint)
  static byte[]           RegionInfo.toByteArray(RegionInfo ri)
  static byte[]           RegionInfo.toDelimitedByteArray(RegionInfo... infos)
                            Serializes given RegionInfo's as a byte array.
  static byte[]           RegionInfo.toDelimitedByteArray(RegionInfo ri)
                            Use this instead of toByteArray(RegionInfo) when writing to a stream and you want
                            to use the pb mergeDelimitedFrom (w/o the delimiter, pb reads to EOF which may
                            not be what you want).
  (package private) void  ConnectionImplementation.updateCachedLocation(RegionInfo hri, ServerName source, …
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9592fdb5/devapidocs/org/apache/hadoop/hbase/client/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/package-tree.html 
b/devapidocs/org/apache/hadoop/hbase/client/package-tree.html
index 0a69c99..3110163 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/package-tree.html
@@ -555,24 +555,24 @@
 
 java.lang.Enum (implements java.lang.Comparable, java.io.Serializable)

[33/47] hbase-site git commit: Published site at fe2265fa4a1e828b2e68ff8e42639c5942dccb1b.

2018-11-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9592fdb5/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.CostFromRegionLoadFunction.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.CostFromRegionLoadFunction.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.CostFromRegionLoadFunction.html
index 9d1542c..ec5f688 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.CostFromRegionLoadFunction.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.CostFromRegionLoadFunction.html
@@ -53,502 +53,502 @@
 045import 
org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.LocalityType;
 046import 
org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.MoveRegionAction;
 047import 
org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.SwapRegionsAction;
-048import 
org.apache.hadoop.hbase.util.Bytes;
-049import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-050import 
org.apache.yetus.audience.InterfaceAudience;
-051import org.slf4j.Logger;
-052import org.slf4j.LoggerFactory;
-053
-054import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-055import 
org.apache.hbase.thirdparty.com.google.common.base.Optional;
-056import 
org.apache.hbase.thirdparty.com.google.common.collect.Lists;
+048import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+049import 
org.apache.yetus.audience.InterfaceAudience;
+050import org.slf4j.Logger;
+051import org.slf4j.LoggerFactory;
+052
+053import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
+054import 
org.apache.hbase.thirdparty.com.google.common.base.Optional;
+055import 
org.apache.hbase.thirdparty.com.google.common.collect.Lists;
+056
 057
(The removed Bytes import shifts everything below it up one line; the class javadoc is otherwise unchanged. Cleaned of markup residue, it reads:)

/**
 * This is a best effort load balancer. Given a Cost function F(C) => x It will
 * randomly try and mutate the cluster to Cprime. If F(Cprime) < F(C) then the
 * new cluster state becomes the plan. It includes costs functions to compute the cost of:
 *   - Region Load
 *   - Table Load
 *   - Data Locality
 *   - Memstore Sizes
 *   - Storefile Sizes
 *
 * Every cost function returns a number between 0 and 1 inclusive; where 0 is the lowest cost
 * best solution, and 1 is the highest possible cost and the worst solution. The computed costs are
 * scaled by their respective multipliers:
 *   - hbase.master.balancer.stochastic.regionLoadCost
 *   - hbase.master.balancer.stochastic.moveCost
 *   - hbase.master.balancer.stochastic.tableLoadCost
 *   - hbase.master.balancer.stochastic.localityCost
 *   - hbase.master.balancer.stochastic.memstoreSizeCost
 *   - hbase.master.balancer.stochastic.storefileSizeCost
 *
 * In addition to the above configurations, the balancer can be tuned by the following
 * configuration values:
 *   - hbase.master.balancer.stochastic.maxMoveRegions which controls what the max number of
 *     regions that can be moved in a single invocation of this balancer.
 *   - hbase.master.balancer.stochastic.stepsPerRegion is the coefficient by which the number of
 *     regions is multiplied to try and get the number of times the balancer will mutate all
 *     servers.
 *   - hbase.master.balancer.stochastic.maxSteps which controls the maximum number of times that
 *     the balancer will try and mutate all the servers. The balancer will use the minimum of this
 *     value and the above computation.
 *
 * This balancer is best used with hbase.master.loadbalance.bytable set to false so that the
 * balancer gets the full picture of all loads on the cluster.
 */
@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
@edu.umd.cs.findbugs.annotations.SuppressWarnings(value="IS2_INCONSISTENT_SYNC",
  justification="Complaint is about costFunctions not being synchronized; not end of the world")
public class StochasticLoadBalancer extends BaseLoadBalancer {

  protected static final S…
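Since the javadoc names the tuning knobs, here is a minimal sketch of setting them on a Configuration; the numeric values below are arbitrary examples, not recommendations:

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;

  public class StochasticBalancerTuningSketch {
    public static void main(String[] args) {
      Configuration conf = HBaseConfiguration.create();
      // Upper bound on mutation attempts per balancer run (example value).
      conf.setInt("hbase.master.balancer.stochastic.maxSteps", 1000000);
      // Coefficient multiplied by the region count to size the search (example value).
      conf.setInt("hbase.master.balancer.stochastic.stepsPerRegion", 800);
      // Raise the relative weight of locality in the blended cost (example value).
      conf.setFloat("hbase.master.balancer.stochastic.localityCost", 50f);
      // Per the javadoc: let the balancer see the whole cluster, not one table at a time.
      conf.setBoolean("hbase.master.loadbalance.bytable", false);
    }
  }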

[25/47] hbase-site git commit: Published site at fe2265fa4a1e828b2e68ff8e42639c5942dccb1b.

2018-11-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9592fdb5/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RackLocalityCostFunction.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RackLocalityCostFunction.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RackLocalityCostFunction.html
index 9d1542c..ec5f688 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RackLocalityCostFunction.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RackLocalityCostFunction.html
@@ -53,502 +53,502 @@
(Same generated hunk as in StochasticLoadBalancer.CostFromRegionLoadFunction.html above, index 9d1542c..ec5f688: the unused Bytes import is dropped and the class javadoc/source shifts up one line. Verbatim duplicate body omitted.)

[46/47] hbase-site git commit: Published site at fe2265fa4a1e828b2e68ff8e42639c5942dccb1b.

2018-11-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9592fdb5/apache_hbase_reference_guide.pdf
--
diff --git a/apache_hbase_reference_guide.pdf b/apache_hbase_reference_guide.pdf
index f399d9f..deb2ad9 100644
--- a/apache_hbase_reference_guide.pdf
+++ b/apache_hbase_reference_guide.pdf
@@ -5,16 +5,16 @@
 /Author (Apache HBase Team)
 /Creator (Asciidoctor PDF 1.5.0.alpha.15, based on Prawn 2.2.2)
 /Producer (Apache HBase Team)
-/ModDate (D:20181108143309+00'00')
-/CreationDate (D:20181108144803+00'00')
+/ModDate (D:20181109143344+00'00')
+/CreationDate (D:20181109144916+00'00')
 >>
 endobj
 2 0 obj
 << /Type /Catalog
 /Pages 3 0 R
 /Names 28 0 R
-/Outlines 5015 0 R
-/PageLabels 5265 0 R
+/Outlines 5028 0 R
+/PageLabels 5278 0 R
 /PageMode /UseOutlines
 /OpenAction [7 0 R /FitH 842.89]
 /ViewerPreferences << /DisplayDocTitle true
@@ -23,8 +23,8 @@ endobj
 endobj
 3 0 obj
 << /Type /Pages
-/Count 787
-/Kids [ ... regenerated page-tree object references; several hundred "N 0 R" entries omitted ... ]

[35/47] hbase-site git commit: Published site at fe2265fa4a1e828b2e68ff8e42639c5942dccb1b.

2018-11-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9592fdb5/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.CandidateGenerator.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.CandidateGenerator.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.CandidateGenerator.html
index 9d1542c..ec5f688 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.CandidateGenerator.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.CandidateGenerator.html
@@ -53,502 +53,502 @@
(Same generated hunk as in StochasticLoadBalancer.CostFromRegionLoadFunction.html above, index 9d1542c..ec5f688: the unused Bytes import is dropped and the class javadoc/source shifts up one line. Verbatim duplicate body omitted.)

[44/47] hbase-site git commit: Published site at fe2265fa4a1e828b2e68ff8e42639c5942dccb1b.

2018-11-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9592fdb5/apidocs/src-html/org/apache/hadoop/hbase/client/RegionInfo.html
--
diff --git a/apidocs/src-html/org/apache/hadoop/hbase/client/RegionInfo.html 
b/apidocs/src-html/org/apache/hadoop/hbase/client/RegionInfo.html
index 6df10fd..96d95ad 100644
--- a/apidocs/src-html/org/apache/hadoop/hbase/client/RegionInfo.html
+++ b/apidocs/src-html/org/apache/hadoop/hbase/client/RegionInfo.html
@@ -26,742 +26,764 @@
 018 */
 019package org.apache.hadoop.hbase.client;
 020
-021import java.io.DataInputStream;
-022import java.io.IOException;
-023import java.util.ArrayList;
-024import java.util.Arrays;
-025import java.util.Comparator;
-026import java.util.List;
-027import java.util.stream.Collectors;
-028
-029import 
org.apache.hadoop.hbase.HConstants;
-030import 
org.apache.hadoop.hbase.TableName;
-031import 
org.apache.hadoop.hbase.exceptions.DeserializationException;
-032import 
org.apache.hadoop.hbase.util.ByteArrayHashKey;
-033import 
org.apache.hadoop.hbase.util.Bytes;
-034import 
org.apache.hadoop.hbase.util.HashKey;
-035import 
org.apache.hadoop.hbase.util.JenkinsHash;
-036import 
org.apache.hadoop.hbase.util.MD5Hash;
-037import 
org.apache.hadoop.io.DataInputBuffer;
-038import 
org.apache.hadoop.util.StringUtils;
-039import 
org.apache.yetus.audience.InterfaceAudience;
-040
-041import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-042import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
-043
-044/**
-045 * Information about a region. A region 
is a range of keys in the whole keyspace
-046 * of a table, an identifier (a 
timestamp) for differentiating between subset
-047 * ranges (after region split) and a 
replicaId for differentiating the instance
-048 * for the same range and some status 
information about the region.
-049 *
-050 * The region has a unique name which 
consists of the following fields:
-051 * 
    -052 *
  • tableName : The name of the table
  • -053 *
  • startKey: The startKey for the region.
  • -054 *
  • regionId: A timestamp when the region is created.
  • -055 *
  • replicaId : An id starting from 0 to differentiate replicas of the -056 * same region range but hosted in separated servers. The same region range can -057 * be hosted in multiple locations.
  • -058 *
  • encodedName : An MD5 encoded string for the region name.
  • -059 *
-060 * -061 *
Other than the fields in the region name, region info contains: -062 *
    -063 *
  • endKey : the endKey for the region (exclusive)
  • -064 *
  • split : Whether the region is split
  • -065 *
  • offline : Whether the region is offline
  • -066 *
-067 * -068 */ -069@InterfaceAudience.Public -070public interface RegionInfo { -071 /** -072 * Separator used to demarcate the encodedName in a region name -073 * in the new format. See description on new format above. -074 */ -075 @InterfaceAudience.Private -076 int ENC_SEPARATOR = '.'; -077 -078 @InterfaceAudience.Private -079 int MD5_HEX_LENGTH = 32; -080 -081 @InterfaceAudience.Private -082 int DEFAULT_REPLICA_ID = 0; -083 -084 /** -085 * to keep appended int's sorted in string format. Only allows 2 bytes -086 * to be sorted for replicaId. -087 */ -088 @InterfaceAudience.Private -089 String REPLICA_ID_FORMAT = "%04X"; -090 -091 @InterfaceAudience.Private -092 byte REPLICA_ID_DELIMITER = (byte)'_'; -093 -094 @InterfaceAudience.Private -095 String INVALID_REGION_NAME_FORMAT_MESSAGE = "Invalid regionName format"; -096 -097 @InterfaceAudience.Private -098 Comparator COMPARATOR -099= (RegionInfo lhs, RegionInfo rhs) -> { -100 if (rhs == null) { -101return 1; -102 } -103 -104 // Are regions of same table? -105 int result = lhs.getTable().compareTo(rhs.getTable()); -106 if (result != 0) { -107return result; -108 } -109 -110 // Compare start keys. -111 result = Bytes.compareTo(lhs.getStartKey(), rhs.getStartKey()); -112 if (result != 0) { -113return result; -114 } -115 -116 // Compare end keys. -117 result = Bytes.compareTo(lhs.getEndKey(), rhs.getEndKey()); -118 -119 if (result != 0) { -120if (lhs.getStartKey().length != 0 -121&& lhs.getEndKey().length == 0) { -122return 1; // this is last region -123} -124if (rhs.getStartKey().length != 0 -125&& rhs.getEndKey().length == 0) { -126return -1; // o is the last region -127} -128return result; -129 } -130 -131 // regionId is usually milli timestamp -- this defines older stamps -132 // to be "smaller" than newer stamps in sort order. -133 if (lhs.getRegionId() > rhs.getRegionId()) { -134return 1; -135 } else if (lhs.getRegionId() < rhs.getRegionId()) { -1
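A short sketch of how those name fields come together when building a RegionInfo with RegionInfoBuilder; the table, keys, regionId and replicaId below are invented:

  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.RegionInfo;
  import org.apache.hadoop.hbase.client.RegionInfoBuilder;
  import org.apache.hadoop.hbase.util.Bytes;

  public class RegionNameSketch {
    public static void main(String[] args) {
      RegionInfo ri = RegionInfoBuilder.newBuilder(TableName.valueOf("demo"))
          .setStartKey(Bytes.toBytes("aaa"))   // startKey goes into the region name
          .setEndKey(Bytes.toBytes("mmm"))     // endKey is kept but is not part of the name
          .setRegionId(1541808529000L)         // creation timestamp (invented)
          .setReplicaId(0)                     // default replica
          .build();
      // Prints something like: demo,aaa,1541808529000.<md5 encodedName>.
      System.out.println(ri.getRegionNameAsString());
      System.out.println(ri.getEncodedName()); // the MD5 hex string by itself
    }
  }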

[03/47] hbase-site git commit: Published site at fe2265fa4a1e828b2e68ff8e42639c5942dccb1b.

2018-11-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9592fdb5/testdevapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.DummyServer.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.DummyServer.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.DummyServer.html
index 006d1ba..18cf507 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.DummyServer.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.DummyServer.html
@@ -654,258 +654,274 @@
 646}
 647  }
 648
(This hunk removes and re-adds the test helpers with shifted line numbers. The removed side, cleaned of wrapping, with generic types restored and the garbled waitFor timeout digits restored as 20000 ms, reads:)

  /**
   * Add a peer and wait for it to initialize
   * @param waitForSource Whether to wait for replication source to initialize
   */
  private void addPeerAndWait(final String peerId, final ReplicationPeerConfig peerConfig,
      final boolean waitForSource) throws Exception {
    final ReplicationPeers rp = manager.getReplicationPeers();
    rp.getPeerStorage().addPeer(peerId, peerConfig, true, SyncReplicationState.NONE);
    try {
      manager.addPeer(peerId);
    } catch (Exception e) {
      // ignore the failed exception, because we'll test both success & failed case.
    }
    waitPeer(peerId, manager, waitForSource);
    if (managerOfCluster != null) {
      managerOfCluster.addPeer(peerId);
      waitPeer(peerId, managerOfCluster, waitForSource);
    }
  }

  private static void waitPeer(final String peerId,
      ReplicationSourceManager manager, final boolean waitForSource) {
    ReplicationPeers rp = manager.getReplicationPeers();
    Waiter.waitFor(conf, 20000, () -> {
      if (waitForSource) {
        ReplicationSourceInterface rs = manager.getSource(peerId);
        if (rs == null) {
          return false;
        }
        if (rs instanceof ReplicationSourceDummy) {
          return ((ReplicationSourceDummy)rs).isStartup();
        }
        return true;
      } else {
        return (rp.getPeer(peerId) != null);
      }
    });
  }

  /**
   * Remove a peer and wait for it to get cleaned up
   * @param peerId
   * @throws Exception
   */
  private void removePeerAndWait(final String peerId) throws Exception {
    final ReplicationPeers rp = manager.getReplicationPeers();
    if (rp.getPeerStorage().listPeerIds().contains(peerId)) {
      rp.getPeerStorage().removePeer(peerId);
      try {
        manager.removePeer(peerId);
      } catch (Exception e) {
        // ignore the failed exception and continue.
      }
    }
    Waiter.waitFor(conf, 20000, new Waiter.Predicate<Exception>() {
      @Override
      public boolean evaluate() throws Exception {
        Collection<String> peers = rp.getPeerStorage().listPeerIds();
        return (!manager.getAllQueues().contains(peerId)) && (rp.getPeer(peerId) == null)
            && (!peers.contains(peerId)) && manager.getSource(peerId) == null;
      }
    });
  }

  private WALEdit getBulkLoadWALEdit(NavigableMap<byte[], Integer> scope) {
    // 1. Create store files for the families
    Map<byte[], List<Path>> storeFiles = new HashMap<>(1);
    Map<String, Long> storeFilesSize = new HashMap<>(1);
    List<Path> p = new ArrayList<>(1);
    Path hfilePath1 = new Path(Bytes.toString(f1));
    p.add(hfilePath1);
    try {
      storeFilesSize.put(hfilePath1.getName(), fs.getFileStatus(hfilePath1).getLen());
    } catch (IOException e) {
      LOG.debug("Failed to calculate the size of hfile " + hfilePath1);
      storeFilesSize.put(hfilePath1.getName(), 0L);
    }
    storeFiles.put(f1, p);
    scope.put(f1, 1);
    p = new ArrayList<>(1);
    Path hfilePath2 = new Path(Bytes.toString(f2));
    p.add(hfilePath2);
    try {
      storeFilesSize.put(hfilePath2.getName(), fs.getFileStatus(hfilePath2).getLen());
    } catch (IOException e) {
      LOG.debug("Failed to calculate the size of hfile " + hfilePath2);
      storeFilesSize.put(hfilePath2.getName(), 0L);
    }
    storeFiles.put(f2, p);
    // 2. Create bulk load descriptor
    BulkLoadDescriptor desc =
        ProtobufUtil.toBulkLoadDescriptor(hri.getTable(),
          UnsafeByteOperations.unsafeWrap(hri.getEncodedNameAsBytes()), storeFiles,
          storeFilesSize, 1);

    // 3. create bulk load wal edit event
    WALEdit logEdit = WALEdit.createBulkLoadEvent(hri, desc);
    return logEdit;
  }

  static class DummyNodeFailoverWorker e…
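The helpers above drive the ReplicationPeers storage directly; through the public client API the equivalent add/remove cycle looks roughly like this. A sketch assuming an HBase 2.x Admin, with a placeholder peer id and cluster key:

  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.client.Admin;
  import org.apache.hadoop.hbase.client.Connection;
  import org.apache.hadoop.hbase.client.ConnectionFactory;
  import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;

  public class ReplicationPeerSketch {
    public static void main(String[] args) throws Exception {
      try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
           Admin admin = conn.getAdmin()) {
        ReplicationPeerConfig peerConfig = new ReplicationPeerConfig()
            .setClusterKey("zk1,zk2,zk3:2181:/hbase");   // placeholder peer cluster key
        admin.addReplicationPeer("1", peerConfig);       // "1" is an arbitrary peer id
        // ... let replication run, verify state, etc. ...
        admin.removeReplicationPeer("1");
      }
    }
  }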

[17/47] hbase-site git commit: Published site at fe2265fa4a1e828b2e68ff8e42639c5942dccb1b.

2018-11-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9592fdb5/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.ServerLocalityCostFunction.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.ServerLocalityCostFunction.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.ServerLocalityCostFunction.html
index 9d1542c..ec5f688 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.ServerLocalityCostFunction.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.ServerLocalityCostFunction.html
@@ -53,502 +53,502 @@
(Same generated hunk as in StochasticLoadBalancer.CostFromRegionLoadFunction.html above, index 9d1542c..ec5f688: the unused Bytes import is dropped and the class javadoc/source shifts up one line. Verbatim duplicate body omitted.)

[21/47] hbase-site git commit: Published site at fe2265fa4a1e828b2e68ff8e42639c5942dccb1b.

2018-11-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9592fdb5/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RegionReplicaCandidateGenerator.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RegionReplicaCandidateGenerator.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RegionReplicaCandidateGenerator.html
index 9d1542c..ec5f688 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RegionReplicaCandidateGenerator.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RegionReplicaCandidateGenerator.html
@@ -53,502 +53,502 @@
(Same generated hunk as in StochasticLoadBalancer.CostFromRegionLoadFunction.html above, index 9d1542c..ec5f688: the unused Bytes import is dropped and the class javadoc/source shifts up one line. Verbatim duplicate body omitted.)

[27/47] hbase-site git commit: Published site at fe2265fa4a1e828b2e68ff8e42639c5942dccb1b.

2018-11-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9592fdb5/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.MoveCostFunction.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.MoveCostFunction.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.MoveCostFunction.html
index 9d1542c..ec5f688 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.MoveCostFunction.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.MoveCostFunction.html
@@ -53,502 +53,502 @@
(Same generated hunk as in StochasticLoadBalancer.CostFromRegionLoadFunction.html above, index 9d1542c..ec5f688: the unused Bytes import is dropped and the class javadoc/source shifts up one line. Verbatim duplicate body omitted.)

[15/47] hbase-site git commit: Published site at fe2265fa4a1e828b2e68ff8e42639c5942dccb1b.

2018-11-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9592fdb5/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.TableSkewCostFunction.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.TableSkewCostFunction.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.TableSkewCostFunction.html
index 9d1542c..ec5f688 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.TableSkewCostFunction.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.TableSkewCostFunction.html
@@ -53,502 +53,502 @@
(Same generated hunk as in StochasticLoadBalancer.CostFromRegionLoadFunction.html above, index 9d1542c..ec5f688: the unused Bytes import is dropped and the class javadoc/source shifts up one line. Verbatim duplicate body omitted.)

[34/47] hbase-site git commit: Published site at fe2265fa4a1e828b2e68ff8e42639c5942dccb1b.

2018-11-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9592fdb5/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.CostFromRegionLoadAsRateFunction.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.CostFromRegionLoadAsRateFunction.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.CostFromRegionLoadAsRateFunction.html
index 9d1542c..ec5f688 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.CostFromRegionLoadAsRateFunction.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.CostFromRegionLoadAsRateFunction.html
@@ -53,502 +53,502 @@
 045import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.LocalityType;
 046import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.MoveRegionAction;
 047import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.SwapRegionsAction;
-048import org.apache.hadoop.hbase.util.Bytes;
-049import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-050import org.apache.yetus.audience.InterfaceAudience;
-051import org.slf4j.Logger;
-052import org.slf4j.LoggerFactory;
-053
-054import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-055import org.apache.hbase.thirdparty.com.google.common.base.Optional;
-056import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
+048import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+049import org.apache.yetus.audience.InterfaceAudience;
+050import org.slf4j.Logger;
+051import org.slf4j.LoggerFactory;
+052
+053import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
+054import org.apache.hbase.thirdparty.com.google.common.base.Optional;
+055import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
+056
 057
(the remainder of the hunk only renumbers the unchanged StochasticLoadBalancer class javadoc and declaration, shown in full above)

[16/47] hbase-site git commit: Published site at fe2265fa4a1e828b2e68ff8e42639c5942dccb1b.

2018-11-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9592fdb5/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.StoreFileCostFunction.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.StoreFileCostFunction.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.StoreFileCostFunction.html
index 9d1542c..ec5f688 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.StoreFileCostFunction.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.StoreFileCostFunction.html
@@ -53,502 +53,502 @@
(hunk identical to the StochasticLoadBalancer import/javadoc hunk shown above)

[08/47] hbase-site git commit: Published site at fe2265fa4a1e828b2e68ff8e42639c5942dccb1b.

2018-11-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9592fdb5/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/master/AssignmentManagerStatusTmplImpl.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/master/AssignmentManagerStatusTmplImpl.html b/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/master/AssignmentManagerStatusTmplImpl.html
index 6ed9d13..52c02b0 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/master/AssignmentManagerStatusTmplImpl.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/master/AssignmentManagerStatusTmplImpl.html
@@ -80,11 +80,11 @@
 072int numOfPages = (int) Math.ceil(numOfRITs * 1.0 / ritsPerPage);
 073
 074  // 51, 5
-075  jamonWriter.write("... Regions in Transition ...");
-076  // 53, 9
+075  jamonWriter.write("... Regions in Transition ...");
+076  // 53, 29
 077  org.jamon.escaping.Escaping.HTML.write(org.jamon.emit.StandardEmitter.valueOf(numOfRITs), jamonWriter);
-078  // 53, 24
-079  jamonWriter.write(" region(s) in transition.\n ");
+078  // 53, 44
+079  jamonWriter.write(" region(s) in transition.\n ");
 080  // 54, 6
 081  if (ritStat.hasRegionsTwiceOverThreshold() )
 082  {

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9592fdb5/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmplImpl.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmplImpl.html b/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmplImpl.html
index 8f7ff31..3b11445 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmplImpl.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmplImpl.html
@@ -316,109 +316,109 @@
 308  // 217, 15
 309  jamonWriter.write("\n");
 310  // 218, 9
-311  if (master.getMasterCoprocessorHost().findCoprocessor("RSGroupAdminEndpoint") != null )
+311  if (master.getAssignmentManager() != null )
 312  {
-313    // 218, 98
-314    jamonWriter.write("\n... RSGroup ...\n");
-315    // 221, 13
+313    // 218, 54
+314    jamonWriter.write("\n ");
+315    // 219, 11
 316    {
-317      org.apache.hadoop.hbase.tmpl.master.RSGroupListTmpl __jamon__var_1 = new org.apache.hadoop.hbase.tmpl.master.RSGroupListTmpl(this.getTemplateManager());
-318      __jamon__var_1.renderNoFlush(jamonWriter, master, serverManager);
+317      org.apache.hadoop.hbase.tmpl.master.AssignmentManagerStatusTmpl __jamon__var_1 = new org.apache.hadoop.hbase.tmpl.master.AssignmentManagerStatusTmpl(this.getTemplateManager());
+318      __jamon__var_1.renderNoFlush(jamonWriter, master.getAssignmentManager());
 319    }
-320    // 221, 79
-321    jamonWriter.write("\n \n");
+320    // 219, 92
+321    jamonWriter.write("\n ");
 322  }
-323  // 223, 15
-324  jamonWriter.write("\n... Region Servers ...\n");
-325  // 226, 13
-326  {
-327    org.apache.hadoop.hbase.tmpl.master.RegionServerListTmpl __jamon__var_2 = new org.apache.hadoop.hbase.tmpl.master.RegionServerListTmpl(this.getTemplateManager());
-328    __jamon__var_2.setServers(servers );
-329    __jamon__var_2.renderNoFlush(jamonWriter, master);
-330  }
-331  // 226, 74
-332  jamonWriter.write("\n\n ");
-333  // 228, 13
-334  if ((deadServers != null) )
-335  {
-336    // 228, 42
-337    jamonWriter.write("\n ");
-338    // 229, 17
-339    {
-340      // 229, 17
-341      __jamon_innerUnit__deadRegionServers(jamonWriter);
-342    }
-343    // 229, 40
-344    jamonWriter.write("\n ");
+323  // 220, 15
+324  jamonWriter.write("\n");
+325  // 221, 9
+326  if (master.getMasterCoprocessorHost().findCoprocessor("RSGroupAdminEndpoint") != null )
+327  {
+328    // 221, 98
+329    jamonWriter.write("\n... RSGroup ...\n");
+330    // 224, 13
+331    {
+332      org.apache.hadoop.hbase.tmpl.master.RSGroupListTmpl __jamon__var_2 = new org.apache.hadoop.hbase.tmpl.master.RSGroupListTmpl(this.getTemplateManager());
+333      __jamon__var_2.renderNoFlush(jamonWriter, master, serverManager);
+334    }
+335    // 224, 79
+336    jamonWriter.write("\n \n");
+337  }
+338
(the "..." in the write() payloads marks HTML markup lost in the archive rendering)
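The *TmplImpl classes in this hunk are Jamon-generated: template markup compiles into
jamonWriter.write(...) calls and template logic into ordinary Java control flow, which is why
the diff above is mostly reshuffled write() calls. A minimal illustration of that compilation
pattern follows; the template body and names here are hypothetical, and the real generated
strings carry the HTML markup that was lost in the rendering above.

import java.io.IOException;
import java.io.StringWriter;
import java.io.Writer;

// Hypothetical hand-written equivalent of a tiny Jamon-generated renderer:
// static markup becomes write() calls, template logic becomes plain Java.
public class RegionsInTransitionSnippet {
  static void renderNoFlush(Writer jamonWriter, int numOfRITs) throws IOException {
    jamonWriter.write("<h2>Regions in Transition</h2>\n");
    jamonWriter.write(String.valueOf(numOfRITs)); // real Jamon output HTML-escapes this
    jamonWriter.write(" region(s) in transition.\n");
    if (numOfRITs > 0) {                          // a template conditional becomes a Java if
      jamonWriter.write("<p>See the table below for details.</p>\n");
    }
  }

  public static void main(String[] args) throws IOException {
    StringWriter out = new StringWriter();
    renderNoFlush(out, 3);
    System.out.print(out);
  }
}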

[32/47] hbase-site git commit: Published site at fe2265fa4a1e828b2e68ff8e42639c5942dccb1b.

2018-11-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9592fdb5/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.CostFunction.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.CostFunction.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.CostFunction.html
index 9d1542c..ec5f688 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.CostFunction.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.CostFunction.html
@@ -53,502 +53,502 @@
(hunk identical to the StochasticLoadBalancer import/javadoc hunk shown above)

[26/47] hbase-site git commit: Published site at fe2265fa4a1e828b2e68ff8e42639c5942dccb1b.

2018-11-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9592fdb5/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.PrimaryRegionCountSkewCostFunction.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.PrimaryRegionCountSkewCostFunction.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.PrimaryRegionCountSkewCostFunction.html
index 9d1542c..ec5f688 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.PrimaryRegionCountSkewCostFunction.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.PrimaryRegionCountSkewCostFunction.html
@@ -53,502 +53,502 @@
(hunk identical to the StochasticLoadBalancer import/javadoc hunk shown above)

[10/47] hbase-site git commit: Published site at fe2265fa4a1e828b2e68ff8e42639c5942dccb1b.

2018-11-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9592fdb5/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.ReplicationQueueOperation.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.ReplicationQueueOperation.html b/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.ReplicationQueueOperation.html
index 083ab07..be28dfa 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.ReplicationQueueOperation.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.ReplicationQueueOperation.html
@@ -31,54 +31,54 @@
 023import java.util.Collection;
 024import java.util.Collections;
 025import java.util.HashMap;
-026import java.util.HashSet;
-027import java.util.Iterator;
-028import java.util.List;
-029import java.util.Map;
-030import java.util.NavigableSet;
-031import java.util.Set;
-032import java.util.SortedSet;
-033import java.util.TreeSet;
-034import java.util.UUID;
-035import java.util.concurrent.ConcurrentHashMap;
-036import java.util.concurrent.ConcurrentMap;
-037import java.util.concurrent.Future;
-038import java.util.concurrent.LinkedBlockingQueue;
-039import java.util.concurrent.RejectedExecutionException;
-040import java.util.concurrent.ThreadLocalRandom;
-041import java.util.concurrent.ThreadPoolExecutor;
-042import java.util.concurrent.TimeUnit;
-043import java.util.concurrent.atomic.AtomicLong;
-044import java.util.stream.Collectors;
-045import org.apache.hadoop.conf.Configuration;
-046import org.apache.hadoop.fs.FileSystem;
-047import org.apache.hadoop.fs.Path;
-048import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
-049import org.apache.hadoop.hbase.HConstants;
-050import org.apache.hadoop.hbase.Server;
-051import org.apache.hadoop.hbase.ServerName;
-052import org.apache.hadoop.hbase.TableName;
-053import org.apache.hadoop.hbase.replication.ReplicationException;
-054import org.apache.hadoop.hbase.replication.ReplicationListener;
-055import org.apache.hadoop.hbase.replication.ReplicationPeer;
-056import org.apache.hadoop.hbase.replication.ReplicationPeer.PeerState;
-057import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-058import org.apache.hadoop.hbase.replication.ReplicationPeerImpl;
-059import org.apache.hadoop.hbase.replication.ReplicationPeers;
-060import org.apache.hadoop.hbase.replication.ReplicationQueueInfo;
-061import org.apache.hadoop.hbase.replication.ReplicationQueueStorage;
-062import org.apache.hadoop.hbase.replication.ReplicationTracker;
-063import org.apache.hadoop.hbase.replication.ReplicationUtils;
-064import org.apache.hadoop.hbase.replication.SyncReplicationState;
-065import org.apache.hadoop.hbase.util.Pair;
-066import org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
-067import org.apache.hadoop.hbase.wal.SyncReplicationWALProvider;
-068import org.apache.yetus.audience.InterfaceAudience;
-069import org.apache.zookeeper.KeeperException;
-070import org.slf4j.Logger;
-071import org.slf4j.LoggerFactory;
-072
-073import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
+026import java.util.Iterator;
+027import java.util.List;
+028import java.util.Map;
+029import java.util.NavigableSet;
+030import java.util.Set;
+031import java.util.SortedSet;
+032import java.util.TreeSet;
+033import java.util.UUID;
+034import java.util.concurrent.ConcurrentHashMap;
+035import java.util.concurrent.ConcurrentMap;
+036import java.util.concurrent.Future;
+037import java.util.concurrent.LinkedBlockingQueue;
+038import java.util.concurrent.RejectedExecutionException;
+039import java.util.concurrent.ThreadLocalRandom;
+040import java.util.concurrent.ThreadPoolExecutor;
+041import java.util.concurrent.TimeUnit;
+042import java.util.concurrent.atomic.AtomicLong;
+043import java.util.stream.Collectors;
+044import org.apache.hadoop.conf.Configuration;
+045import org.apache.hadoop.fs.FileSystem;
+046import org.apache.hadoop.fs.Path;
+047import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
+048import org.apache.hadoop.hbase.HConstants;
+049import org.apache.hadoop.hbase.Server;
+050import org.apache.hadoop.hbase.ServerName;
+051import org.apache.hadoop.hbase.TableName;
+052import org.apache.hadoop.hbase.replication.ReplicationException;
+053import org.apache.hadoop.hbase.replication.ReplicationListener;
+054import org.apache.hadoop.hbase.replication.ReplicationPeer;
+055import org.apache.hadoop.hbase.replication.ReplicationPeer.PeerState;
+056import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
+057import org.apache.hadoop.hbase.replication.ReplicationPeerImpl;
+058import org.apache.hadoop.hbase.replication.ReplicationPeers;
+059import org.apache.hadoop.hbase.replica

[47/47] hbase-site git commit: Published site at fe2265fa4a1e828b2e68ff8e42639c5942dccb1b.

2018-11-09 Thread git-site-role
Published site at fe2265fa4a1e828b2e68ff8e42639c5942dccb1b.


Project: http://git-wip-us.apache.org/repos/asf/hbase-site/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase-site/commit/9592fdb5
Tree: http://git-wip-us.apache.org/repos/asf/hbase-site/tree/9592fdb5
Diff: http://git-wip-us.apache.org/repos/asf/hbase-site/diff/9592fdb5

Branch: refs/heads/asf-site
Commit: 9592fdb5864ff3e5d36dbcf983e9f235f672fd0b
Parents: c21eba9
Author: jenkins 
Authored: Fri Nov 9 14:52:37 2018 +
Committer: jenkins 
Committed: Fri Nov 9 14:52:37 2018 +

--
 acid-semantics.html | 4 +-
 apache_hbase_reference_guide.pdf | 22251 +
 apidocs/index-all.html | 4 +
 .../apache/hadoop/hbase/client/RegionInfo.html | 68 +-
 .../hbase/client/class-use/RegionInfo.html | 15 +-
 .../apache/hadoop/hbase/client/RegionInfo.html | 1488 +-
 book.html | 64 +-
 bulk-loads.html | 4 +-
 checkstyle-aggregate.html | 158 +-
 checkstyle.rss | 4 +-
 coc.html | 4 +-
 dependencies.html | 4 +-
 dependency-convergence.html | 4 +-
 dependency-info.html | 4 +-
 dependency-management.html | 4 +-
 devapidocs/constant-values.html | 4 +-
 devapidocs/index-all.html | 6 +
 .../org/apache/hadoop/hbase/HRegionInfo.html | 2 +-
 .../hadoop/hbase/backup/package-tree.html | 4 +-
 .../hbase/client/ImmutableHRegionInfo.html | 2 +-
 .../apache/hadoop/hbase/client/RegionInfo.html | 179 +-
 .../RegionInfoBuilder.MutableRegionInfo.html | 38 +-
 .../hbase/client/UnmodifyableHRegionInfo.html | 2 +-
 .../hbase/client/class-use/RegionInfo.html | 41 +-
 .../hadoop/hbase/client/package-tree.html | 24 +-
 .../hadoop/hbase/filter/package-tree.html | 10 +-
 .../hadoop/hbase/io/hfile/package-tree.html | 6 +-
 .../apache/hadoop/hbase/ipc/package-tree.html | 2 +-
 .../hadoop/hbase/mapreduce/package-tree.html | 4 +-
 .../master/balancer/StochasticLoadBalancer.html | 94 +-
 .../hadoop/hbase/master/package-tree.html | 2 +-
 .../hbase/master/procedure/package-tree.html | 4 +-
 .../hadoop/hbase/monitoring/package-tree.html | 2 +-
 .../org/apache/hadoop/hbase/package-tree.html | 16 +-
 .../hadoop/hbase/quotas/package-tree.html | 6 +-
 .../hadoop/hbase/regionserver/package-tree.html | 22 +-
 .../regionserver/querymatcher/package-tree.html | 2 +-
 .../regionserver/wal/ProtobufLogReader.html | 4 +-
 .../hbase/regionserver/wal/package-tree.html | 2 +-
 .../hadoop/hbase/replication/package-tree.html | 2 +-
 ...icationSourceManager.NodeFailoverWorker.html | 12 +-
 ...SourceManager.ReplicationQueueOperation.html | 4 +-
 .../regionserver/ReplicationSourceManager.html | 137 +-
 .../replication/regionserver/package-tree.html | 2 +-
 .../hadoop/hbase/rest/model/package-tree.html | 2 +-
 .../hbase/security/access/package-tree.html | 2 +-
 .../hadoop/hbase/security/package-tree.html | 2 +-
 .../hadoop/hbase/util/HBaseFsck.MetaEntry.html | 2 +-
 .../apache/hadoop/hbase/util/package-tree.html | 8 +-
 .../org/apache/hadoop/hbase/Version.html | 4 +-
 .../apache/hadoop/hbase/client/RegionInfo.html | 1488 +-
 .../RegionInfoBuilder.MutableRegionInfo.html | 338 +-
 .../hadoop/hbase/client/RegionInfoBuilder.html | 338 +-
 ...asticLoadBalancer.CPRequestCostFunction.html | 976 +-
 ...ochasticLoadBalancer.CandidateGenerator.html | 976 +-
 ...lancer.CostFromRegionLoadAsRateFunction.html | 976 +-
 ...LoadBalancer.CostFromRegionLoadFunction.html | 976 +-
 .../StochasticLoadBalancer.CostFunction.html | 976 +-
 ...sticLoadBalancer.LoadCandidateGenerator.html | 976 +-
 ...alancer.LocalityBasedCandidateGenerator.html | 976 +-
 ...cLoadBalancer.LocalityBasedCostFunction.html | 976 +-
 ...icLoadBalancer.MemStoreSizeCostFunction.html | 976 +-
 ...StochasticLoadBalancer.MoveCostFunction.html | 976 +-
 ...ncer.PrimaryRegionCountSkewCostFunction.html | 976 +-
 ...icLoadBalancer.RackLocalityCostFunction.html | 976 +-
 ...icLoadBalancer.RandomCandidateGenerator.html | 976 +-
 ...ticLoadBalancer.ReadRequestCostFunction.html | 976 +-
 ...oadBalancer.RegionCountSkewCostFunction.html | 976 +-
 ...alancer.RegionReplicaCandidateGenerator.html | 976 +-
 ...dBalancer.RegionReplicaHostCostFunction.html | 976 +-
 ...cer.RegionReplicaRackCandidateGenerator.html | 976 +-
 ...dBalancer.RegionReplicaRackCostFunction.html | 976 +-
 ...LoadBalancer.ServerLocalityCostFuncti

[39/47] hbase-site git commit: Published site at fe2265fa4a1e828b2e68ff8e42639c5942dccb1b.

2018-11-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9592fdb5/devapidocs/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.html b/devapidocs/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.html
index 576c53f..42fb6ff 100644
--- a/devapidocs/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.html
+++ b/devapidocs/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10};
+var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10};
 var tabs = {65535:["t0","All Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -222,7 +222,7 @@ implements
 fs

-private Set
+private Map<String, org.apache.hadoop.fs.Path>
 latestPaths
@@ -432,116 +432,120 @@
+(package private) Set
+getLastestPath()
+
 org.apache.hadoop.fs.Path
 getLogDir()
 Get the directory where wals are stored by their RSs

 org.apache.hadoop.fs.Path
 getOldLogDir()
 Get the directory where wals are archived

 List
 getOldSources()
 Get a list of all the recovered sources of this rs

 ReplicationPeers
 getReplicationPeers()
 Get the ReplicationPeers used by this ReplicationSourceManager

 (package private) int
 getSizeOfLatestPath()

 ReplicationSourceInterface
 getSource(String peerId)
 Get the normal source for a given peer

 List
 getSources()
 Get a list of all the normal sources of this rs

 String
 getStats()
 Get a string representation of all the sources' metrics

 AtomicLong
 getTotalBufferUsed()

 Map<String, Map<String, NavigableSet<String>>>
 getWALs()
 Get a copy of the wals of the normal sources on this rs

 (package private) Map<String,
[36/47] hbase-site git commit: Published site at fe2265fa4a1e828b2e68ff8e42639c5942dccb1b.

2018-11-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9592fdb5/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.CPRequestCostFunction.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.CPRequestCostFunction.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.CPRequestCostFunction.html
index 9d1542c..ec5f688 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.CPRequestCostFunction.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.CPRequestCostFunction.html
@@ -53,502 +53,502 @@
(hunk identical to the StochasticLoadBalancer import/javadoc hunk shown above)

[45/47] hbase-site git commit: Published site at fe2265fa4a1e828b2e68ff8e42639c5942dccb1b.

2018-11-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9592fdb5/apidocs/index-all.html
--
diff --git a/apidocs/index-all.html b/apidocs/index-all.html
index 8dfd629..f93b10c 100644
--- a/apidocs/index-all.html
+++ b/apidocs/index-all.html
@@ -7987,6 +7987,10 @@
  
 getRegionNameAsString() - Method in interface org.apache.hadoop.hbase.client.RegionInfo
  
+getRegionNameAsString(byte[]) - Static method in interface org.apache.hadoop.hbase.client.RegionInfo
+ 
+getRegionNameAsString(RegionInfo, byte[]) - Static method in interface org.apache.hadoop.hbase.client.RegionInfo
+ 
 getRegionNameAsString() - Method in class org.apache.hadoop.hbase.HRegionInfo
 
 Deprecated.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9592fdb5/apidocs/org/apache/hadoop/hbase/client/RegionInfo.html
--
diff --git a/apidocs/org/apache/hadoop/hbase/client/RegionInfo.html b/apidocs/org/apache/hadoop/hbase/client/RegionInfo.html
index f35e785..4b3225e 100644
--- a/apidocs/org/apache/hadoop/hbase/client/RegionInfo.html
+++ b/apidocs/org/apache/hadoop/hbase/client/RegionInfo.html
@@ -106,7 +106,7 @@ var activeTableTab = "activeTableTab";

 @InterfaceAudience.Public
-public interface RegionInfo
+public interface RegionInfo
 Information about a region. A region is a range of keys in the whole keyspace
 of a table, an identifier (a timestamp) for differentiating between subset
 ranges (after region split) and a replicaId for differentiating the instance
@@ -350,7 +350,7 @@

 getShortNameToLog
-String getShortNameToLog()
+String getShortNameToLog()

 Returns:
 Return a short, printable name for this region
@@ -364,7 +364,7 @@

 getRegionId
-long getRegionId()
+long getRegionId()

 Returns:
 the regionId.
@@ -377,7 +377,7 @@

 getRegionName
-byte[] getRegionName()
+byte[] getRegionName()

 Returns:
 the regionName as an array of bytes.
@@ -392,7 +392,7 @@

 getRegionNameAsString
-String getRegionNameAsString()
+String getRegionNameAsString()

 Returns:
 Region name as a String for use in logging, etc.
@@ -405,7 +405,7 @@

 getEncodedName
-String getEncodedName()
+String getEncodedName()

 Returns:
 the encoded region name.
@@ -418,7 +418,7 @@

 getEncodedNameAsBytes
-byte[] getEncodedNameAsBytes()
+byte[] getEncodedNameAsBytes()

 Returns:
 the encoded region name as an array of bytes.
@@ -431,7 +431,7 @@

 getStartKey
-byte[] getStartKey()
+byte[] getStartKey()

 Returns:
 the startKey.
@@ -444,7 +444,7 @@

 getEndKey
-byte[] getEndKey()
+byte[] getEndKey()

 Returns:
 the endKey.
@@ -457,7 +457,7 @@

 getTable
-TableName getTable()
+TableName getTable()

 Returns:
 current table name of the region
@@ -470,7 +470,7 @@

 getReplicaId
-int getReplicaId()
+int getReplicaId()

 Returns:
 returns region replica id
@@ -483,7 +483,7 @@

 isSplit
-boolean isSplit()
+boolean isSplit()

 Returns:
 True if has been split and has daughters.
@@ -496,7 +496,7 @@

 isOffline
-boolean isOffline()
+boolean isOffline()

 Returns:
 True if this region is offline.
@@ -509,7 +509,7 @@

 isSplitParent
-boolean isSplitParent()
+boolean isSplitParent()

 Returns:
 True if this is a split parent region.
@@ -522,7 +522,7 @@

 isMetaRegion
-boolean isMetaRegion()
+boolean isMetaRegion()

 Returns:
 true if this region is a meta region.
@@ -535,7 +535,7 @@

 containsRange
-boolean containsRange(byte[] rangeStartKey,
+boolean containsRange(byte[] rangeStartKey,
   byte[] rangeEndKey)

 Parameters:
@@ -557,7 +557,7 @@

 containsRow
-boolean containsRow(byte[] row)
+boolean containsRow(byte[] row)

 Parameters:
 row - 
@@ -572,7 +572,7 @@

 getShortNameToLog
-static String getShortNameToLog(RegionInfo... hris)
+stat

[37/47] hbase-site git commit: Published site at fe2265fa4a1e828b2e68ff8e42639c5942dccb1b.

2018-11-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9592fdb5/devapidocs/src-html/org/apache/hadoop/hbase/client/RegionInfoBuilder.MutableRegionInfo.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/RegionInfoBuilder.MutableRegionInfo.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/RegionInfoBuilder.MutableRegionInfo.html
index a16d6c3..e11b36c 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/RegionInfoBuilder.MutableRegionInfo.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/RegionInfoBuilder.MutableRegionInfo.html
@@ -295,193 +295,185 @@
 287 */
 288@Override
 289public String getRegionNameAsString() {
-290  if (RegionInfo.hasEncodedName(this.regionName)) {
-291    // new format region names already have their encoded name.
-292    return Bytes.toStringBinary(this.regionName);
-293  }
-294
-295  // old format. regionNameStr doesn't have the region name.
-296  //
-297  //
-298  return Bytes.toStringBinary(this.regionName) + "." + this.getEncodedName();
-299}
-300
-301/** @return the encoded region name */
-302@Override
-303public String getEncodedName() {
-304  return this.encodedName;
-305}
-306
-307@Override
-308public byte [] getEncodedNameAsBytes() {
-309  return this.encodedNameAsBytes;
-310}
-311
-312/** @return the startKey */
-313@Override
-314public byte [] getStartKey(){
-315  return startKey;
-316}
-317
-318
-319/** @return the endKey */
-320@Override
-321public byte [] getEndKey(){
-322  return endKey;
-323}
-324
-325/**
-326 * Get current table name of the region
-327 * @return TableName
-328 */
-329@Override
-330public TableName getTable() {
-331  return this.tableName;
-332}
-333
-334/**
-335 * Returns true if the given inclusive range of rows is fully contained
-336 * by this region. For example, if the region is foo,a,g and this is
-337 * passed ["b","c"] or ["a","c"] it will return true, but if this is passed
-338 * ["b","z"] it will return false.
-339 * @throws IllegalArgumentException if the range passed is invalid (ie. end < start)
-340 */
-341@Override
-342public boolean containsRange(byte[] rangeStartKey, byte[] rangeEndKey) {
-343  if (Bytes.compareTo(rangeStartKey, rangeEndKey) > 0) {
-344    throw new IllegalArgumentException(
-345        "Invalid range: " + Bytes.toStringBinary(rangeStartKey) +
-346        " > " + Bytes.toStringBinary(rangeEndKey));
-347  }
-348
-349  boolean firstKeyInRange = Bytes.compareTo(rangeStartKey, startKey) >= 0;
-350  boolean lastKeyInRange =
-351      Bytes.compareTo(rangeEndKey, endKey) < 0 ||
-352      Bytes.equals(endKey, HConstants.EMPTY_BYTE_ARRAY);
-353  return firstKeyInRange && lastKeyInRange;
-354}
-355
-356/**
-357 * Return true if the given row falls in this region.
-358 */
+290  return RegionInfo.getRegionNameAsString(this, this.regionName);
+291}
+292
+293/** @return the encoded region name */
+294@Override
+295public String getEncodedName() {
+296  return this.encodedName;
+297}
+298
+299@Override
+300public byte [] getEncodedNameAsBytes() {
+301  return this.encodedNameAsBytes;
+302}
+303
+304/** @return the startKey */
+305@Override
+306public byte [] getStartKey(){
+307  return startKey;
+308}
+309
+310
+311/** @return the endKey */
+312@Override
+313public byte [] getEndKey(){
+314  return endKey;
+315}
+316
+317/**
+318 * Get current table name of the region
+319 * @return TableName
+320 */
+321@Override
+322public TableName getTable() {
+323  return this.tableName;
+324}
+325
+326/**
+327 * Returns true if the given inclusive range of rows is fully contained
+328 * by this region. For example, if the region is foo,a,g and this is
+329 * passed ["b","c"] or ["a","c"] it will return true, but if this is passed
+330 * ["b","z"] it will return false.
+331 * @throws IllegalArgumentException if the range passed is invalid (ie. end < start)
+332 */
+333@Override
+334public boolean containsRange(byte[] rangeStartKey, byte[] rangeEndKey) {
+335  if (Bytes.compareTo(rangeStartKey, rangeEndKey) > 0) {
+336    throw new IllegalArgumentException(
+337        "Invalid range: " + Bytes.toStringBinary(rangeStartKey) +
+338        " > " + Bytes.toStringBinary(rangeEndKey));
+339  }
+340
+341  boolean firstKeyInRange = Bytes.compareTo(rangeStartKey, startKey) >= 0;
+342  boolean lastKeyInRange =
+343      Bytes.compareTo(rangeEndKey, endKey) < 0 ||
+344      Bytes.equals(endKey, HConstants.EMPTY_BYTE_ARRAY);
+345  return firstKeyInRange && lastKeyInRange;
+346
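The containsRange javadoc in this hunk carries its own example: a region foo,a,g contains
["b","c"] but not ["b","z"]. A self-contained check of that contract, using
java.util.Arrays.compare in place of HBase's Bytes.compareTo and an empty endKey standing in
for HConstants.EMPTY_BYTE_ARRAY (a region that extends to the end of the table):

import java.util.Arrays;

// Self-contained illustration of the containsRange contract shown above.
public class ContainsRangeDemo {
  static boolean containsRange(byte[] startKey, byte[] endKey,
                               byte[] rangeStartKey, byte[] rangeEndKey) {
    if (Arrays.compare(rangeStartKey, rangeEndKey) > 0) {
      throw new IllegalArgumentException("Invalid range: end < start");
    }
    boolean firstKeyInRange = Arrays.compare(rangeStartKey, startKey) >= 0;
    boolean lastKeyInRange = Arrays.compare(rangeEndKey, endKey) < 0
        || endKey.length == 0; // empty endKey: region extends to the end of the table
    return firstKeyInRange && lastKeyInRange;
  }

  public static void main(String[] args) {
    byte[] start = "a".getBytes(), end = "g".getBytes(); // region foo,a,g
    System.out.println(containsRange(start, end, "b".getBytes(), "c".getBytes())); // true
    System.out.println(containsRange(start, end, "b".getBytes(), "z".getBytes())); // false
  }
}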

[28/47] hbase-site git commit: Published site at fe2265fa4a1e828b2e68ff8e42639c5942dccb1b.

2018-11-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9592fdb5/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.MemStoreSizeCostFunction.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.MemStoreSizeCostFunction.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.MemStoreSizeCostFunction.html
index 9d1542c..ec5f688 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.MemStoreSizeCostFunction.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.MemStoreSizeCostFunction.html
@@ -53,502 +53,502 @@
(hunk identical to the StochasticLoadBalancer import/javadoc hunk shown above)

[29/47] hbase-site git commit: Published site at fe2265fa4a1e828b2e68ff8e42639c5942dccb1b.

2018-11-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9592fdb5/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.LocalityBasedCostFunction.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.LocalityBasedCostFunction.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.LocalityBasedCostFunction.html
index 9d1542c..ec5f688 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.LocalityBasedCostFunction.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.LocalityBasedCostFunction.html
@@ -53,502 +53,502 @@
 045import 
org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.LocalityType;
 046import 
org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.MoveRegionAction;
 047import 
org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.SwapRegionsAction;
-048import 
org.apache.hadoop.hbase.util.Bytes;
-049import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-050import 
org.apache.yetus.audience.InterfaceAudience;
-051import org.slf4j.Logger;
-052import org.slf4j.LoggerFactory;
-053
-054import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-055import 
org.apache.hbase.thirdparty.com.google.common.base.Optional;
-056import 
org.apache.hbase.thirdparty.com.google.common.collect.Lists;
+048import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+049import 
org.apache.yetus.audience.InterfaceAudience;
+050import org.slf4j.Logger;
+051import org.slf4j.LoggerFactory;
+052
+053import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
+054import 
org.apache.hbase.thirdparty.com.google.common.base.Optional;
+055import 
org.apache.hbase.thirdparty.com.google.common.collect.Lists;
+056
 057
-058
-059/** through -105 (re-added unchanged as +058 through +104): removing the Bytes import
only shifts the source line numbers of the class javadoc, which reads:

/**
 * <p>This is a best effort load balancer. Given a Cost function F(C) => x It will
 * randomly try and mutate the cluster to Cprime. If F(Cprime) < F(C) then the
 * new cluster state becomes the plan. It includes costs functions to compute the cost of:</p>
 * <ul>
 * <li>Region Load</li>
 * <li>Table Load</li>
 * <li>Data Locality</li>
 * <li>Memstore Sizes</li>
 * <li>Storefile Sizes</li>
 * </ul>
 *
 * <p>Every cost function returns a number between 0 and 1 inclusive; where 0 is the lowest cost
 * best solution, and 1 is the highest possible cost and the worst solution. The computed costs are
 * scaled by their respective multipliers:</p>
 * <ul>
 * <li>hbase.master.balancer.stochastic.regionLoadCost</li>
 * <li>hbase.master.balancer.stochastic.moveCost</li>
 * <li>hbase.master.balancer.stochastic.tableLoadCost</li>
 * <li>hbase.master.balancer.stochastic.localityCost</li>
 * <li>hbase.master.balancer.stochastic.memstoreSizeCost</li>
 * <li>hbase.master.balancer.stochastic.storefileSizeCost</li>
 * </ul>
 *
 * <p>In addition to the above configurations, the balancer can be tuned by the following
 * configuration values:</p>
 * <ul>
 * <li>hbase.master.balancer.stochastic.maxMoveRegions which controls what the max number of
 * regions that can be moved in a single invocation of this balancer.</li>
 * <li>hbase.master.balancer.stochastic.stepsPerRegion is the coefficient by which the number of
 * regions is multiplied to try and get the number of times the balancer will mutate all
 * servers.</li>
 * <li>hbase.master.balancer.stochastic.maxSteps which controls the maximum number of times that
 * the balancer will try and mutate all the servers. The balancer will use the minimum of this
 * value and the above computation.</li>
 * </ul>
 *
 * <p>This balancer is best used with hbase.master.loadbalance.bytable set to false so that the
 * balancer gets the full picture of all loads on the cluster.</p>
 */
@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
@edu.umd.cs.findbugs.annotations.SuppressWarnings(value="IS2_INCONSISTENT_SYNC",
  justification="Complaint is about costFunctions not being synchronized; not end of the world")
public class StochasticLoadBalancer extends BaseLoadBalancer {

  protected static final String
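For readers tuning this balancer, a minimal sketch of how the knobs described in the javadoc
above interact. It assumes only org.apache.hadoop.conf.Configuration; the property names are
the ones the javadoc lists, while the multiplier value, the defaults and the cluster size
below are illustrative assumptions, not values taken from this commit.

import org.apache.hadoop.conf.Configuration;

public class BalancerTuningSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Weight locality more heavily relative to the other cost functions
    // (each multiplier scales its cost function's [0,1] output).
    conf.setFloat("hbase.master.balancer.stochastic.localityCost", 50f);
    // Bound the work done per balancer invocation.
    conf.setInt("hbase.master.balancer.stochastic.maxSteps", 1000000);
    conf.setInt("hbase.master.balancer.stochastic.stepsPerRegion", 800);

    int regionCount = 10000; // hypothetical cluster
    long maxSteps = conf.getInt("hbase.master.balancer.stochastic.maxSteps", 1000000);
    long perRegion = conf.getInt("hbase.master.balancer.stochastic.stepsPerRegion", 800);
    // Per the javadoc: the balancer mutates the cluster the minimum of
    // maxSteps and stepsPerRegion * number-of-regions times.
    long steps = Math.min(maxSteps, perRegion * regionCount);
    System.out.println("balancer will try ~" + steps + " random mutations");
  }
}

Raising a multiplier makes that dimension dominate the summed cost, so randomly mutated
cluster states that improve it are more likely to be accepted as the new plan.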

hbase git commit: HBASE-21437 Bypassed procedure throw IllegalArgumentException when its state is WAITING_TIMEOUT

2018-11-09 Thread allan163
Repository: hbase
Updated Branches:
  refs/heads/branch-2.1 0875fa063 -> c6090d4f0


HBASE-21437 Bypassed procedure throw IllegalArgumentException when its state is 
WAITING_TIMEOUT

Signed-off-by: Allan Yang 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/c6090d4f
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/c6090d4f
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/c6090d4f

Branch: refs/heads/branch-2.1
Commit: c6090d4f048c078c7f56a08db0e9f7e90225969e
Parents: 0875fa0
Author: jingyuntian 
Authored: Fri Nov 9 22:52:14 2018 +0800
Committer: Allan Yang 
Committed: Fri Nov 9 22:52:14 2018 +0800

--
 .../hbase/procedure2/ProcedureExecutor.java | 19 ++
 .../hbase/procedure2/TimeoutExecutorThread.java |  2 +-
 .../hbase/procedure2/TestProcedureBypass.java   | 38 +++-
 3 files changed, 51 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/c6090d4f/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
index c4fffa8..a5b66a0 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
@@ -1033,15 +1033,22 @@ public class ProcedureExecutor<TEnvironment> {
 store.update(procedure);
   }
 
-  // If we don't have the lock, we can't re-submit the queue,
-  // since it is already executing. To get rid of the stuck situation, we
-  // need to restart the master. With the procedure set to bypass, the 
procedureExecutor
-  // will bypass it and won't get stuck again.
-  if (lockEntry != null) {
-// add the procedure to run queue,
+  // If the state of the procedure is WAITING_TIMEOUT, we can not directly 
submit it to the scheduler.
+  // Instead we should remove it from the timeout executor queue and transfer 
its state to RUNNABLE.
+  if (procedure.getState() == ProcedureState.WAITING_TIMEOUT) {
+LOG.debug("transform procedure {} from WAITING_TIMEOUT to RUNNABLE", 
procedure);
+if (timeoutExecutor.remove(procedure)) {
+  LOG.debug("removed procedure {} from timeoutExecutor", procedure);
+  timeoutExecutor.executeTimedoutProcedure(procedure);
+}
+  } else if (lockEntry != null) {
 scheduler.addFront(procedure);
 LOG.info("Bypassing {} and its ancestors successfully, adding to 
queue", procedure);
   } else {
+// If we don't have the lock, we can't re-submit the queue,
+// since it is already executing. To get rid of the stuck situation, we
+// need to restart the master. With the procedure set to bypass, the 
procedureExecutor
+// will bypass it and won't get stuck again.
 LOG.info("Bypassing {} and its ancestors successfully, but since it is 
already running, "
 + "skipping add to queue", procedure);
   }
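
To make the control flow of the hunk above easier to follow, here is a toy model of the
bypass decision. The names (timeoutQueue, runQueue, bypass) are illustrative stand-ins,
not the hbase-procedure API.

import java.util.ArrayDeque;
import java.util.HashSet;
import java.util.Queue;
import java.util.Set;

public class BypassSketch {
  enum State { RUNNABLE, WAITING_TIMEOUT }

  static final Set<Long> timeoutQueue = new HashSet<>();  // pids parked on a timer
  static final Queue<Long> runQueue = new ArrayDeque<>(); // pids ready to execute

  static void bypass(long pid, State state, boolean holdsLock) {
    if (state == State.WAITING_TIMEOUT) {
      // The fix: drain the procedure from the timeout executor first,
      // then run its timeout path so it becomes RUNNABLE.
      if (timeoutQueue.remove(pid)) {
        runQueue.add(pid);
      }
    } else if (holdsLock) {
      runQueue.add(pid); // safe to re-queue, we hold the lock
    } else {
      // already executing elsewhere; it stays marked bypassed and finishes on its own
    }
  }

  public static void main(String[] args) {
    timeoutQueue.add(42L);
    bypass(42L, State.WAITING_TIMEOUT, false);
    System.out.println(runQueue); // prints [42]
  }
}

The first branch is the substance of the patch: previously a bypassed procedure still in
WAITING_TIMEOUT fell through to the re-queue path, which, per the issue title, raised an
IllegalArgumentException.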

http://git-wip-us.apache.org/repos/asf/hbase/blob/c6090d4f/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/TimeoutExecutorThread.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/TimeoutExecutorThread.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/TimeoutExecutorThread.java
index 9e050a2..4416177 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/TimeoutExecutorThread.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/TimeoutExecutorThread.java
@@ -126,7 +126,7 @@ class TimeoutExecutorThread<TEnvironment> extends StoppableThread {
 }
   }
 
-  private void executeTimedoutProcedure(Procedure<TEnvironment> proc) {
+  protected void executeTimedoutProcedure(Procedure<TEnvironment> proc) {
 // The procedure received a timeout. if the procedure itself does not 
handle it,
 // call abort() and add the procedure back in the queue for rollback.
 if (proc.setTimeoutFailure(executor.getEnvironment())) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/c6090d4f/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureBypass.java
--
diff --git 
a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureBypass.java
 
b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureBypass.java
index fa40631..de7a0a1 100644
--- 
a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/pr

[11/47] hbase-site git commit: Published site at fe2265fa4a1e828b2e68ff8e42639c5942dccb1b.

2018-11-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9592fdb5/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.NodeFailoverWorker.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.NodeFailoverWorker.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.NodeFailoverWorker.html
index 083ab07..be28dfa 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.NodeFailoverWorker.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.NodeFailoverWorker.html
@@ -31,54 +31,54 @@
 023import java.util.Collection;
 024import java.util.Collections;
 025import java.util.HashMap;
-026import java.util.HashSet;
-027import java.util.Iterator;
-028import java.util.List;
-029import java.util.Map;
-030import java.util.NavigableSet;
-031import java.util.Set;
-032import java.util.SortedSet;
-033import java.util.TreeSet;
-034import java.util.UUID;
-035import 
java.util.concurrent.ConcurrentHashMap;
-036import 
java.util.concurrent.ConcurrentMap;
-037import java.util.concurrent.Future;
-038import 
java.util.concurrent.LinkedBlockingQueue;
-039import 
java.util.concurrent.RejectedExecutionException;
-040import 
java.util.concurrent.ThreadLocalRandom;
-041import 
java.util.concurrent.ThreadPoolExecutor;
-042import java.util.concurrent.TimeUnit;
-043import 
java.util.concurrent.atomic.AtomicLong;
-044import java.util.stream.Collectors;
-045import 
org.apache.hadoop.conf.Configuration;
-046import org.apache.hadoop.fs.FileSystem;
-047import org.apache.hadoop.fs.Path;
-048import 
org.apache.hadoop.hbase.CompatibilitySingletonFactory;
-049import 
org.apache.hadoop.hbase.HConstants;
-050import org.apache.hadoop.hbase.Server;
-051import 
org.apache.hadoop.hbase.ServerName;
-052import 
org.apache.hadoop.hbase.TableName;
-053import 
org.apache.hadoop.hbase.replication.ReplicationException;
-054import 
org.apache.hadoop.hbase.replication.ReplicationListener;
-055import 
org.apache.hadoop.hbase.replication.ReplicationPeer;
-056import 
org.apache.hadoop.hbase.replication.ReplicationPeer.PeerState;
-057import 
org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-058import 
org.apache.hadoop.hbase.replication.ReplicationPeerImpl;
-059import 
org.apache.hadoop.hbase.replication.ReplicationPeers;
-060import 
org.apache.hadoop.hbase.replication.ReplicationQueueInfo;
-061import 
org.apache.hadoop.hbase.replication.ReplicationQueueStorage;
-062import 
org.apache.hadoop.hbase.replication.ReplicationTracker;
-063import 
org.apache.hadoop.hbase.replication.ReplicationUtils;
-064import 
org.apache.hadoop.hbase.replication.SyncReplicationState;
-065import 
org.apache.hadoop.hbase.util.Pair;
-066import 
org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
-067import 
org.apache.hadoop.hbase.wal.SyncReplicationWALProvider;
-068import 
org.apache.yetus.audience.InterfaceAudience;
-069import 
org.apache.zookeeper.KeeperException;
-070import org.slf4j.Logger;
-071import org.slf4j.LoggerFactory;
-072
-073import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
+026import java.util.Iterator;
+027import java.util.List;
+028import java.util.Map;
+029import java.util.NavigableSet;
+030import java.util.Set;
+031import java.util.SortedSet;
+032import java.util.TreeSet;
+033import java.util.UUID;
+034import 
java.util.concurrent.ConcurrentHashMap;
+035import 
java.util.concurrent.ConcurrentMap;
+036import java.util.concurrent.Future;
+037import 
java.util.concurrent.LinkedBlockingQueue;
+038import 
java.util.concurrent.RejectedExecutionException;
+039import 
java.util.concurrent.ThreadLocalRandom;
+040import 
java.util.concurrent.ThreadPoolExecutor;
+041import java.util.concurrent.TimeUnit;
+042import 
java.util.concurrent.atomic.AtomicLong;
+043import java.util.stream.Collectors;
+044import 
org.apache.hadoop.conf.Configuration;
+045import org.apache.hadoop.fs.FileSystem;
+046import org.apache.hadoop.fs.Path;
+047import 
org.apache.hadoop.hbase.CompatibilitySingletonFactory;
+048import 
org.apache.hadoop.hbase.HConstants;
+049import org.apache.hadoop.hbase.Server;
+050import 
org.apache.hadoop.hbase.ServerName;
+051import 
org.apache.hadoop.hbase.TableName;
+052import 
org.apache.hadoop.hbase.replication.ReplicationException;
+053import 
org.apache.hadoop.hbase.replication.ReplicationListener;
+054import 
org.apache.hadoop.hbase.replication.ReplicationPeer;
+055import 
org.apache.hadoop.hbase.replication.ReplicationPeer.PeerState;
+056import 
org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
+057import 
org.apache.hadoop.hbase.replication.ReplicationPeerImpl;
+058import 
org.apache.hadoop.hbase.replication.ReplicationPeers;
+059import 
org.apache.hadoop.hbase.replication.ReplicationQueueInfo;
+060impo

[43/47] hbase-site git commit: Published site at fe2265fa4a1e828b2e68ff8e42639c5942dccb1b.

2018-11-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9592fdb5/book.html
--
diff --git a/book.html b/book.html
index 4523e86..9aec55c 100644
--- a/book.html
+++ b/book.html
@@ -29312,6 +29312,52 @@ Following are a few examples of the reporting 
capabilities.
 Have a look in the Web UI.
 
 
+
+153.4. Snapshot Space Usage 
Monitoring
+
+Starting with HBase 0.95, Snapshot usage information on individual 
snapshots was shown in the HBase Master Web UI. This was further enhanced 
starting with HBase 1.3 to show the total Storefile size of the Snapshot Set. 
The following metrics are shown in the Master Web UI with HBase 1.3 and 
later.
+
+
+
+
+Shared Storefile Size is the Storefile size shared between snapshots and 
active tables.
+
+
+Mob Storefile Size is the Mob Storefile size shared between snapshots and 
active tables.
+
+
+Archived Storefile Size is the Storefile size in Archive.
+
+
+
+
+The format of Archived Storefile Size is NNN(MMM). NNN is the total 
Storefile size in Archive, MMM is the total Storefile size in Archive that is 
specific to the snapshot (not shared with other snapshots and tables).
+
+
+
+
+
+Figure 12. Master Snapshot Overview
+
+
+
+
+
+Figure 13. Snapshot Storefile Stats Example 1
+
+
+
+
+
+Figure 14. Snapshot Storefile Stats Example 2
+
+
+
+
+
+Figure 15. Empty Snapshot Storefile Stats Example
+
+
 
 
 
@@ -29461,7 +29507,7 @@ Use the arrows to follow the data paths.
 
 
 
-Figure 12. Example of a Complex Cluster Replication 
Configuration
+Figure 16. Example of a Complex Cluster Replication 
Configuration
 
 
 HBase replication borrows many concepts from the statement-based replication design used by MySQL.
@@ -29618,7 +29664,7 @@ The VerifyReplication command prints out 
GOODROWS and
 
 
 
-Figure 13. Replication Architecture Overview
+Figure 17. Replication Architecture Overview
 
 
 154.5.1. Life of a WAL Edit
@@ -37779,7 +37825,7 @@ Assuming the first key here is totally different from 
the key before, its prefix
 
 
 
-Figure 14. ColumnFamily with No Encoding
+Figure 18. ColumnFamily with No Encoding
 
 
 Here is the same data with prefix data encoding.
@@ -37788,7 +37834,7 @@ Assuming the first key here is totally different from 
the key before, its prefix
 
 
 
-Figure 15. ColumnFamily with Prefix Encoding
+Figure 19. ColumnFamily with Prefix Encoding
 
 
 Diff
@@ -37818,7 +37864,7 @@ Given the two row keys in the Prefix example, and given 
an exact match on timest
 
 
 
-Figure 16. ColumnFamily with Diff Encoding
+Figure 20. ColumnFamily with Diff Encoding
 
 
 Fast Diff
@@ -38319,7 +38365,7 @@ See https://github.com/tdunning/YCSB";>Ted 
Dunning’s YCSB.
 
 
 
-Figure 17. HFile V1 Format
+Figure 21. HFile V1 Format
 
 
 
@@ -38400,7 +38446,7 @@ A version 2 HFile is structured as follows:
 
 
 
-Figure 18. HFile Version 2 Structure
+Figure 22. HFile Version 2 Structure
 
 
 
@@ -39043,7 +39089,7 @@ See https://www.apache.org/foundation/board/reporting";>ASF board report
 
 
 
-Figure 19. Apache HBase Orca, HBase Colors, & Font
+Figure 23. Apache HBase Orca, HBase Colors, & Font
 
 
 https://issues.apache.org/jira/browse/HBASE-4920";>An Orca is the 
Apache HBase mascot. See NOTICES.txt.
@@ -41272,7 +41318,7 @@ 
org/apache/hadoop/hbase/security/access/AccessControlClient.revoke:(Lorg/apache/
 
 
 Version 3.0.0-SNAPSHOT
-Last updated 2018-11-08 14:33:09 UTC
+Last updated 2018-11-09 14:33:44 UTC
 
 
 

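An aside on the Archived Storefile Size format documented above: a value rendered as
10.3G(2.1G) would mean the snapshot's storefiles occupy 10.3G in the archive in total, of
which 2.1G is referenced only by this snapshot and shared with no other snapshot or table;
the figures are invented purely for illustration.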
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9592fdb5/bulk-loads.html
--
diff --git a/bulk-loads.html b/bulk-loads.html
index 5006398..65a62c8 100644
--- a/bulk-loads.html
+++ b/bulk-loads.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase –  
   Bulk Loads in Apache HBase (TM)
@@ -316,7 +316,7 @@ under the License. -->
 https://www.apache.org/";>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2018-11-08
+  Last Published: 
2018-11-09
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9592fdb5/checkstyle-aggregate.html
--
diff --git a/checkstyle-aggregate.html b/checkstyle-aggregate.html
index 070c1a3..a98b531 100644
--- a/checkstyle-aggregate.html
+++ b/checkstyle-aggregate.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase – Checkstyle Results
 
@@ -294,7 +294,7 @@
 3804
 0
 0
-15099
+15097
 
 Files
 
@@ -7467,7 +7467,7 @@
 org/apache/hadoop/hbase/regionserver/wal/ProtobufLogReader.java
 0
 0
-22
+20
 
 org/apache/hadoop/hbase/regionserver/wal/ReaderBase.java
 0
@@ -9849,7 +9849,7 @@
 
 max: "100"
 ignorePattern: "^package.*|^import.*|a 
href|href|http://|https://|ftp://|org.apache.thrift.|com.google.protobuf.|hbase.protobuf.generated"
-1445
+1443
  Erro

[24/47] hbase-site git commit: Published site at fe2265fa4a1e828b2e68ff8e42639c5942dccb1b.

2018-11-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9592fdb5/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RandomCandidateGenerator.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RandomCandidateGenerator.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RandomCandidateGenerator.html
index 9d1542c..ec5f688 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RandomCandidateGenerator.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RandomCandidateGenerator.html
@@ -53,502 +53,502 @@
 045import 
org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.LocalityType;
 046import 
org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.MoveRegionAction;
 047import 
org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.SwapRegionsAction;
-048import 
org.apache.hadoop.hbase.util.Bytes;
-049import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-050import 
org.apache.yetus.audience.InterfaceAudience;
-051import org.slf4j.Logger;
-052import org.slf4j.LoggerFactory;
-053
-054import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-055import 
org.apache.hbase.thirdparty.com.google.common.base.Optional;
-056import 
org.apache.hbase.thirdparty.com.google.common.collect.Lists;
+048import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+049import 
org.apache.yetus.audience.InterfaceAudience;
+050import org.slf4j.Logger;
+051import org.slf4j.LoggerFactory;
+052
+053import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
+054import 
org.apache.hbase.thirdparty.com.google.common.base.Optional;
+055import 
org.apache.hbase.thirdparty.com.google.common.collect.Lists;
+056
 057
-058
(The remainder of this hunk is the same renumbering shown in full in the [29/47] message
above: the unchanged StochasticLoadBalancer class javadoc moves from source lines 059-105 to
058-104 after the Bytes import removal, ending at the truncated class body.)

[12/47] hbase-site git commit: Published site at fe2265fa4a1e828b2e68ff8e42639c5942dccb1b.

2018-11-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9592fdb5/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogReader.WALHdrContext.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogReader.WALHdrContext.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogReader.WALHdrContext.html
index 62e604e..0a58fc2 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogReader.WALHdrContext.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogReader.WALHdrContext.html
@@ -348,115 +348,131 @@
 340  }
 341  WALKey.Builder builder = 
WALKey.newBuilder();
 342  long size = 0;
-343  try {
-344long available = -1;
-345try {
-346  int firstByte = 
this.inputStream.read();
-347  if (firstByte == -1) {
-348throw new EOFException("First 
byte is negative at offset " + originalPosition);
-349  }
-350  size = 
CodedInputStream.readRawVarint32(firstByte, this.inputStream);
-351  // available may be < 0 on 
local fs for instance.  If so, can't depend on it.
-352  available = 
this.inputStream.available();
-353  if (available > 0 && 
available < size) {
-354throw new 
EOFException("Available stream not enough for edit, " +
-355"inputStream.available()= 
" + this.inputStream.available() + ", " +
-356"entry size= " + size + " 
at offset = " + this.inputStream.getPos());
-357  }
-358  ProtobufUtil.mergeFrom(builder, 
ByteStreams.limit(this.inputStream, size),
-359(int)size);
-360} catch 
(InvalidProtocolBufferException ipbe) {
-361  throw (EOFException) new 
EOFException("Invalid PB, EOF? Ignoring; originalPosition=" +
-362originalPosition + ", 
currentPosition=" + this.inputStream.getPos() +
-363", messageSize=" + size + ", 
currentAvailable=" + available).initCause(ipbe);
-364}
-365if (!builder.isInitialized()) {
-366  // TODO: not clear if we should 
try to recover from corrupt PB that looks semi-legit.
-367  //   If we can get the KV 
count, we could, theoretically, try to get next record.
-368  throw new EOFException("Partial 
PB while reading WAL, " +
-369  "probably an unexpected 
EOF, ignoring. current offset=" + this.inputStream.getPos());
-370}
-371WALKey walKey = 
builder.build();
-372
entry.getKey().readFieldsFromPb(walKey, this.byteStringUncompressor);
-373if (!walKey.hasFollowingKvCount() 
|| 0 == walKey.getFollowingKvCount()) {
-374  if (LOG.isTraceEnabled()) {
-375LOG.trace("WALKey has no KVs 
that follow it; trying the next one. current offset=" +
-376
this.inputStream.getPos());
-377  }
-378  continue;
-379}
-380int expectedCells = 
walKey.getFollowingKvCount();
-381long posBefore = 
this.inputStream.getPos();
-382try {
-383  int actualCells = 
entry.getEdit().readFromCells(cellDecoder, expectedCells);
-384  if (expectedCells != 
actualCells) {
-385throw new EOFException("Only 
read " + actualCells); // other info added in catch
-386  }
-387} catch (Exception ex) {
-388  String posAfterStr = 
"";
-389  try {
-390posAfterStr = 
this.inputStream.getPos() + "";
-391  } catch (Throwable t) {
-392if (LOG.isTraceEnabled()) {
-393  LOG.trace("Error getting 
pos for error message - ignoring", t);
-394}
-395  }
-396  String message = " while 
reading " + expectedCells + " WAL KVs; started reading at "
-397  + posBefore + " and read up 
to " + posAfterStr;
-398  IOException realEofEx = 
extractHiddenEof(ex);
-399  throw (EOFException) new 
EOFException("EOF " + message).
-400  initCause(realEofEx != null 
? realEofEx : ex);
-401}
-402if (trailerPresent && 
this.inputStream.getPos() > this.walEditsStopOffset) {
-403  LOG.error("Read WALTrailer 
while reading WALEdits. wal: " + this.path
-404  + ", inputStream.getPos(): 
" + this.inputStream.getPos() + ", walEditsStopOffset: "
-405  + 
this.walEditsStopOffset);
-406  throw new EOFException("Read 
WALTrailer while reading WALEdits");
-407}
-408  } catch (EOFException eof) {
-409// If originalPosition is < 0, 
it is rubbish and we cannot use it (probably local fs)
-410if (originalPosition < 0) {
-411  if (LOG.isTraceEnabled()) {
-412LOG.trace("Encountered a 
malformed edit, but can't seek back to last good position because 
originalPosition is negative. last offset=" + this.inputStream.getPos(), 
eof);
-

[04/47] hbase-site git commit: Published site at fe2265fa4a1e828b2e68ff8e42639c5942dccb1b.

2018-11-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9592fdb5/testdevapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.DummyNodeFailoverWorker.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.DummyNodeFailoverWorker.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.DummyNodeFailoverWorker.html
index 006d1ba..18cf507 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.DummyNodeFailoverWorker.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.DummyNodeFailoverWorker.html
@@ -654,258 +654,274 @@
 646}
 647  }
 648
-649  /**
-650   * Add a peer and wait for it to 
initialize
-651   * @param waitForSource Whether to wait 
for replication source to initialize
-652   */
-653  private void addPeerAndWait(final 
String peerId, final ReplicationPeerConfig peerConfig,
-654  final boolean waitForSource) throws 
Exception {
-655final ReplicationPeers rp = 
manager.getReplicationPeers();
-656rp.getPeerStorage().addPeer(peerId, 
peerConfig, true, SyncReplicationState.NONE);
-657try {
-658  manager.addPeer(peerId);
-659} catch (Exception e) {
-660  // ignore the failed exception, 
because we'll test both success & failed case.
-661}
-662waitPeer(peerId, manager, 
waitForSource);
-663if (managerOfCluster != null) {
-664  managerOfCluster.addPeer(peerId);
-665  waitPeer(peerId, managerOfCluster, 
waitForSource);
-666}
-667  }
-668
-669  private static void waitPeer(final 
String peerId,
-670  ReplicationSourceManager manager, 
final boolean waitForSource) {
-671ReplicationPeers rp = 
manager.getReplicationPeers();
-672Waiter.waitFor(conf, 2, () -> 
{
-673  if (waitForSource) {
-674ReplicationSourceInterface rs = 
manager.getSource(peerId);
-675if (rs == null) {
-676  return false;
-677}
-678if (rs instanceof 
ReplicationSourceDummy) {
-679  return 
((ReplicationSourceDummy)rs).isStartup();
-680}
-681return true;
-682  } else {
-683return (rp.getPeer(peerId) != 
null);
-684  }
-685});
-686  }
-687
-688  /**
-689   * Remove a peer and wait for it to get 
cleaned up
-690   * @param peerId
-691   * @throws Exception
-692   */
-693  private void removePeerAndWait(final 
String peerId) throws Exception {
-694final ReplicationPeers rp = 
manager.getReplicationPeers();
-695if 
(rp.getPeerStorage().listPeerIds().contains(peerId)) {
-696  
rp.getPeerStorage().removePeer(peerId);
-697  try {
-698manager.removePeer(peerId);
-699  } catch (Exception e) {
-700// ignore the failed exception 
and continue.
-701  }
-702}
-703Waiter.waitFor(conf, 2, new 
Waiter.Predicate<Exception>() {
-704  @Override
-705  public boolean evaluate() throws 
Exception {
-706Collection<String> peers = 
rp.getPeerStorage().listPeerIds();
-707return 
(!manager.getAllQueues().contains(peerId)) && (rp.getPeer(peerId) == 
null)
-708&& 
(!peers.contains(peerId)) && manager.getSource(peerId) == null;
-709  }
-710});
-711  }
-712
-713  private WALEdit 
getBulkLoadWALEdit(NavigableMap<byte[], Integer> scope) {
-714// 1. Create store files for the 
families
-715Map<byte[], List<Path>> 
storeFiles = new HashMap<>(1);
-716Map<String, Long> 
storeFilesSize = new HashMap<>(1);
-717List<Path> p = new 
ArrayList<>(1);
-718Path hfilePath1 = new 
Path(Bytes.toString(f1));
-719p.add(hfilePath1);
-720try {
-721  
storeFilesSize.put(hfilePath1.getName(), 
fs.getFileStatus(hfilePath1).getLen());
-722} catch (IOException e) {
-723  LOG.debug("Failed to calculate the 
size of hfile " + hfilePath1);
-724  
storeFilesSize.put(hfilePath1.getName(), 0L);
-725}
-726storeFiles.put(f1, p);
-727scope.put(f1, 1);
-728p = new ArrayList<>(1);
-729Path hfilePath2 = new 
Path(Bytes.toString(f2));
-730p.add(hfilePath2);
-731try {
-732  
storeFilesSize.put(hfilePath2.getName(), 
fs.getFileStatus(hfilePath2).getLen());
-733} catch (IOException e) {
-734  LOG.debug("Failed to calculate the 
size of hfile " + hfilePath2);
-735  
storeFilesSize.put(hfilePath2.getName(), 0L);
-736}
-737storeFiles.put(f2, p);
-738// 2. Create bulk load descriptor
-739BulkLoadDescriptor desc =
-740
ProtobufUtil.toBulkLoadDescriptor(hri.getTable(),
-741  
UnsafeByteOperations.unsafeWrap(hri.getEncodedNameAsBytes()), storeFiles, 
storeFilesSize, 1);
-742
-743// 3. create bulk load wal edit 
event
-744WALEdit logEdit = 
WALEdit.createBulkLoadEvent(hri, desc);
-745return logEdit

[09/47] hbase-site git commit: Published site at fe2265fa4a1e828b2e68ff8e42639c5942dccb1b.

2018-11-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9592fdb5/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.html
index 083ab07..be28dfa 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.html
@@ -31,54 +31,54 @@
 023import java.util.Collection;
 024import java.util.Collections;
 025import java.util.HashMap;
-026import java.util.HashSet;
-027import java.util.Iterator;
-028import java.util.List;
-029import java.util.Map;
-030import java.util.NavigableSet;
-031import java.util.Set;
-032import java.util.SortedSet;
-033import java.util.TreeSet;
-034import java.util.UUID;
-035import 
java.util.concurrent.ConcurrentHashMap;
-036import 
java.util.concurrent.ConcurrentMap;
-037import java.util.concurrent.Future;
-038import 
java.util.concurrent.LinkedBlockingQueue;
-039import 
java.util.concurrent.RejectedExecutionException;
-040import 
java.util.concurrent.ThreadLocalRandom;
-041import 
java.util.concurrent.ThreadPoolExecutor;
-042import java.util.concurrent.TimeUnit;
-043import 
java.util.concurrent.atomic.AtomicLong;
-044import java.util.stream.Collectors;
-045import 
org.apache.hadoop.conf.Configuration;
-046import org.apache.hadoop.fs.FileSystem;
-047import org.apache.hadoop.fs.Path;
-048import 
org.apache.hadoop.hbase.CompatibilitySingletonFactory;
-049import 
org.apache.hadoop.hbase.HConstants;
-050import org.apache.hadoop.hbase.Server;
-051import 
org.apache.hadoop.hbase.ServerName;
-052import 
org.apache.hadoop.hbase.TableName;
-053import 
org.apache.hadoop.hbase.replication.ReplicationException;
-054import 
org.apache.hadoop.hbase.replication.ReplicationListener;
-055import 
org.apache.hadoop.hbase.replication.ReplicationPeer;
-056import 
org.apache.hadoop.hbase.replication.ReplicationPeer.PeerState;
-057import 
org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-058import 
org.apache.hadoop.hbase.replication.ReplicationPeerImpl;
-059import 
org.apache.hadoop.hbase.replication.ReplicationPeers;
-060import 
org.apache.hadoop.hbase.replication.ReplicationQueueInfo;
-061import 
org.apache.hadoop.hbase.replication.ReplicationQueueStorage;
-062import 
org.apache.hadoop.hbase.replication.ReplicationTracker;
-063import 
org.apache.hadoop.hbase.replication.ReplicationUtils;
-064import 
org.apache.hadoop.hbase.replication.SyncReplicationState;
-065import 
org.apache.hadoop.hbase.util.Pair;
-066import 
org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
-067import 
org.apache.hadoop.hbase.wal.SyncReplicationWALProvider;
-068import 
org.apache.yetus.audience.InterfaceAudience;
-069import 
org.apache.zookeeper.KeeperException;
-070import org.slf4j.Logger;
-071import org.slf4j.LoggerFactory;
-072
-073import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
+026import java.util.Iterator;
+027import java.util.List;
+028import java.util.Map;
+029import java.util.NavigableSet;
+030import java.util.Set;
+031import java.util.SortedSet;
+032import java.util.TreeSet;
+033import java.util.UUID;
+034import 
java.util.concurrent.ConcurrentHashMap;
+035import 
java.util.concurrent.ConcurrentMap;
+036import java.util.concurrent.Future;
+037import 
java.util.concurrent.LinkedBlockingQueue;
+038import 
java.util.concurrent.RejectedExecutionException;
+039import 
java.util.concurrent.ThreadLocalRandom;
+040import 
java.util.concurrent.ThreadPoolExecutor;
+041import java.util.concurrent.TimeUnit;
+042import 
java.util.concurrent.atomic.AtomicLong;
+043import java.util.stream.Collectors;
+044import 
org.apache.hadoop.conf.Configuration;
+045import org.apache.hadoop.fs.FileSystem;
+046import org.apache.hadoop.fs.Path;
+047import 
org.apache.hadoop.hbase.CompatibilitySingletonFactory;
+048import 
org.apache.hadoop.hbase.HConstants;
+049import org.apache.hadoop.hbase.Server;
+050import 
org.apache.hadoop.hbase.ServerName;
+051import 
org.apache.hadoop.hbase.TableName;
+052import 
org.apache.hadoop.hbase.replication.ReplicationException;
+053import 
org.apache.hadoop.hbase.replication.ReplicationListener;
+054import 
org.apache.hadoop.hbase.replication.ReplicationPeer;
+055import 
org.apache.hadoop.hbase.replication.ReplicationPeer.PeerState;
+056import 
org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
+057import 
org.apache.hadoop.hbase.replication.ReplicationPeerImpl;
+058import 
org.apache.hadoop.hbase.replication.ReplicationPeers;
+059import 
org.apache.hadoop.hbase.replication.ReplicationQueueInfo;
+060import 
org.apache.hadoop.hbase.replication.ReplicationQueueStorage;
+061import 
org.apache.hadoop.

[02/47] hbase-site git commit: Published site at fe2265fa4a1e828b2e68ff8e42639c5942dccb1b.

2018-11-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9592fdb5/testdevapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.FailInitializeDummyReplicationSource.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.FailInitializeDummyReplicationSource.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.FailInitializeDummyReplicationSource.html
index 006d1ba..18cf507 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.FailInitializeDummyReplicationSource.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.FailInitializeDummyReplicationSource.html
@@ -654,258 +654,274 @@
 646}
 647  }
 648
-649  /**
-650   * Add a peer and wait for it to 
initialize
-651   * @param waitForSource Whether to wait 
for replication source to initialize
-652   */
-653  private void addPeerAndWait(final 
String peerId, final ReplicationPeerConfig peerConfig,
-654  final boolean waitForSource) throws 
Exception {
-655final ReplicationPeers rp = 
manager.getReplicationPeers();
-656rp.getPeerStorage().addPeer(peerId, 
peerConfig, true, SyncReplicationState.NONE);
-657try {
-658  manager.addPeer(peerId);
-659} catch (Exception e) {
-660  // ignore the failed exception, 
because we'll test both success & failed case.
-661}
-662waitPeer(peerId, manager, 
waitForSource);
-663if (managerOfCluster != null) {
-664  managerOfCluster.addPeer(peerId);
-665  waitPeer(peerId, managerOfCluster, 
waitForSource);
-666}
-667  }
-668
-669  private static void waitPeer(final 
String peerId,
-670  ReplicationSourceManager manager, 
final boolean waitForSource) {
-671ReplicationPeers rp = 
manager.getReplicationPeers();
-672Waiter.waitFor(conf, 2, () -> 
{
-673  if (waitForSource) {
-674ReplicationSourceInterface rs = 
manager.getSource(peerId);
-675if (rs == null) {
-676  return false;
-677}
-678if (rs instanceof 
ReplicationSourceDummy) {
-679  return 
((ReplicationSourceDummy)rs).isStartup();
-680}
-681return true;
-682  } else {
-683return (rp.getPeer(peerId) != 
null);
-684  }
-685});
-686  }
-687
-688  /**
-689   * Remove a peer and wait for it to get 
cleaned up
-690   * @param peerId
-691   * @throws Exception
-692   */
-693  private void removePeerAndWait(final 
String peerId) throws Exception {
-694final ReplicationPeers rp = 
manager.getReplicationPeers();
-695if 
(rp.getPeerStorage().listPeerIds().contains(peerId)) {
-696  
rp.getPeerStorage().removePeer(peerId);
-697  try {
-698manager.removePeer(peerId);
-699  } catch (Exception e) {
-700// ignore the failed exception 
and continue.
-701  }
-702}
-703Waiter.waitFor(conf, 2, new 
Waiter.Predicate<Exception>() {
-704  @Override
-705  public boolean evaluate() throws 
Exception {
-706Collection<String> peers = 
rp.getPeerStorage().listPeerIds();
-707return 
(!manager.getAllQueues().contains(peerId)) && (rp.getPeer(peerId) == 
null)
-708&& 
(!peers.contains(peerId)) && manager.getSource(peerId) == null;
-709  }
-710});
-711  }
-712
-713  private WALEdit 
getBulkLoadWALEdit(NavigableMap<byte[], Integer> scope) {
-714// 1. Create store files for the 
families
-715Map<byte[], List<Path>> 
storeFiles = new HashMap<>(1);
-716Map<String, Long> 
storeFilesSize = new HashMap<>(1);
-717List<Path> p = new 
ArrayList<>(1);
-718Path hfilePath1 = new 
Path(Bytes.toString(f1));
-719p.add(hfilePath1);
-720try {
-721  
storeFilesSize.put(hfilePath1.getName(), 
fs.getFileStatus(hfilePath1).getLen());
-722} catch (IOException e) {
-723  LOG.debug("Failed to calculate the 
size of hfile " + hfilePath1);
-724  
storeFilesSize.put(hfilePath1.getName(), 0L);
-725}
-726storeFiles.put(f1, p);
-727scope.put(f1, 1);
-728p = new ArrayList<>(1);
-729Path hfilePath2 = new 
Path(Bytes.toString(f2));
-730p.add(hfilePath2);
-731try {
-732  
storeFilesSize.put(hfilePath2.getName(), 
fs.getFileStatus(hfilePath2).getLen());
-733} catch (IOException e) {
-734  LOG.debug("Failed to calculate the 
size of hfile " + hfilePath2);
-735  
storeFilesSize.put(hfilePath2.getName(), 0L);
-736}
-737storeFiles.put(f2, p);
-738// 2. Create bulk load descriptor
-739BulkLoadDescriptor desc =
-740
ProtobufUtil.toBulkLoadDescriptor(hri.getTable(),
-741  
UnsafeByteOperations.unsafeWrap(hri.getEncodedNameAsBytes()), storeFiles, 
storeFilesSize, 1);
-742
-743// 3. create bulk load wal edit 
event
-744WALEdit logEdit 

[01/47] hbase-site git commit: Published site at fe2265fa4a1e828b2e68ff8e42639c5942dccb1b.

2018-11-09 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site c21eba909 -> 9592fdb58


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9592fdb5/testdevapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.html
index 006d1ba..18cf507 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.html
@@ -654,258 +654,274 @@
 646}
 647  }
 648
-649  /**
-650   * Add a peer and wait for it to 
initialize
-651   * @param waitForSource Whether to wait 
for replication source to initialize
-652   */
-653  private void addPeerAndWait(final 
String peerId, final ReplicationPeerConfig peerConfig,
-654  final boolean waitForSource) throws 
Exception {
-655final ReplicationPeers rp = 
manager.getReplicationPeers();
-656rp.getPeerStorage().addPeer(peerId, 
peerConfig, true, SyncReplicationState.NONE);
-657try {
-658  manager.addPeer(peerId);
-659} catch (Exception e) {
-660  // ignore the failed exception, 
because we'll test both success & failed case.
-661}
-662waitPeer(peerId, manager, 
waitForSource);
-663if (managerOfCluster != null) {
-664  managerOfCluster.addPeer(peerId);
-665  waitPeer(peerId, managerOfCluster, 
waitForSource);
-666}
-667  }
-668
-669  private static void waitPeer(final 
String peerId,
-670  ReplicationSourceManager manager, 
final boolean waitForSource) {
-671ReplicationPeers rp = 
manager.getReplicationPeers();
-672Waiter.waitFor(conf, 2, () -> 
{
-673  if (waitForSource) {
-674ReplicationSourceInterface rs = 
manager.getSource(peerId);
-675if (rs == null) {
-676  return false;
-677}
-678if (rs instanceof 
ReplicationSourceDummy) {
-679  return 
((ReplicationSourceDummy)rs).isStartup();
-680}
-681return true;
-682  } else {
-683return (rp.getPeer(peerId) != 
null);
-684  }
-685});
-686  }
-687
-688  /**
-689   * Remove a peer and wait for it to get 
cleaned up
-690   * @param peerId
-691   * @throws Exception
-692   */
-693  private void removePeerAndWait(final 
String peerId) throws Exception {
-694final ReplicationPeers rp = 
manager.getReplicationPeers();
-695if 
(rp.getPeerStorage().listPeerIds().contains(peerId)) {
-696  
rp.getPeerStorage().removePeer(peerId);
-697  try {
-698manager.removePeer(peerId);
-699  } catch (Exception e) {
-700// ignore the failed exception 
and continue.
-701  }
-702}
-703Waiter.waitFor(conf, 2, new 
Waiter.Predicate<Exception>() {
-704  @Override
-705  public boolean evaluate() throws 
Exception {
-706Collection<String> peers = 
rp.getPeerStorage().listPeerIds();
-707return 
(!manager.getAllQueues().contains(peerId)) && (rp.getPeer(peerId) == 
null)
-708&& 
(!peers.contains(peerId)) && manager.getSource(peerId) == null;
-709  }
-710});
-711  }
-712
-713  private WALEdit 
getBulkLoadWALEdit(NavigableMap<byte[], Integer> scope) {
-714// 1. Create store files for the 
families
-715Map<byte[], List<Path>> 
storeFiles = new HashMap<>(1);
-716Map<String, Long> 
storeFilesSize = new HashMap<>(1);
-717List<Path> p = new 
ArrayList<>(1);
-718Path hfilePath1 = new 
Path(Bytes.toString(f1));
-719p.add(hfilePath1);
-720try {
-721  
storeFilesSize.put(hfilePath1.getName(), 
fs.getFileStatus(hfilePath1).getLen());
-722} catch (IOException e) {
-723  LOG.debug("Failed to calculate the 
size of hfile " + hfilePath1);
-724  
storeFilesSize.put(hfilePath1.getName(), 0L);
-725}
-726storeFiles.put(f1, p);
-727scope.put(f1, 1);
-728p = new ArrayList<>(1);
-729Path hfilePath2 = new 
Path(Bytes.toString(f2));
-730p.add(hfilePath2);
-731try {
-732  
storeFilesSize.put(hfilePath2.getName(), 
fs.getFileStatus(hfilePath2).getLen());
-733} catch (IOException e) {
-734  LOG.debug("Failed to calculate the 
size of hfile " + hfilePath2);
-735  
storeFilesSize.put(hfilePath2.getName(), 0L);
-736}
-737storeFiles.put(f2, p);
-738// 2. Create bulk load descriptor
-739BulkLoadDescriptor desc =
-740
ProtobufUtil.toBulkLoadDescriptor(hri.getTable(),
-741  
UnsafeByteOperations.unsafeWrap(hri.getEncodedNameAsBytes()), storeFiles, 
storeFilesSize, 1);
-742
-743// 3. create bulk load wal edit 
event
-744WALEdit logEdit = 
WALEdit.createBulkLoadEvent(hri, desc);
-745return logEdit;
-746  }
-747
-748  static clas

[20/47] hbase-site git commit: Published site at fe2265fa4a1e828b2e68ff8e42639c5942dccb1b.

2018-11-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9592fdb5/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RegionReplicaHostCostFunction.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RegionReplicaHostCostFunction.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RegionReplicaHostCostFunction.html
index 9d1542c..ec5f688 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RegionReplicaHostCostFunction.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RegionReplicaHostCostFunction.html
@@ -53,502 +53,502 @@
 045import 
org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.LocalityType;
 046import 
org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.MoveRegionAction;
 047import 
org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.SwapRegionsAction;
-048import 
org.apache.hadoop.hbase.util.Bytes;
-049import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-050import 
org.apache.yetus.audience.InterfaceAudience;
-051import org.slf4j.Logger;
-052import org.slf4j.LoggerFactory;
-053
-054import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-055import 
org.apache.hbase.thirdparty.com.google.common.base.Optional;
-056import 
org.apache.hbase.thirdparty.com.google.common.collect.Lists;
+048import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+049import 
org.apache.yetus.audience.InterfaceAudience;
+050import org.slf4j.Logger;
+051import org.slf4j.LoggerFactory;
+052
+053import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
+054import 
org.apache.hbase.thirdparty.com.google.common.base.Optional;
+055import 
org.apache.hbase.thirdparty.com.google.common.collect.Lists;
+056
 057
-058
(The remainder of this hunk is the same renumbering shown in full in the [29/47] message
above: the unchanged StochasticLoadBalancer class javadoc moves from source lines 059-105 to
058-104 after the Bytes import removal, ending at the truncated class body.)

[19/47] hbase-site git commit: Published site at fe2265fa4a1e828b2e68ff8e42639c5942dccb1b.

2018-11-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9592fdb5/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RegionReplicaRackCandidateGenerator.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RegionReplicaRackCandidateGenerator.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RegionReplicaRackCandidateGenerator.html
index 9d1542c..ec5f688 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RegionReplicaRackCandidateGenerator.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RegionReplicaRackCandidateGenerator.html
@@ -53,502 +53,502 @@
 045import 
org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.LocalityType;
 046import 
org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.MoveRegionAction;
 047import 
org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.SwapRegionsAction;
-048import 
org.apache.hadoop.hbase.util.Bytes;
-049import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-050import 
org.apache.yetus.audience.InterfaceAudience;
-051import org.slf4j.Logger;
-052import org.slf4j.LoggerFactory;
-053
-054import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-055import 
org.apache.hbase.thirdparty.com.google.common.base.Optional;
-056import 
org.apache.hbase.thirdparty.com.google.common.collect.Lists;
+048import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+049import 
org.apache.yetus.audience.InterfaceAudience;
+050import org.slf4j.Logger;
+051import org.slf4j.LoggerFactory;
+052
+053import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
+054import 
org.apache.hbase.thirdparty.com.google.common.base.Optional;
+055import 
org.apache.hbase.thirdparty.com.google.common.collect.Lists;
+056
 057
-058
(The remainder of this hunk is the same renumbering shown in full in the [29/47] message
above: the unchanged StochasticLoadBalancer class javadoc moves from source lines 059-105 to
058-104 after the Bytes import removal, ending at the truncated class body.)

[07/47] hbase-site git commit: Published site at fe2265fa4a1e828b2e68ff8e42639c5942dccb1b.

2018-11-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9592fdb5/testdevapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html 
b/testdevapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html
index 647f762..057d9b4 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html
@@ -702,9 +702,9 @@
 java.lang.Enum<E> (implements java.lang.Comparable<T>, java.io.Serializable)
 
 org.apache.hadoop.hbase.regionserver.TestAtomicOperation.TestStep
-org.apache.hadoop.hbase.regionserver.DataBlockEncodingTool.Manipulation
 org.apache.hadoop.hbase.regionserver.TestCacheOnWriteInSchema.CacheOnWriteType
 org.apache.hadoop.hbase.regionserver.TestRegionServerReadRequestMetrics.Metric
+org.apache.hadoop.hbase.regionserver.DataBlockEncodingTool.Manipulation
 org.apache.hadoop.hbase.regionserver.TestMultiLogThreshold.ActionType
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9592fdb5/testdevapidocs/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.DummyNodeFailoverWorker.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.DummyNodeFailoverWorker.html
 
b/testdevapidocs/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.DummyNodeFailoverWorker.html
index 79489dc..20ee998 100644
--- 
a/testdevapidocs/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.DummyNodeFailoverWorker.html
+++ 
b/testdevapidocs/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.DummyNodeFailoverWorker.html
@@ -122,7 +122,7 @@ var activeTableTab = "activeTableTab";

-static class TestReplicationSourceManager.DummyNodeFailoverWorker
+static class TestReplicationSourceManager.DummyNodeFailoverWorker
 extends java.lang.Thread

@@ -256,7 +256,7 @@ extends java.lang.Thread

 logZnodesMap
-private java.util.Map<String, java.util.Set<String>> logZnodesMap
+private java.util.Map<String, java.util.Set<String>> logZnodesMap

@@ -265,7 +265,7 @@ extends java.lang.Thread

 server
-org.apache.hadoop.hbase.Server server
+org.apache.hadoop.hbase.Server server

@@ -274,7 +274,7 @@ extends java.lang.Thread

 deadRS
-private org.apache.hadoop.hbase.ServerName deadRS
+private org.apache.hadoop.hbase.ServerName deadRS

@@ -283,7 +283,7 @@ extends java.lang.Thread

 rq
-org.apache.hadoop.hbase.replication.ReplicationQueueStorage rq
+org.apache.hadoop.hbase.replication.ReplicationQueueStorage rq

@@ -300,7 +300,7 @@ extends java.lang.Thread

 DummyNodeFailoverWorker
-public DummyNodeFailoverWorker(org.apache.hadoop.hbase.ServerName deadRS,
+public DummyNodeFailoverWorker(org.apache.hadoop.hbase.ServerName deadRS,
 org.apache.hadoop.hbase.Server s)
 throws java.lang.Exception

@@ -323,7 +323,7 @@ extends java.lang.Thread

[22/47] hbase-site git commit: Published site at fe2265fa4a1e828b2e68ff8e42639c5942dccb1b.

2018-11-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9592fdb5/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RegionCountSkewCostFunction.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RegionCountSkewCostFunction.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RegionCountSkewCostFunction.html
index 9d1542c..ec5f688 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RegionCountSkewCostFunction.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RegionCountSkewCostFunction.html
@@ -53,502 +53,502 @@
 045import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.LocalityType;
 046import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.MoveRegionAction;
 047import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.SwapRegionsAction;
-048import org.apache.hadoop.hbase.util.Bytes;
-049import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-050import org.apache.yetus.audience.InterfaceAudience;
-051import org.slf4j.Logger;
-052import org.slf4j.LoggerFactory;
-053
-054import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-055import org.apache.hbase.thirdparty.com.google.common.base.Optional;
-056import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
+048import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+049import org.apache.yetus.audience.InterfaceAudience;
+050import org.slf4j.Logger;
+051import org.slf4j.LoggerFactory;
+052
+053import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
+054import org.apache.hbase.thirdparty.com.google.common.base.Optional;
+055import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
+056
 057

[13/47] hbase-site git commit: Published site at fe2265fa4a1e828b2e68ff8e42639c5942dccb1b.

2018-11-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9592fdb5/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.html
index 9d1542c..ec5f688 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.html
@@ -53,502 +53,502 @@
 045import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.LocalityType;
 046import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.MoveRegionAction;
 047import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.SwapRegionsAction;
-048import org.apache.hadoop.hbase.util.Bytes;
-049import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-050import org.apache.yetus.audience.InterfaceAudience;
-051import org.slf4j.Logger;
-052import org.slf4j.LoggerFactory;
-053
-054import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-055import org.apache.hbase.thirdparty.com.google.common.base.Optional;
-056import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
+048import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+049import org.apache.yetus.audience.InterfaceAudience;
+050import org.slf4j.Logger;
+051import org.slf4j.LoggerFactory;
+052
+053import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
+054import org.apache.hbase.thirdparty.com.google.common.base.Optional;
+055import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
+056
 057

[30/47] hbase-site git commit: Published site at fe2265fa4a1e828b2e68ff8e42639c5942dccb1b.

2018-11-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9592fdb5/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.LocalityBasedCandidateGenerator.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.LocalityBasedCandidateGenerator.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.LocalityBasedCandidateGenerator.html
index 9d1542c..ec5f688 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.LocalityBasedCandidateGenerator.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.LocalityBasedCandidateGenerator.html
@@ -53,502 +53,502 @@
 045import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.LocalityType;
 046import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.MoveRegionAction;
 047import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.SwapRegionsAction;
-048import org.apache.hadoop.hbase.util.Bytes;
-049import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-050import org.apache.yetus.audience.InterfaceAudience;
-051import org.slf4j.Logger;
-052import org.slf4j.LoggerFactory;
-053
-054import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-055import org.apache.hbase.thirdparty.com.google.common.base.Optional;
-056import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
+048import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+049import org.apache.yetus.audience.InterfaceAudience;
+050import org.slf4j.Logger;
+051import org.slf4j.LoggerFactory;
+052
+053import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
+054import org.apache.hbase.thirdparty.com.google.common.base.Optional;
+055import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
+056
 057

[06/47] hbase-site git commit: Published site at fe2265fa4a1e828b2e68ff8e42639c5942dccb1b.

2018-11-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9592fdb5/testdevapidocs/src-html/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancer.MockCluster.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancer.MockCluster.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancer.MockCluster.html
index 13b918c..fe1ee7f 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancer.MockCluster.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancer.MockCluster.html
@@ -139,7 +139,7 @@
 131  
when(rl.getWriteRequestCount()).thenReturn(0L);
 132  
when(rl.getMemStoreSize()).thenReturn(Size.ZERO);
 133  
when(rl.getStoreFileSize()).thenReturn(Size.ZERO);
-134  
regionLoadMap.put(info.getEncodedNameAsBytes(), rl);
+134  
regionLoadMap.put(info.getRegionName(), rl);
 135}
 136
when(serverMetrics.getRegionMetrics()).thenReturn(regionLoadMap);
 137return serverMetrics;
@@ -230,273 +230,275 @@
 222
 223  
loadBalancer.setClusterMetrics(clusterStatus);
 224}
-225
assertTrue(loadBalancer.loads.get(REGION_KEY) != null);
-226
assertTrue(loadBalancer.loads.get(REGION_KEY).size() == 15);
-227
-228Queue<BalancerRegionLoad> loads = loadBalancer.loads.get(REGION_KEY);
-229int i = 0;
-230while(loads.size() > 0) {
-231  BalancerRegionLoad rl = 
loads.remove();
-232  assertEquals(i + 
(numClusterStatusToAdd - 15), rl.getStorefileSizeMB());
-233  i ++;
-234}
-235  }
-236
-237  @Test
-238  public void testNeedBalance() {
-239float minCost = 
conf.getFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
-240
conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 1.0f);
-241loadBalancer.setConf(conf);
-242for (int[] mockCluster : 
clusterStateMocks) {
-243  Map<ServerName, List<RegionInfo>> servers = mockClusterServers(mockCluster);
-244  List<RegionPlan> plans = loadBalancer.balanceCluster(servers);
-245  assertNull(plans);
-246}
-247// reset config
-248
conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 
minCost);
-249loadBalancer.setConf(conf);
-250  }
-251
-252  @Test
-253  public void testLocalityCost() throws 
Exception {
-254Configuration conf = 
HBaseConfiguration.create();
-255MockNoopMasterServices master = new 
MockNoopMasterServices();
-256StochasticLoadBalancer.CostFunction
-257costFunction = new 
ServerLocalityCostFunction(conf, master);
-258
-259for (int test = 0; test < 
clusterRegionLocationMocks.length; test++) {
-260  int[][] clusterRegionLocations = 
clusterRegionLocationMocks[test];
-261  MockCluster cluster = new 
MockCluster(clusterRegionLocations);
-262  costFunction.init(cluster);
-263  double cost = 
costFunction.cost();
-264  double expected = 1 - 
expectedLocalities[test];
-265  assertEquals(expected, cost, 
0.001);
-266}
-267  }
-268
-269  @Test
-270  public void testMoveCost() throws 
Exception {
-271Configuration conf = 
HBaseConfiguration.create();
-272StochasticLoadBalancer.CostFunction
-273costFunction = new 
StochasticLoadBalancer.MoveCostFunction(conf);
-274for (int[] mockCluster : 
clusterStateMocks) {
-275  BaseLoadBalancer.Cluster cluster = 
mockCluster(mockCluster);
-276  costFunction.init(cluster);
-277  double cost = 
costFunction.cost();
-278  assertEquals(0.0f, cost, 0.001);
-279
-280  // cluster region number is smaller 
than maxMoves=600
-281  cluster.setNumRegions(200);
-282  cluster.setNumMovedRegions(10);
-283  cost = costFunction.cost();
-284  assertEquals(0.05f, cost, 0.001);
-285  cluster.setNumMovedRegions(100);
-286  cost = costFunction.cost();
-287  assertEquals(0.5f, cost, 0.001);
-288  cluster.setNumMovedRegions(200);
-289  cost = costFunction.cost();
-290  assertEquals(1.0f, cost, 0.001);
-291
-292
-293  // cluster region number is bigger 
than maxMoves=2500
-294  cluster.setNumRegions(10000);
-295  cluster.setNumMovedRegions(250);
-296  cost = costFunction.cost();
-297  assertEquals(0.1f, cost, 0.001);
-298  cluster.setNumMovedRegions(1250);
-299  cost = costFunction.cost();
-300  assertEquals(0.5f, cost, 0.001);
-301  cluster.setNumMovedRegions(2500);
-302  cost = costFunction.cost();
-303  assertEquals(1.0f, cost, 0.01);
-304}
-305  }
-306
-307  @Test
-308  public void testSkewCost() {
-309Configuration conf = 
HBaseConfiguration.create();
-310StochasticLoadBalancer.CostFunction
-311costFunction = new 
StochasticLoadBalancer.RegionCountSkewCostFunction(conf);
-312for (int[] mockCluster : 
clusterStateMocks) {
-313  
costFunction.init(mockClu
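The testMoveCost assertions above pin down how the move cost scales: the cost is the number of moved regions divided by min(numRegions, maxMoves), where maxMoves, per the test's comments, is 600 for the 200-region cluster and 2500 for the large one, which is consistent with a floor of 600 and a default max-moves percentage of 25%. A small self-contained check of that arithmetic, inferred from the asserted values rather than copied from MoveCostFunction:

// Worked check of the testMoveCost expectations above. The formula is inferred from the
// test's comments and asserted values; it is a sketch, not the MoveCostFunction source.
final class MoveCostCheck {

  static double moveCost(int numRegions, int numMovedRegions) {
    double maxMoves = Math.max(numRegions * 0.25, 600);       // assumed 25% default, 600 floor
    return numMovedRegions / Math.min(numRegions, maxMoves);  // scaled to [0, 1]
  }

  public static void main(String[] args) {
    // 200 regions: maxMoves = 600, so the cost scales over min(200, 600) = 200
    System.out.println(moveCost(200, 10));     // 0.05
    System.out.println(moveCost(200, 100));    // 0.5
    System.out.println(moveCost(200, 200));    // 1.0
    // 10000 regions: maxMoves = 2500, so the cost scales over min(10000, 2500) = 2500
    System.out.println(moveCost(10000, 250));  // 0.1
    System.out.println(moveCost(10000, 1250)); // 0.5
    System.out.println(moveCost(10000, 2500)); // 1.0
  }
}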

[14/47] hbase-site git commit: Published site at fe2265fa4a1e828b2e68ff8e42639c5942dccb1b.

2018-11-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9592fdb5/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.WriteRequestCostFunction.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.WriteRequestCostFunction.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.WriteRequestCostFunction.html
index 9d1542c..ec5f688 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.WriteRequestCostFunction.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.WriteRequestCostFunction.html
@@ -53,502 +53,502 @@
 045import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.LocalityType;
 046import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.MoveRegionAction;
 047import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.SwapRegionsAction;
-048import org.apache.hadoop.hbase.util.Bytes;
-049import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-050import org.apache.yetus.audience.InterfaceAudience;
-051import org.slf4j.Logger;
-052import org.slf4j.LoggerFactory;
-053
-054import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-055import org.apache.hbase.thirdparty.com.google.common.base.Optional;
-056import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
+048import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+049import org.apache.yetus.audience.InterfaceAudience;
+050import org.slf4j.Logger;
+051import org.slf4j.LoggerFactory;
+052
+053import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
+054import org.apache.hbase.thirdparty.com.google.common.base.Optional;
+055import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
+056
 057

[18/47] hbase-site git commit: Published site at fe2265fa4a1e828b2e68ff8e42639c5942dccb1b.

2018-11-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9592fdb5/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RegionReplicaRackCostFunction.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RegionReplicaRackCostFunction.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RegionReplicaRackCostFunction.html
index 9d1542c..ec5f688 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RegionReplicaRackCostFunction.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RegionReplicaRackCostFunction.html
@@ -53,502 +53,502 @@
 045import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.LocalityType;
 046import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.MoveRegionAction;
 047import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.SwapRegionsAction;
-048import org.apache.hadoop.hbase.util.Bytes;
-049import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-050import org.apache.yetus.audience.InterfaceAudience;
-051import org.slf4j.Logger;
-052import org.slf4j.LoggerFactory;
-053
-054import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-055import org.apache.hbase.thirdparty.com.google.common.base.Optional;
-056import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
+048import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+049import org.apache.yetus.audience.InterfaceAudience;
+050import org.slf4j.Logger;
+051import org.slf4j.LoggerFactory;
+052
+053import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
+054import org.apache.hbase.thirdparty.com.google.common.base.Optional;
+055import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
+056
 057

[05/47] hbase-site git commit: Published site at fe2265fa4a1e828b2e68ff8e42639c5942dccb1b.

2018-11-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9592fdb5/testdevapidocs/src-html/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancer.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancer.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancer.html
index 13b918c..fe1ee7f 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancer.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancer.html
@@ -139,7 +139,7 @@
 131  
when(rl.getWriteRequestCount()).thenReturn(0L);
 132  
when(rl.getMemStoreSize()).thenReturn(Size.ZERO);
 133  
when(rl.getStoreFileSize()).thenReturn(Size.ZERO);
-134  
regionLoadMap.put(info.getEncodedNameAsBytes(), rl);
+134  
regionLoadMap.put(info.getRegionName(), rl);
 135}
 136
when(serverMetrics.getRegionMetrics()).thenReturn(regionLoadMap);
 137return serverMetrics;
@@ -230,273 +230,275 @@
 222
 223  
loadBalancer.setClusterMetrics(clusterStatus);
 224}
-225
assertTrue(loadBalancer.loads.get(REGION_KEY) != null);
-226
assertTrue(loadBalancer.loads.get(REGION_KEY).size() == 15);
-227
-228Queue<BalancerRegionLoad> loads = loadBalancer.loads.get(REGION_KEY);
-229int i = 0;
-230while(loads.size() > 0) {
-231  BalancerRegionLoad rl = 
loads.remove();
-232  assertEquals(i + 
(numClusterStatusToAdd - 15), rl.getStorefileSizeMB());
-233  i ++;
-234}
-235  }
-236
-237  @Test
-238  public void testNeedBalance() {
-239float minCost = 
conf.getFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
-240
conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 1.0f);
-241loadBalancer.setConf(conf);
-242for (int[] mockCluster : 
clusterStateMocks) {
-243  Map<ServerName, List<RegionInfo>> servers = mockClusterServers(mockCluster);
-244  List<RegionPlan> plans = loadBalancer.balanceCluster(servers);
-245  assertNull(plans);
-246}
-247// reset config
-248
conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 
minCost);
-249loadBalancer.setConf(conf);
-250  }
-251
-252  @Test
-253  public void testLocalityCost() throws 
Exception {
-254Configuration conf = 
HBaseConfiguration.create();
-255MockNoopMasterServices master = new 
MockNoopMasterServices();
-256StochasticLoadBalancer.CostFunction
-257costFunction = new 
ServerLocalityCostFunction(conf, master);
-258
-259for (int test = 0; test < 
clusterRegionLocationMocks.length; test++) {
-260  int[][] clusterRegionLocations = 
clusterRegionLocationMocks[test];
-261  MockCluster cluster = new 
MockCluster(clusterRegionLocations);
-262  costFunction.init(cluster);
-263  double cost = 
costFunction.cost();
-264  double expected = 1 - 
expectedLocalities[test];
-265  assertEquals(expected, cost, 
0.001);
-266}
-267  }
-268
-269  @Test
-270  public void testMoveCost() throws 
Exception {
-271Configuration conf = 
HBaseConfiguration.create();
-272StochasticLoadBalancer.CostFunction
-273costFunction = new 
StochasticLoadBalancer.MoveCostFunction(conf);
-274for (int[] mockCluster : 
clusterStateMocks) {
-275  BaseLoadBalancer.Cluster cluster = 
mockCluster(mockCluster);
-276  costFunction.init(cluster);
-277  double cost = 
costFunction.cost();
-278  assertEquals(0.0f, cost, 0.001);
-279
-280  // cluster region number is smaller 
than maxMoves=600
-281  cluster.setNumRegions(200);
-282  cluster.setNumMovedRegions(10);
-283  cost = costFunction.cost();
-284  assertEquals(0.05f, cost, 0.001);
-285  cluster.setNumMovedRegions(100);
-286  cost = costFunction.cost();
-287  assertEquals(0.5f, cost, 0.001);
-288  cluster.setNumMovedRegions(200);
-289  cost = costFunction.cost();
-290  assertEquals(1.0f, cost, 0.001);
-291
-292
-293  // cluster region number is bigger 
than maxMoves=2500
-294  cluster.setNumRegions(10000);
-295  cluster.setNumMovedRegions(250);
-296  cost = costFunction.cost();
-297  assertEquals(0.1f, cost, 0.001);
-298  cluster.setNumMovedRegions(1250);
-299  cost = costFunction.cost();
-300  assertEquals(0.5f, cost, 0.001);
-301  cluster.setNumMovedRegions(2500);
-302  cost = costFunction.cost();
-303  assertEquals(1.0f, cost, 0.01);
-304}
-305  }
-306
-307  @Test
-308  public void testSkewCost() {
-309Configuration conf = 
HBaseConfiguration.create();
-310StochasticLoadBalancer.CostFunction
-311costFunction = new 
StochasticLoadBalancer.RegionCountSkewCostFunction(conf);
-312for (int[] mockCluster : 
clusterStateMocks) {
-313  
costFunction.init(mockCluster(mockCluster));
-314  double cost = 
costFunction.co

hbase git commit: HBASE-21437 Bypassed procedure throw IllegalArgumentException when its state is WAITING_TIMEOUT

2018-11-09 Thread allan163
Repository: hbase
Updated Branches:
  refs/heads/branch-2.0 6584a76d3 -> e8404c7c2


HBASE-21437 Bypassed procedure throw IllegalArgumentException when its state is 
WAITING_TIMEOUT

Signed-off-by: Allan Yang 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/e8404c7c
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/e8404c7c
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/e8404c7c

Branch: refs/heads/branch-2.0
Commit: e8404c7c21b5ceaddd1359037be1303171c97ae9
Parents: 6584a76
Author: jingyuntian 
Authored: Fri Nov 9 22:45:43 2018 +0800
Committer: Allan Yang 
Committed: Fri Nov 9 22:45:43 2018 +0800

--
 .../hbase/procedure2/ProcedureExecutor.java | 19 ++
 .../hbase/procedure2/TimeoutExecutorThread.java |  2 +-
 .../hbase/procedure2/TestProcedureBypass.java   | 38 +++-
 3 files changed, 51 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/e8404c7c/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
index 3bd5e0f..a70a9ef 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
@@ -1033,15 +1033,22 @@ public class ProcedureExecutor {
 store.update(procedure);
   }
 
-  // If we don't have the lock, we can't re-submit the queue,
-  // since it is already executing. To get rid of the stuck situation, we
-  // need to restart the master. With the procedure set to bypass, the 
procedureExecutor
-  // will bypass it and won't get stuck again.
-  if (lockEntry != null) {
-// add the procedure to run queue,
+  // If the state of the procedure is WAITING_TIMEOUT, we can't directly submit it to the
+  // scheduler. Instead we should remove it from the timeoutExecutor queue and transfer its
+  // state to RUNNABLE.
+  if (procedure.getState() == ProcedureState.WAITING_TIMEOUT) {
+LOG.debug("transform procedure {} from WAITING_TIMEOUT to RUNNABLE", 
procedure);
+if (timeoutExecutor.remove(procedure)) {
+  LOG.debug("removed procedure {} from timeoutExecutor", procedure);
+  timeoutExecutor.executeTimedoutProcedure(procedure);
+}
+  } else if (lockEntry != null) {
 scheduler.addFront(procedure);
 LOG.info("Bypassing {} and its ancestors successfully, adding to 
queue", procedure);
   } else {
+// If we don't have the lock, we can't re-submit the queue,
+// since it is already executing. To get rid of the stuck situation, we
+// need to restart the master. With the procedure set to bypass, the 
procedureExecutor
+// will bypass it and won't get stuck again.
 LOG.info("Bypassing {} and its ancestors successfully, but since it is 
already running, "
 + "skipping add to queue", procedure);
   }
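The ProcedureExecutor hunk above is the heart of the fix: a procedure parked in WAITING_TIMEOUT still sits in the timeout executor's queue, so resubmitting it straight to the scheduler is what used to surface as an IllegalArgumentException. A toy model of the corrected control flow, deliberately written without the HBase procedure API (BypassSketch, Task, and the queues below are all invented for illustration):

import java.util.ArrayDeque;
import java.util.Deque;
import java.util.HashSet;
import java.util.Set;

// Toy model of the control flow this patch adds; not HBase code. A task parked in a
// timeout queue must be removed from it before it can be handed back to the run queue,
// mirroring how bypassProcedure() now treats WAITING_TIMEOUT procedures.
final class BypassSketch {

  enum State { RUNNABLE, WAITING_TIMEOUT }

  static final class Task {
    State state = State.RUNNABLE;
  }

  private final Deque<Task> runQueue = new ArrayDeque<>();
  private final Set<Task> timeoutQueue = new HashSet<>();

  void bypass(Task t, boolean holdsLock) {
    if (t.state == State.WAITING_TIMEOUT) {
      // The fix: pull the task out of the timeout machinery first, then let the
      // timeout path hand it back as RUNNABLE, instead of resubmitting directly.
      if (timeoutQueue.remove(t)) {
        t.state = State.RUNNABLE;
        runQueue.addFirst(t);
      }
    } else if (holdsLock) {
      runQueue.addFirst(t);   // we hold the lock, so it is safe to resubmit directly
    }
    // else: the task is already executing; nothing to resubmit.
  }
}

The essential ordering is remove-from-timer before resubmit; only once the timeout machinery has let go of the task is it safe to hand it back to the run queue.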

http://git-wip-us.apache.org/repos/asf/hbase/blob/e8404c7c/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/TimeoutExecutorThread.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/TimeoutExecutorThread.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/TimeoutExecutorThread.java
index 9e050a2..4416177 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/TimeoutExecutorThread.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/TimeoutExecutorThread.java
@@ -126,7 +126,7 @@ class TimeoutExecutorThread extends 
StoppableThread {
 }
   }
 
-  private void executeTimedoutProcedure(Procedure proc) {
+  protected void executeTimedoutProcedure(Procedure proc) {
 // The procedure received a timeout. if the procedure itself does not 
handle it,
 // call abort() and add the procedure back in the queue for rollback.
 if (proc.setTimeoutFailure(executor.getEnvironment())) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/e8404c7c/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureBypass.java
--
diff --git 
a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureBypass.java
 
b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureBypass.java
index fa40631..de7a0a1 100644
--- 
a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/pr