hbase git commit: HBASE-13844 Move static helper methods from KeyValue into CellUtils

2017-09-28 Thread chia7712
Repository: hbase
Updated Branches:
  refs/heads/branch-2 ac933f7af -> 0658252ed


HBASE-13844 Move static helper methods from KeyValue into CellUtils

Signed-off-by: Chia-Ping Tsai 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/0658252e
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/0658252e
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/0658252e

Branch: refs/heads/branch-2
Commit: 0658252ed67d3a61468b2112e645df03d89c37f2
Parents: ac933f7
Author: Andy Yang 
Authored: Tue Sep 26 05:19:31 2017 -0700
Committer: Chia-Ping Tsai 
Committed: Fri Sep 29 13:20:24 2017 +0800

--
 .../hadoop/hbase/client/RegionInfoBuilder.java  |   9 -
 .../java/org/apache/hadoop/hbase/CellUtil.java  |  48 
 .../java/org/apache/hadoop/hbase/KeyValue.java  | 257 +--
 .../java/org/apache/hadoop/hbase/TableName.java |  16 --
 .../hadoop/hbase/mapred/GroupingTableMap.java   |   3 +-
 .../hbase/mapreduce/GroupingTableMapper.java|   3 +-
 .../hbase/mapreduce/TableInputFormat.java   |   4 +-
 .../apache/hadoop/hbase/rest/RowResource.java   |  19 +-
 .../hadoop/hbase/rest/RowResultGenerator.java   |   4 +-
 .../hbase/rest/ScannerInstanceResource.java |   3 +-
 .../hbase/rest/ScannerResultGenerator.java  |   4 +-
 .../hadoop/hbase/rest/client/RemoteHTable.java  |   2 +-
 .../hadoop/hbase/rest/model/CellModel.java  |   3 +-
 .../hadoop/hbase/rest/TestScannerResource.java  |   4 +-
 .../hbase/rest/TestScannersWithFilters.java |   2 +-
 .../hbase/rest/TestScannersWithLabels.java  |   4 +-
 .../hadoop/hbase/rest/TestTableResource.java|   4 +-
 .../hadoop/hbase/io/hfile/FixedFileTrailer.java |   4 +-
 .../org/apache/hadoop/hbase/HBaseTestCase.java  |   2 +-
 .../hadoop/hbase/thrift/IncrementCoalescer.java |   4 +-
 .../hadoop/hbase/thrift/ThriftServerRunner.java |  35 +--
 .../hadoop/hbase/thrift/ThriftUtilities.java|  10 +-
 22 files changed, 107 insertions(+), 337 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/0658252e/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfoBuilder.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfoBuilder.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfoBuilder.java
index 7d5c476..acff186 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfoBuilder.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfoBuilder.java
@@ -487,14 +487,5 @@ public class RegionInfoBuilder {
   return RegionInfo.COMPARATOR.compare(this, other);
 }
 
-/**
- * @return Comparator to use comparing {@link KeyValue}s.
- * @deprecated Use Region#getCellComparator().  deprecated for hbase 2.0, 
remove for hbase 3.0
- */
-@Deprecated
-public KeyValue.KVComparator getComparator() {
-  return isMetaRegion()?
-  KeyValue.META_COMPARATOR: KeyValue.COMPARATOR;
-}
   }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hbase/blob/0658252e/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java
--
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java 
b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java
index a3029f8..dc5df30 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java
@@ -20,6 +20,9 @@ package org.apache.hadoop.hbase;
 
 import static org.apache.hadoop.hbase.HConstants.EMPTY_BYTE_ARRAY;
 import static org.apache.hadoop.hbase.Tag.TAG_LENGTH_SIZE;
+import static org.apache.hadoop.hbase.KeyValue.COLUMN_FAMILY_DELIMITER;
+import static org.apache.hadoop.hbase.KeyValue.getDelimiter;
+import static org.apache.hadoop.hbase.KeyValue.COLUMN_FAMILY_DELIM_ARRAY;
 
 import java.io.DataOutputStream;
 import java.io.IOException;
@@ -126,6 +129,51 @@ public final class CellUtil {
 return output;
   }
 
+  /**
+   * Makes a column in family:qualifier form from separate byte arrays.
+   * 
+   * Not recommended for usage as this is old-style API.
+   * @param family
+   * @param qualifier
+   * @return family:qualifier
+   */
+  public static byte [] makeColumn(byte [] family, byte [] qualifier) {
+return Bytes.add(family, COLUMN_FAMILY_DELIM_ARRAY, qualifier);
+  }
+
+  /**
+   * Splits a column in {@code family:qualifier} form into separate byte 
arrays. An empty qualifier
+   * (ie, {@code fam:}) is parsed as { fam, EMPTY_BYTE_ARRAY } 
while no delimiter (ie,
+   * {@code fam}) is parsed as an array of one 

hbase git commit: HBASE-13844 Move static helper methods from KeyValue into CellUtils

2017-09-28 Thread chia7712
Repository: hbase
Updated Branches:
  refs/heads/master b5b4108fc -> afce850cf


HBASE-13844 Move static helper methods from KeyValue into CellUtils

Signed-off-by: Chia-Ping Tsai 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/afce850c
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/afce850c
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/afce850c

Branch: refs/heads/master
Commit: afce850cfdc827cd727418b5f7a2543f2160f790
Parents: b5b4108
Author: Andy Yang 
Authored: Tue Sep 26 05:19:31 2017 -0700
Committer: Chia-Ping Tsai 
Committed: Fri Sep 29 11:50:23 2017 +0800

--
 .../hadoop/hbase/client/RegionInfoBuilder.java  |   9 -
 .../java/org/apache/hadoop/hbase/CellUtil.java  |  48 
 .../java/org/apache/hadoop/hbase/KeyValue.java  | 257 +--
 .../java/org/apache/hadoop/hbase/TableName.java |  16 --
 .../hadoop/hbase/mapred/GroupingTableMap.java   |   3 +-
 .../hbase/mapreduce/GroupingTableMapper.java|   3 +-
 .../hbase/mapreduce/TableInputFormat.java   |   4 +-
 .../apache/hadoop/hbase/rest/RowResource.java   |  19 +-
 .../hadoop/hbase/rest/RowResultGenerator.java   |   4 +-
 .../hbase/rest/ScannerInstanceResource.java |   3 +-
 .../hbase/rest/ScannerResultGenerator.java  |   4 +-
 .../hadoop/hbase/rest/client/RemoteHTable.java  |   2 +-
 .../hadoop/hbase/rest/model/CellModel.java  |   3 +-
 .../hadoop/hbase/rest/TestScannerResource.java  |   4 +-
 .../hbase/rest/TestScannersWithFilters.java |   2 +-
 .../hbase/rest/TestScannersWithLabels.java  |   4 +-
 .../hadoop/hbase/rest/TestTableResource.java|   4 +-
 .../hadoop/hbase/io/hfile/FixedFileTrailer.java |   4 +-
 .../org/apache/hadoop/hbase/HBaseTestCase.java  |   2 +-
 .../hadoop/hbase/thrift/IncrementCoalescer.java |   4 +-
 .../hadoop/hbase/thrift/ThriftServerRunner.java |  35 +--
 .../hadoop/hbase/thrift/ThriftUtilities.java|  10 +-
 22 files changed, 107 insertions(+), 337 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/afce850c/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfoBuilder.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfoBuilder.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfoBuilder.java
index 7d5c476..acff186 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfoBuilder.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfoBuilder.java
@@ -487,14 +487,5 @@ public class RegionInfoBuilder {
   return RegionInfo.COMPARATOR.compare(this, other);
 }
 
-/**
- * @return Comparator to use comparing {@link KeyValue}s.
- * @deprecated Use Region#getCellComparator().  deprecated for hbase 2.0, 
remove for hbase 3.0
- */
-@Deprecated
-public KeyValue.KVComparator getComparator() {
-  return isMetaRegion()?
-  KeyValue.META_COMPARATOR: KeyValue.COMPARATOR;
-}
   }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hbase/blob/afce850c/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java
--
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java 
b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java
index a3029f8..dc5df30 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java
@@ -20,6 +20,9 @@ package org.apache.hadoop.hbase;
 
 import static org.apache.hadoop.hbase.HConstants.EMPTY_BYTE_ARRAY;
 import static org.apache.hadoop.hbase.Tag.TAG_LENGTH_SIZE;
+import static org.apache.hadoop.hbase.KeyValue.COLUMN_FAMILY_DELIMITER;
+import static org.apache.hadoop.hbase.KeyValue.getDelimiter;
+import static org.apache.hadoop.hbase.KeyValue.COLUMN_FAMILY_DELIM_ARRAY;
 
 import java.io.DataOutputStream;
 import java.io.IOException;
@@ -126,6 +129,51 @@ public final class CellUtil {
 return output;
   }
 
+  /**
+   * Makes a column in family:qualifier form from separate byte arrays.
+   * 
+   * Not recommended for usage as this is old-style API.
+   * @param family
+   * @param qualifier
+   * @return family:qualifier
+   */
+  public static byte [] makeColumn(byte [] family, byte [] qualifier) {
+return Bytes.add(family, COLUMN_FAMILY_DELIM_ARRAY, qualifier);
+  }
+
+  /**
+   * Splits a column in {@code family:qualifier} form into separate byte 
arrays. An empty qualifier
+   * (ie, {@code fam:}) is parsed as { fam, EMPTY_BYTE_ARRAY } 
while no delimiter (ie,
+   * {@code fam}) is parsed as an array of one element, { 

hbase git commit: HBASE-18884 Coprocessor Design Improvements follow up of HBASE-17732; MINOR AMENDMENT adding README to design-doc dir

2017-09-28 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/master 74d0adce6 -> b5b4108fc


HBASE-18884 Coprocessor Design Improvements follow up of HBASE-17732; MINOR 
AMENDMENT adding README to design-doc dir


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/b5b4108f
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/b5b4108f
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/b5b4108f

Branch: refs/heads/master
Commit: b5b4108fcedeea4e25e86f426e313ffa51229d10
Parents: 74d0adc
Author: Michael Stack 
Authored: Thu Sep 28 13:24:40 2017 -0700
Committer: Michael Stack 
Committed: Thu Sep 28 13:24:40 2017 -0700

--
 dev-support/design-docs/README.txt| 10 ++
 .../main/java/org/apache/hadoop/hbase/Coprocessor.java|  2 +-
 2 files changed, 11 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/b5b4108f/dev-support/design-docs/README.txt
--
diff --git a/dev-support/design-docs/README.txt 
b/dev-support/design-docs/README.txt
new file mode 100644
index 000..be57e8b
--- /dev/null
+++ b/dev-support/design-docs/README.txt
@@ -0,0 +1,10 @@
+This directory hosts design docs and proposals. Add here final
+or near-final writeups so they are easy to find and part of
+the code base.
+
+Be warned that final delivery may not be a perfect reflection
+of what is captured at design time; implementation bends as
+it encounters hurdles not conceptualized at design-time.
+
+The effort at capturing all design in a single directory rather than spread
+about JIRA as attachments was begun in September of 2017.

http://git-wip-us.apache.org/repos/asf/hbase/blob/b5b4108f/hbase-client/src/main/java/org/apache/hadoop/hbase/Coprocessor.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/Coprocessor.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/Coprocessor.java
index c4003ae..422e064 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/Coprocessor.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/Coprocessor.java
@@ -33,7 +33,7 @@ import org.apache.yetus.audience.InterfaceStability;
  * the above mentioned 4 coprocessors, it'll fail to be loaded by any 
coprocessor host.
  *
  * Example:
- * Building a coprocessor to observer Master operations.
+ * Building a coprocessor to observe Master operations.
  * 
  * class MyMasterCoprocessor implements MasterCoprocessor {
  *   Override



hbase git commit: HBASE-18884 Coprocessor Design Improvements follow up of HBASE-17732; MINOR AMENDMENT adding README to design-doc dir

2017-09-28 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/branch-2 a6a303816 -> ac933f7af


HBASE-18884 Coprocessor Design Improvements follow up of HBASE-17732; MINOR 
AMENDMENT adding README to design-doc dir


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/ac933f7a
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/ac933f7a
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/ac933f7a

Branch: refs/heads/branch-2
Commit: ac933f7af2188f1c834a4d5334880c5f6d5dbc5c
Parents: a6a3038
Author: Michael Stack 
Authored: Thu Sep 28 13:24:40 2017 -0700
Committer: Michael Stack 
Committed: Thu Sep 28 13:25:07 2017 -0700

--
 dev-support/design-docs/README.txt| 10 ++
 .../main/java/org/apache/hadoop/hbase/Coprocessor.java|  2 +-
 2 files changed, 11 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/ac933f7a/dev-support/design-docs/README.txt
--
diff --git a/dev-support/design-docs/README.txt 
b/dev-support/design-docs/README.txt
new file mode 100644
index 000..be57e8b
--- /dev/null
+++ b/dev-support/design-docs/README.txt
@@ -0,0 +1,10 @@
+This directory hosts design docs and proposals. Add here final
+or near-final writeups so they are easy to find and part of
+the code base.
+
+Be warned that final delivery may not be a perfect reflection
+of what is captured at design time; implementation bends as
+it encounters hurdles not conceptualized at design-time.
+
+The effort at capturing all design in a single directory rather than spread
+about JIRA as attachments was begun in September of 2017.

http://git-wip-us.apache.org/repos/asf/hbase/blob/ac933f7a/hbase-client/src/main/java/org/apache/hadoop/hbase/Coprocessor.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/Coprocessor.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/Coprocessor.java
index c4003ae..422e064 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/Coprocessor.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/Coprocessor.java
@@ -33,7 +33,7 @@ import org.apache.yetus.audience.InterfaceStability;
  * the above mentioned 4 coprocessors, it'll fail to be loaded by any 
coprocessor host.
  *
  * Example:
- * Building a coprocessor to observer Master operations.
+ * Building a coprocessor to observe Master operations.
  * 
  * class MyMasterCoprocessor implements MasterCoprocessor {
  *   Override



hbase git commit: HBASE-18884 Coprocessor Design Improvements follow up of HBASE-17732

2017-09-28 Thread appy
Repository: hbase
Updated Branches:
  refs/heads/branch-2 a71f62de0 -> a6a303816


HBASE-18884 Coprocessor Design Improvements follow up of HBASE-17732

- Change Service Coprocessor#getService() to List 
Coprocessor#getServices()
- Checkin the finalized design doc into repo
- Added example to javadoc of Coprocessor base interface on how to implement 
one in the new design


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/a6a30381
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/a6a30381
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/a6a30381

Branch: refs/heads/branch-2
Commit: a6a303816cc429db6ed90f634b2d6126f0e8d8fc
Parents: a71f62d
Author: Apekshit Sharma 
Authored: Wed Sep 27 18:06:12 2017 -0700
Committer: Apekshit Sharma 
Committed: Thu Sep 28 10:33:30 2017 -0700

--
 ...ad_of_inheritance-HBASE-17732-2017_09_27.pdf | Bin 0 -> 161724 bytes
 .../org/apache/hadoop/hbase/Coprocessor.java|  36 ---
 .../coprocessor/AggregateImplementation.java|   6 ++--
 .../apache/hadoop/hbase/coprocessor/Export.java |   6 ++--
 .../security/access/SecureBulkLoadEndpoint.java |   6 ++--
 .../coprocessor/ColumnAggregationEndpoint.java  |   7 ++--
 .../ColumnAggregationEndpointNullResponse.java  |   6 ++--
 .../ColumnAggregationEndpointWithErrors.java|   6 ++--
 .../coprocessor/ProtobufCoprocessorService.java |   6 ++--
 .../TestAsyncCoprocessorEndpoint.java   |   6 ++--
 .../TestRegionServerCoprocessorEndpoint.java|   8 ++---
 .../regionserver/TestServerCustomProtocol.java  |   6 ++--
 .../coprocessor/example/BulkDeleteEndpoint.java |   6 ++--
 .../example/RefreshHFilesEndpoint.java  |   6 ++--
 .../coprocessor/example/RowCountEndpoint.java   |   6 ++--
 .../hbase/rsgroup/RSGroupAdminEndpoint.java |   5 +--
 .../coprocessor/BaseRowProcessorEndpoint.java   |   6 ++--
 .../CoprocessorServiceBackwardCompatiblity.java |  14 
 .../coprocessor/MultiRowMutationEndpoint.java   |   6 ++--
 .../hbase/coprocessor/WALCoprocessor.java   |   2 +-
 .../hbase/master/MasterCoprocessorHost.java |   6 +++-
 .../regionserver/RegionCoprocessorHost.java |   7 ++--
 .../RegionServerCoprocessorHost.java|   6 +++-
 .../hbase/security/access/AccessController.java |   8 ++---
 .../hbase/security/token/TokenProvider.java |   7 ++--
 .../visibility/VisibilityController.java|   6 ++--
 .../security/access/TestAccessController.java   |   6 ++--
 27 files changed, 117 insertions(+), 79 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/a6a30381/dev-support/design-docs/Coprocessor_Design_Improvements-Use_composition_instead_of_inheritance-HBASE-17732-2017_09_27.pdf
--
diff --git 
a/dev-support/design-docs/Coprocessor_Design_Improvements-Use_composition_instead_of_inheritance-HBASE-17732-2017_09_27.pdf
 
b/dev-support/design-docs/Coprocessor_Design_Improvements-Use_composition_instead_of_inheritance-HBASE-17732-2017_09_27.pdf
new file mode 100644
index 000..30a6d54
Binary files /dev/null and 
b/dev-support/design-docs/Coprocessor_Design_Improvements-Use_composition_instead_of_inheritance-HBASE-17732-2017_09_27.pdf
 differ

http://git-wip-us.apache.org/repos/asf/hbase/blob/a6a30381/hbase-client/src/main/java/org/apache/hadoop/hbase/Coprocessor.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/Coprocessor.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/Coprocessor.java
index 38fe74e..c4003ae 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/Coprocessor.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/Coprocessor.java
@@ -20,14 +20,42 @@
 package org.apache.hadoop.hbase;
 
 import java.io.IOException;
-import java.util.Optional;
+import java.util.Collections;
 
 import com.google.protobuf.Service;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.yetus.audience.InterfaceStability;
 
 /**
- * Coprocessor interface.
+ * Base interface for the 4 coprocessors - MasterCoprocessor, 
RegionCoprocessor,
+ * RegionServerCoprocessor, and WALCoprocessor.
+ * Do NOT implement this interface directly. Unless an implementation 
implements one (or more) of
+ * the above mentioned 4 coprocessors, it'll fail to be loaded by any 
coprocessor host.
+ *
+ * Example:
+ * Building a coprocessor to observer Master operations.
+ * 
+ * class MyMasterCoprocessor implements MasterCoprocessor {
+ *   Override
+ *   public OptionalMasterObserver> getMasterObserver() {
+ * return new MyMasterObserver();
+ *   }
+ * }
+ *
+ * class MyMasterObserver implements MasterObserver {
+ *   
+ * }
+ * 
+ *
+ 

hbase git commit: HBASE-18884 Coprocessor Design Improvements follow up of HBASE-17732

2017-09-28 Thread appy
Repository: hbase
Updated Branches:
  refs/heads/master ca2959824 -> 74d0adce6


HBASE-18884 Coprocessor Design Improvements follow up of HBASE-17732

- Change Service Coprocessor#getService() to List 
Coprocessor#getServices()
- Checkin the finalized design doc into repo
- Added example to javadoc of Coprocessor base interface on how to implement 
one in the new design


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/74d0adce
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/74d0adce
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/74d0adce

Branch: refs/heads/master
Commit: 74d0adce61fc39ef9d1ec2254dcd0f53181bb62c
Parents: ca29598
Author: Apekshit Sharma 
Authored: Wed Sep 27 18:06:12 2017 -0700
Committer: Apekshit Sharma 
Committed: Thu Sep 28 10:25:47 2017 -0700

--
 ...ad_of_inheritance-HBASE-17732-2017_09_27.pdf | Bin 0 -> 161724 bytes
 .../org/apache/hadoop/hbase/Coprocessor.java|  36 ---
 .../coprocessor/AggregateImplementation.java|   6 ++--
 .../apache/hadoop/hbase/coprocessor/Export.java |   6 ++--
 .../security/access/SecureBulkLoadEndpoint.java |   6 ++--
 .../coprocessor/ColumnAggregationEndpoint.java  |   7 ++--
 .../ColumnAggregationEndpointNullResponse.java  |   6 ++--
 .../ColumnAggregationEndpointWithErrors.java|   6 ++--
 .../coprocessor/ProtobufCoprocessorService.java |   6 ++--
 .../TestAsyncCoprocessorEndpoint.java   |   6 ++--
 .../TestRegionServerCoprocessorEndpoint.java|   8 ++---
 .../regionserver/TestServerCustomProtocol.java  |   6 ++--
 .../coprocessor/example/BulkDeleteEndpoint.java |   6 ++--
 .../example/RefreshHFilesEndpoint.java  |   6 ++--
 .../coprocessor/example/RowCountEndpoint.java   |   6 ++--
 .../hbase/rsgroup/RSGroupAdminEndpoint.java |   5 +--
 .../coprocessor/BaseRowProcessorEndpoint.java   |   6 ++--
 .../CoprocessorServiceBackwardCompatiblity.java |  14 
 .../coprocessor/MultiRowMutationEndpoint.java   |   6 ++--
 .../hbase/coprocessor/WALCoprocessor.java   |   2 +-
 .../hbase/master/MasterCoprocessorHost.java |   6 +++-
 .../regionserver/RegionCoprocessorHost.java |   7 ++--
 .../RegionServerCoprocessorHost.java|   6 +++-
 .../hbase/security/access/AccessController.java |   8 ++---
 .../hbase/security/token/TokenProvider.java |   7 ++--
 .../visibility/VisibilityController.java|   6 ++--
 .../security/access/TestAccessController.java   |   6 ++--
 27 files changed, 117 insertions(+), 79 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/74d0adce/dev-support/design-docs/Coprocessor_Design_Improvements-Use_composition_instead_of_inheritance-HBASE-17732-2017_09_27.pdf
--
diff --git 
a/dev-support/design-docs/Coprocessor_Design_Improvements-Use_composition_instead_of_inheritance-HBASE-17732-2017_09_27.pdf
 
b/dev-support/design-docs/Coprocessor_Design_Improvements-Use_composition_instead_of_inheritance-HBASE-17732-2017_09_27.pdf
new file mode 100644
index 000..30a6d54
Binary files /dev/null and 
b/dev-support/design-docs/Coprocessor_Design_Improvements-Use_composition_instead_of_inheritance-HBASE-17732-2017_09_27.pdf
 differ

http://git-wip-us.apache.org/repos/asf/hbase/blob/74d0adce/hbase-client/src/main/java/org/apache/hadoop/hbase/Coprocessor.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/Coprocessor.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/Coprocessor.java
index 38fe74e..c4003ae 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/Coprocessor.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/Coprocessor.java
@@ -20,14 +20,42 @@
 package org.apache.hadoop.hbase;
 
 import java.io.IOException;
-import java.util.Optional;
+import java.util.Collections;
 
 import com.google.protobuf.Service;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.yetus.audience.InterfaceStability;
 
 /**
- * Coprocessor interface.
+ * Base interface for the 4 coprocessors - MasterCoprocessor, 
RegionCoprocessor,
+ * RegionServerCoprocessor, and WALCoprocessor.
+ * Do NOT implement this interface directly. Unless an implementation 
implements one (or more) of
+ * the above mentioned 4 coprocessors, it'll fail to be loaded by any 
coprocessor host.
+ *
+ * Example:
+ * Building a coprocessor to observer Master operations.
+ * 
+ * class MyMasterCoprocessor implements MasterCoprocessor {
+ *   Override
+ *   public OptionalMasterObserver> getMasterObserver() {
+ * return new MyMasterObserver();
+ *   }
+ * }
+ *
+ * class MyMasterObserver implements MasterObserver {
+ *   
+ * }
+ * 
+ *
+ * 

hbase git commit: HBASE-18887 After full backup passed on hdfs root and incremental failed, full backup cannot be cleaned (Vladimir Rodionov)

2017-09-28 Thread tedyu
Repository: hbase
Updated Branches:
  refs/heads/branch-2 6693f45fa -> a71f62de0


HBASE-18887 After full backup passed on hdfs root and incremental failed, full 
backup cannot be cleaned (Vladimir Rodionov)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/a71f62de
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/a71f62de
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/a71f62de

Branch: refs/heads/branch-2
Commit: a71f62de025d0c494543cbe304f2a6c9bc3697cf
Parents: 6693f45
Author: tedyu 
Authored: Thu Sep 28 10:20:48 2017 -0700
Committer: tedyu 
Committed: Thu Sep 28 10:20:48 2017 -0700

--
 .../hadoop/hbase/backup/impl/BackupCommands.java  | 18 +++---
 .../hadoop/hbase/backup/TestBackupBase.java   | 12 
 .../hbase/backup/TestBackupCommandLineTool.java   | 12 +++-
 3 files changed, 34 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/a71f62de/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCommands.java
--
diff --git 
a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCommands.java
 
b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCommands.java
index 2dfd46e..194d350 100644
--- 
a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCommands.java
+++ 
b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCommands.java
@@ -58,11 +58,11 @@ import 
org.apache.hadoop.hbase.backup.BackupRestoreConstants.BackupCommand;
 import org.apache.hadoop.hbase.backup.BackupType;
 import org.apache.hadoop.hbase.backup.util.BackupSet;
 import org.apache.hadoop.hbase.backup.util.BackupUtils;
-import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.yetus.audience.InterfaceAudience;
 
 /**
  * General backup commands, options and usage messages
@@ -73,6 +73,9 @@ public final class BackupCommands {
 
   public final static String INCORRECT_USAGE = "Incorrect usage";
 
+  public final static String TOP_LEVEL_NOT_ALLOWED =
+  "Top level (root) folder is not allowed to be a backup destination";
+
   public static final String USAGE = "Usage: hbase backup COMMAND 
[command-specific arguments]\n"
   + "where COMMAND is one of:\n" + "  create create a new backup 
image\n"
   + "  delete delete an existing backup image\n"
@@ -283,7 +286,11 @@ public final class BackupCommands {
 printUsage();
 throw new IOException(INCORRECT_USAGE);
   }
-
+  String targetBackupDir = args[2];
+  // Check if backup destination is top level (root) folder - not allowed
+  if (isRootFolder(targetBackupDir)) {
+throw new IOException(TOP_LEVEL_NOT_ALLOWED);
+  }
   String tables = null;
 
   // Check if we have both: backup set and list of tables
@@ -331,7 +338,7 @@ public final class BackupCommands {
 .withBackupType(BackupType.valueOf(args[1].toUpperCase()))
 .withTableList(
   tables != null ? 
Lists.newArrayList(BackupUtils.parseTableNames(tables)) : null)
-.withTargetRootDir(args[2]).withTotalTasks(workers)
+.withTargetRootDir(targetBackupDir).withTotalTasks(workers)
 
.withBandwidthPerTasks(bandwidth).withBackupSetName(setName).build();
 String backupId = admin.backupTables(request);
 System.out.println("Backup session " + backupId + " finished. Status: 
SUCCESS");
@@ -341,6 +348,11 @@ public final class BackupCommands {
   }
 }
 
+private boolean isRootFolder(String targetBackupDir) {
+  Path p = new Path(targetBackupDir);
+  return p.isRoot();
+}
+
 private boolean verifyPath(String path) {
   try {
 Path p = new Path(path);

http://git-wip-us.apache.org/repos/asf/hbase/blob/a71f62de/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java
--
diff --git 
a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java 
b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java
index 8752ca2..69db342 100644
--- 
a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java
+++ 
b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java
@@ -98,8 +98,8 @@ public class TestBackupBase {
   protected static final byte[] qualName = 

hbase git commit: HBASE-18887 After full backup passed on hdfs root and incremental failed, full backup cannot be cleaned (Vladimir Rodionov)

2017-09-28 Thread tedyu
Repository: hbase
Updated Branches:
  refs/heads/master a11a35a11 -> ca2959824


HBASE-18887 After full backup passed on hdfs root and incremental failed, full 
backup cannot be cleaned (Vladimir Rodionov)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/ca295982
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/ca295982
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/ca295982

Branch: refs/heads/master
Commit: ca2959824ddf206143b819ae85bb29b6b60faa3b
Parents: a11a35a
Author: tedyu 
Authored: Thu Sep 28 10:20:02 2017 -0700
Committer: tedyu 
Committed: Thu Sep 28 10:20:02 2017 -0700

--
 .../hadoop/hbase/backup/impl/BackupCommands.java  | 18 +++---
 .../hadoop/hbase/backup/TestBackupBase.java   | 12 
 .../hbase/backup/TestBackupCommandLineTool.java   | 12 +++-
 3 files changed, 34 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/ca295982/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCommands.java
--
diff --git 
a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCommands.java
 
b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCommands.java
index 2dfd46e..194d350 100644
--- 
a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCommands.java
+++ 
b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCommands.java
@@ -58,11 +58,11 @@ import 
org.apache.hadoop.hbase.backup.BackupRestoreConstants.BackupCommand;
 import org.apache.hadoop.hbase.backup.BackupType;
 import org.apache.hadoop.hbase.backup.util.BackupSet;
 import org.apache.hadoop.hbase.backup.util.BackupUtils;
-import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.yetus.audience.InterfaceAudience;
 
 /**
  * General backup commands, options and usage messages
@@ -73,6 +73,9 @@ public final class BackupCommands {
 
   public final static String INCORRECT_USAGE = "Incorrect usage";
 
+  public final static String TOP_LEVEL_NOT_ALLOWED =
+  "Top level (root) folder is not allowed to be a backup destination";
+
   public static final String USAGE = "Usage: hbase backup COMMAND 
[command-specific arguments]\n"
   + "where COMMAND is one of:\n" + "  create create a new backup 
image\n"
   + "  delete delete an existing backup image\n"
@@ -283,7 +286,11 @@ public final class BackupCommands {
 printUsage();
 throw new IOException(INCORRECT_USAGE);
   }
-
+  String targetBackupDir = args[2];
+  // Check if backup destination is top level (root) folder - not allowed
+  if (isRootFolder(targetBackupDir)) {
+throw new IOException(TOP_LEVEL_NOT_ALLOWED);
+  }
   String tables = null;
 
   // Check if we have both: backup set and list of tables
@@ -331,7 +338,7 @@ public final class BackupCommands {
 .withBackupType(BackupType.valueOf(args[1].toUpperCase()))
 .withTableList(
   tables != null ? 
Lists.newArrayList(BackupUtils.parseTableNames(tables)) : null)
-.withTargetRootDir(args[2]).withTotalTasks(workers)
+.withTargetRootDir(targetBackupDir).withTotalTasks(workers)
 
.withBandwidthPerTasks(bandwidth).withBackupSetName(setName).build();
 String backupId = admin.backupTables(request);
 System.out.println("Backup session " + backupId + " finished. Status: 
SUCCESS");
@@ -341,6 +348,11 @@ public final class BackupCommands {
   }
 }
 
+private boolean isRootFolder(String targetBackupDir) {
+  Path p = new Path(targetBackupDir);
+  return p.isRoot();
+}
+
 private boolean verifyPath(String path) {
   try {
 Path p = new Path(path);

http://git-wip-us.apache.org/repos/asf/hbase/blob/ca295982/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java
--
diff --git 
a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java 
b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java
index 8752ca2..69db342 100644
--- 
a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java
+++ 
b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java
@@ -98,8 +98,8 @@ public class TestBackupBase {
   protected static final byte[] qualName = 

[25/51] [partial] hbase-site git commit: Published site at .

2017-09-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/67deb422/devapidocs/org/apache/hadoop/hbase/CoprocessorEnvironment.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/CoprocessorEnvironment.html 
b/devapidocs/org/apache/hadoop/hbase/CoprocessorEnvironment.html
index 4463954..31a5cac 100644
--- a/devapidocs/org/apache/hadoop/hbase/CoprocessorEnvironment.html
+++ b/devapidocs/org/apache/hadoop/hbase/CoprocessorEnvironment.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = {"i0":6,"i1":6,"i2":6,"i3":6,"i4":6,"i5":6,"i6":6,"i7":6,"i8":6};
+var methods = 
{"i0":6,"i1":6,"i2":6,"i3":6,"i4":6,"i5":6,"i6":6,"i7":6,"i8":6,"i9":6,"i10":6};
 var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],4:["t3","Abstract Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -93,7 +93,7 @@ var activeTableTab = "activeTableTab";
 
 
 org.apache.hadoop.hbase
-Interface 
CoprocessorEnvironment
+Interface 
CoprocessorEnvironmentC extends Coprocessor
 
 
 
@@ -105,12 +105,12 @@ var activeTableTab = "activeTableTab";
 
 
 All Known Implementing Classes:
-CoprocessorHost.Environment, MasterCoprocessorHost.MasterEnvironment, RegionCoprocessorHost.RegionEnvironment,
 RegionServerCoprocessorHost.RegionServerEnvironment,
 WALCoprocessorHost.WALEnvironment
+BaseEnvironment, MasterCoprocessorHost.MasterEnvironment, RegionCoprocessorHost.RegionEnvironment,
 RegionServerCoprocessorHost.RegionServerEnvironment,
 WALCoprocessorHost.WALEnvironment
 
 
 
 @InterfaceAudience.Private
-public interface CoprocessorEnvironment
+public interface CoprocessorEnvironmentC
 extends Coprocessor
 Coprocessor environment state.
 
 
@@ -143,7 +143,7 @@ public interface getHBaseVersion()
 
 
-Coprocessor
+C
 getInstance()
 
 
@@ -167,6 +167,19 @@ public interface int
 getVersion()
 
+
+void
+shutdown()
+Clean up the environment.
+
+
+
+void
+startup()
+After a coprocessor has been loaded in an encapsulation of 
an environment, CoprocessorHost
+ calls this function to initialize the environment.
+
+
 
 
 
@@ -214,7 +227,7 @@ public interface 
 
 getInstance
-CoprocessorgetInstance()
+CgetInstance()
 
 Returns:
 the loaded coprocessor instance
@@ -297,7 +310,7 @@ public interface 
 
 
-
+
 
 getClassLoader
 http://docs.oracle.com/javase/8/docs/api/java/lang/ClassLoader.html?is-external=true;
 title="class or interface in java.lang">ClassLoadergetClassLoader()
@@ -307,6 +320,32 @@ public interface 
+
+
+
+
+startup
+voidstartup()
+  throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
+After a coprocessor has been loaded in an encapsulation of 
an environment, CoprocessorHost
+ calls this function to initialize the environment.
+
+Throws:
+http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
+
+
+
+
+
+
+
+
+shutdown
+voidshutdown()
+Clean up the environment. Called by CoprocessorHost when it 
itself is shutting down.
+
+
 
 
 



[15/51] [partial] hbase-site git commit: Published site at .

2017-09-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/67deb422/devapidocs/org/apache/hadoop/hbase/class-use/HRegionLocation.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/class-use/HRegionLocation.html 
b/devapidocs/org/apache/hadoop/hbase/class-use/HRegionLocation.html
index 75703e3..90760f3 100644
--- a/devapidocs/org/apache/hadoop/hbase/class-use/HRegionLocation.html
+++ b/devapidocs/org/apache/hadoop/hbase/class-use/HRegionLocation.html
@@ -159,8 +159,8 @@ service.
 
 
 static HRegionLocation
-MetaTableAccessor.getRegionLocation(Connectionconnection,
- HRegionInforegionInfo)
+MetaTableAccessor.getRegionLocation(Connectionconnection,
+ RegionInforegionInfo)
 Returns the HRegionLocation from meta for the given 
region
 
 
@@ -170,8 +170,8 @@ service.
 
 
 private static HRegionLocation
-AsyncMetaTableAccessor.getRegionLocation(Resultr,
- HRegionInforegionInfo,
+AsyncMetaTableAccessor.getRegionLocation(Resultr,
+ RegionInforegionInfo,
  intreplicaId)
 Returns the HRegionLocation parsed from the given meta row 
Result
  for the given regionInfo and replicaId.
@@ -179,8 +179,8 @@ service.
 
 
 private static HRegionLocation
-MetaTableAccessor.getRegionLocation(Resultr,
- HRegionInforegionInfo,
+MetaTableAccessor.getRegionLocation(Resultr,
+ RegionInforegionInfo,
  intreplicaId)
 Returns the HRegionLocation parsed from the given meta row 
Result
  for the given regionInfo and replicaId.
@@ -1116,11 +1116,11 @@ service.
 
 
 
-RegionReplicaReplayCallable(ClusterConnectionconnection,
+RegionReplicaReplayCallable(ClusterConnectionconnection,
RpcControllerFactoryrpcControllerFactory,
TableNametableName,
HRegionLocationlocation,
-   HRegionInforegionInfo,
+   RegionInforegionInfo,
byte[]row,
http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListWAL.Entryentries,
http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/AtomicLong.html?is-external=true;
 title="class or interface in 
java.util.concurrent.atomic">AtomicLongskippedEntries)

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/67deb422/devapidocs/org/apache/hadoop/hbase/class-use/NotAllMetaRegionsOnlineException.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/class-use/NotAllMetaRegionsOnlineException.html
 
b/devapidocs/org/apache/hadoop/hbase/class-use/NotAllMetaRegionsOnlineException.html
index f3a66bc..c90ac47 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/class-use/NotAllMetaRegionsOnlineException.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/class-use/NotAllMetaRegionsOnlineException.html
@@ -108,8 +108,8 @@
 
 
 static void
-MetaTableAccessor.addDaughter(Connectionconnection,
-   HRegionInforegionInfo,
+MetaTableAccessor.addDaughter(Connectionconnection,
+   RegionInforegionInfo,
ServerNamesn,
longopenSeqNum)
 Adds a daughter region entry to meta.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/67deb422/devapidocs/org/apache/hadoop/hbase/class-use/PleaseHoldException.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/class-use/PleaseHoldException.html 
b/devapidocs/org/apache/hadoop/hbase/class-use/PleaseHoldException.html
index eaa3968..17e2ece 100644
--- a/devapidocs/org/apache/hadoop/hbase/class-use/PleaseHoldException.html
+++ b/devapidocs/org/apache/hadoop/hbase/class-use/PleaseHoldException.html
@@ -126,7 +126,7 @@
 
 
 private void
-AssignmentManager.checkFailoverCleanupCompleted(HRegionInfohri)
+AssignmentManager.checkFailoverCleanupCompleted(RegionInfohri)
 Used to check if the failover cleanup is done.
 
 
@@ -136,9 +136,9 @@
 
 
 private void
-AssignmentManager.updateRegionTransition(ServerNameserverName,
+AssignmentManager.updateRegionTransition(ServerNameserverName,
   
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCodestate,
-  HRegionInforegionInfo,
+  RegionInforegionInfo,
   longseqId)
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/67deb422/devapidocs/org/apache/hadoop/hbase/class-use/Server.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/class-use/Server.html 
b/devapidocs/org/apache/hadoop/hbase/class-use/Server.html
index 6fa70e3..8b52311 100644
--- 

[29/51] [partial] hbase-site git commit: Published site at .

2017-09-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/67deb422/checkstyle.rss
--
diff --git a/checkstyle.rss b/checkstyle.rss
index da21a01..0dcc4f4 100644
--- a/checkstyle.rss
+++ b/checkstyle.rss
@@ -25,8 +25,8 @@ under the License.
 en-us
 2007 - 2017 The Apache Software Foundation
 
-  File: 2042,
- Errors: 14018,
+  File: 2051,
+ Errors: 13704,
  Warnings: 0,
  Infos: 0
   
@@ -69,7 +69,7 @@ under the License.
   0
 
 
-  2
+  1
 
   
   
@@ -279,7 +279,7 @@ under the License.
   0
 
 
-  17
+  18
 
   
   
@@ -377,7 +377,7 @@ under the License.
   0
 
 
-  27
+  13
 
   
   
@@ -601,7 +601,7 @@ under the License.
   0
 
 
-  2
+  1
 
   
   
@@ -671,7 +671,7 @@ under the License.
   0
 
 
-  5
+  1
 
   
   
@@ -993,7 +993,7 @@ under the License.
   0
 
 
-  63
+  59
 
   
   
@@ -1175,7 +1175,7 @@ under the License.
   0
 
 
-  5
+  4
 
   
   
@@ -1217,7 +1217,7 @@ under the License.
   0
 
 
-  10
+  14
 
   
   
@@ -1628,6 +1628,20 @@ under the License.
   
   
 
+  http://hbase.apache.org/checkstyle.html#org.apache.hadoop.hbase.regionserver.ImmutableOnlineRegions.java;>org/apache/hadoop/hbase/regionserver/ImmutableOnlineRegions.java
+
+
+  0
+
+
+  0
+
+
+  6
+
+  
+  
+
   http://hbase.apache.org/checkstyle.html#org.apache.hadoop.hbase.types.RawBytesFixedLength.java;>org/apache/hadoop/hbase/types/RawBytesFixedLength.java
 
 
@@ -2127,7 +2141,7 @@ under the License.
   0
 
 
-  3
+  1
 
   
   
@@ -2267,7 +2281,7 @@ under the License.
   0
 
 
-  2
+  1
 
   
   
@@ -3121,7 +3135,7 @@ under the License.
   0
 
 
-  8
+  9
 
   
   
@@ -3289,7 +3303,7 @@ under the License.
   0
 
 
-  23
+  21
 
   
   
@@ -3863,7 +3877,7 @@ under the License.
   0
 
 
-  1
+  2
 
   
   
@@ -3947,7 +3961,7 @@ under the License.
   0
 
 
-  19
+  6
 
   
   
@@ -3975,7 +3989,7 @@ under the License.
   0
 
 
-  13
+  11
 
   
   
@@ -4507,7 +4521,7 @@ under the License.
   0
 
 
-  63
+  62
 
   
   
@@ -4521,7 +4535,7 @@ under the License.
   0
 
 
-  16
+  13
 
   
   
@@ -4857,7 +4871,7 @@ under the License.
   0
 
 
-  7
+  8
 
   
   
@@ -5011,7 +5025,7 @@ under the License.
   0
 
  

[26/51] [partial] hbase-site git commit: Published site at .

2017-09-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/67deb422/devapidocs/org/apache/hadoop/hbase/Abortable.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/Abortable.html 
b/devapidocs/org/apache/hadoop/hbase/Abortable.html
index 4eafb2b..8b9355f 100644
--- a/devapidocs/org/apache/hadoop/hbase/Abortable.html
+++ b/devapidocs/org/apache/hadoop/hbase/Abortable.html
@@ -101,7 +101,7 @@ var activeTableTab = "activeTableTab";
 
 
 All Known Subinterfaces:
-Admin, ClusterConnection, Connection, MasterServices, OnlineRegions, RegionServerServices, Server
 
+Admin, ClusterConnection, Connection, MasterServices, RegionServerServices, Server
 
 
 All Known Implementing Classes:
@@ -109,9 +109,8 @@ var activeTableTab = "activeTableTab";
 
 
 
-@InterfaceAudience.LimitedPrivate(value={"Coprocesssor","Phoenix"})
- @InterfaceStability.Evolving
-public interface Abortable
+@InterfaceAudience.Private
+public interface Abortable
 Interface to support the aborting of a given server or 
client.
  
  This is used primarily for ZooKeeper usage when we could get an unexpected
@@ -170,7 +169,7 @@ public interface 
 
 abort
-voidabort(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringwhy,
+voidabort(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringwhy,
http://docs.oracle.com/javase/8/docs/api/java/lang/Throwable.html?is-external=true;
 title="class or interface in java.lang">Throwablee)
 Abort the server or client.
 
@@ -186,7 +185,7 @@ public interface 
 
 isAborted
-booleanisAborted()
+booleanisAborted()
 Check if the server or client was aborted.
 
 Returns:

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/67deb422/devapidocs/org/apache/hadoop/hbase/AsyncMetaTableAccessor.MetaTableRawScanResultConsumer.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/AsyncMetaTableAccessor.MetaTableRawScanResultConsumer.html
 
b/devapidocs/org/apache/hadoop/hbase/AsyncMetaTableAccessor.MetaTableRawScanResultConsumer.html
index 9f5c436..2316dd0 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/AsyncMetaTableAccessor.MetaTableRawScanResultConsumer.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/AsyncMetaTableAccessor.MetaTableRawScanResultConsumer.html
@@ -117,7 +117,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-private static final class AsyncMetaTableAccessor.MetaTableRawScanResultConsumer
+private static final class AsyncMetaTableAccessor.MetaTableRawScanResultConsumer
 extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
 implements RawScanResultConsumer
 
@@ -257,7 +257,7 @@ implements 
 
 currentRowCount
-privateint currentRowCount
+privateint currentRowCount
 
 
 
@@ -266,7 +266,7 @@ implements 
 
 rowUpperLimit
-private finalint rowUpperLimit
+private finalint rowUpperLimit
 
 
 
@@ -275,7 +275,7 @@ implements 
 
 visitor
-private finalMetaTableAccessor.Visitor visitor
+private finalMetaTableAccessor.Visitor visitor
 
 
 
@@ -284,7 +284,7 @@ implements 
 
 future
-private finalhttp://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true;
 title="class or interface in java.lang">Void future
+private finalhttp://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true;
 title="class or interface in java.lang">Void future
 
 
 
@@ -301,7 +301,7 @@ implements 
 
 MetaTableRawScanResultConsumer
-MetaTableRawScanResultConsumer(introwUpperLimit,
+MetaTableRawScanResultConsumer(introwUpperLimit,
MetaTableAccessor.Visitorvisitor,
http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true;
 title="class or interface in java.lang">Voidfuture)
 
@@ -320,7 +320,7 @@ implements 
 
 onError
-publicvoidonError(http://docs.oracle.com/javase/8/docs/api/java/lang/Throwable.html?is-external=true;
 title="class or interface in java.lang">Throwableerror)
+publicvoidonError(http://docs.oracle.com/javase/8/docs/api/java/lang/Throwable.html?is-external=true;
 title="class or interface in java.lang">Throwableerror)
 Description copied from 
interface:RawScanResultConsumer
 Indicate that we hit an unrecoverable error and the scan 

[33/51] [partial] hbase-site git commit: Published site at .

2017-09-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/67deb422/apidocs/src-html/org/apache/hadoop/hbase/snapshot/ExportSnapshot.html
--
diff --git 
a/apidocs/src-html/org/apache/hadoop/hbase/snapshot/ExportSnapshot.html 
b/apidocs/src-html/org/apache/hadoop/hbase/snapshot/ExportSnapshot.html
index bb89c8d..87257da 100644
--- a/apidocs/src-html/org/apache/hadoop/hbase/snapshot/ExportSnapshot.html
+++ b/apidocs/src-html/org/apache/hadoop/hbase/snapshot/ExportSnapshot.html
@@ -27,9 +27,9 @@
 019package 
org.apache.hadoop.hbase.snapshot;
 020
 021import java.io.BufferedInputStream;
-022import java.io.FileNotFoundException;
-023import java.io.DataInput;
-024import java.io.DataOutput;
+022import java.io.DataInput;
+023import java.io.DataOutput;
+024import java.io.FileNotFoundException;
 025import java.io.IOException;
 026import java.io.InputStream;
 027import java.util.ArrayList;
@@ -42,1079 +42,1080 @@
 034import org.apache.commons.cli.Option;
 035import org.apache.commons.logging.Log;
 036import 
org.apache.commons.logging.LogFactory;
-037import 
org.apache.yetus.audience.InterfaceAudience;
-038import 
org.apache.hadoop.conf.Configuration;
-039import 
org.apache.hadoop.fs.FSDataInputStream;
-040import 
org.apache.hadoop.fs.FSDataOutputStream;
-041import 
org.apache.hadoop.fs.FileChecksum;
-042import org.apache.hadoop.fs.FileStatus;
-043import org.apache.hadoop.fs.FileSystem;
-044import org.apache.hadoop.fs.FileUtil;
-045import org.apache.hadoop.fs.Path;
-046import 
org.apache.hadoop.fs.permission.FsPermission;
-047import 
org.apache.hadoop.hbase.TableName;
-048import 
org.apache.hadoop.hbase.HBaseConfiguration;
-049import 
org.apache.hadoop.hbase.HConstants;
-050import 
org.apache.hadoop.hbase.HRegionInfo;
-051import 
org.apache.hadoop.hbase.io.FileLink;
-052import 
org.apache.hadoop.hbase.io.HFileLink;
-053import 
org.apache.hadoop.hbase.io.WALLink;
+037import 
org.apache.hadoop.conf.Configuration;
+038import 
org.apache.hadoop.fs.FSDataInputStream;
+039import 
org.apache.hadoop.fs.FSDataOutputStream;
+040import 
org.apache.hadoop.fs.FileChecksum;
+041import org.apache.hadoop.fs.FileStatus;
+042import org.apache.hadoop.fs.FileSystem;
+043import org.apache.hadoop.fs.FileUtil;
+044import org.apache.hadoop.fs.Path;
+045import 
org.apache.hadoop.fs.permission.FsPermission;
+046import 
org.apache.hadoop.hbase.HBaseConfiguration;
+047import 
org.apache.hadoop.hbase.HConstants;
+048import 
org.apache.hadoop.hbase.TableName;
+049import 
org.apache.hadoop.hbase.client.RegionInfo;
+050import 
org.apache.hadoop.hbase.io.FileLink;
+051import 
org.apache.hadoop.hbase.io.HFileLink;
+052import 
org.apache.hadoop.hbase.io.WALLink;
+053import 
org.apache.hadoop.hbase.io.hadoopbackport.ThrottledInputStream;
 054import 
org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
 055import 
org.apache.hadoop.hbase.mob.MobUtils;
-056import 
org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;
-057import 
org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotFileInfo;
-058import 
org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotRegionManifest;
-059import 
org.apache.hadoop.hbase.util.AbstractHBaseTool;
-060import 
org.apache.hadoop.hbase.util.FSUtils;
-061import 
org.apache.hadoop.hbase.util.HFileArchiveUtil;
-062import 
org.apache.hadoop.hbase.util.Pair;
-063import 
org.apache.hadoop.io.BytesWritable;
-064import org.apache.hadoop.io.IOUtils;
-065import 
org.apache.hadoop.io.NullWritable;
-066import org.apache.hadoop.io.Writable;
-067import org.apache.hadoop.mapreduce.Job;
-068import 
org.apache.hadoop.mapreduce.JobContext;
-069import 
org.apache.hadoop.mapreduce.Mapper;
-070import 
org.apache.hadoop.mapreduce.InputFormat;
-071import 
org.apache.hadoop.mapreduce.InputSplit;
-072import 
org.apache.hadoop.mapreduce.RecordReader;
-073import 
org.apache.hadoop.mapreduce.TaskAttemptContext;
-074import 
org.apache.hadoop.mapreduce.lib.output.NullOutputFormat;
-075import 
org.apache.hadoop.mapreduce.security.TokenCache;
-076import 
org.apache.hadoop.hbase.io.hadoopbackport.ThrottledInputStream;
-077import 
org.apache.hadoop.util.StringUtils;
-078import org.apache.hadoop.util.Tool;
-079
-080/**
-081 * Export the specified snapshot to a 
given FileSystem.
-082 *
-083 * The .snapshot/name folder is copied to 
the destination cluster
-084 * and then all the hfiles/wals are 
copied using a Map-Reduce Job in the .archive/ location.
-085 * When everything is done, the second 
cluster can restore the snapshot.
-086 */
-087@InterfaceAudience.Public
-088public class ExportSnapshot extends 
AbstractHBaseTool implements Tool {
-089  public static final String NAME = 
"exportsnapshot";
-090  /** Configuration prefix for overrides 
for the source filesystem */
-091  public static final String 
CONF_SOURCE_PREFIX = NAME + ".from.";
-092  /** Configuration prefix for overrides 
for the destination filesystem */

[50/51] [partial] hbase-site git commit: Published site at .

2017-09-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/67deb422/acid-semantics.html
--
diff --git a/acid-semantics.html b/acid-semantics.html
index eb7ce06..94d28a6 100644
--- a/acid-semantics.html
+++ b/acid-semantics.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase   
   Apache HBase (TM) ACID Properties
@@ -606,7 +606,7 @@ under the License. -->
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2017-09-26
+  Last Published: 
2017-09-28
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/67deb422/apache_hbase_reference_guide.pdf
--
diff --git a/apache_hbase_reference_guide.pdf b/apache_hbase_reference_guide.pdf
index 597b627..57f0d04 100644
--- a/apache_hbase_reference_guide.pdf
+++ b/apache_hbase_reference_guide.pdf
@@ -5,8 +5,8 @@
 /Author (Apache HBase Team)
 /Creator (Asciidoctor PDF 1.5.0.alpha.15, based on Prawn 2.2.2)
 /Producer (Apache HBase Team)
-/ModDate (D:20170926144645+00'00')
-/CreationDate (D:20170926144645+00'00')
+/ModDate (D:20170928144637+00'00')
+/CreationDate (D:20170928144637+00'00')
 >>
 endobj
 2 0 obj

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/67deb422/apidocs/deprecated-list.html
--
diff --git a/apidocs/deprecated-list.html b/apidocs/deprecated-list.html
index 48ea1c6..efd46ff 100644
--- a/apidocs/deprecated-list.html
+++ b/apidocs/deprecated-list.html
@@ -392,7 +392,7 @@
 
 org.apache.hadoop.hbase.HRegionInfo.convert(HRegionInfo)
 As of release 2.0.0, this 
will be removed in HBase 3.0.0
- Use toProtoRegionInfo(org.apache.hadoop.hbase.client.RegionInfo)
+ Use toRegionInfo(org.apache.hadoop.hbase.client.RegionInfo)
  in 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil.
 
 
@@ -725,480 +725,485 @@
 
 
 
+org.apache.hadoop.hbase.HRegionLocation.getRegionInfo()
+Since 2.0.0. Will remove 
in 3.0.0. Use HRegionLocation.getRegion()}
 instead.
+
+
+
 org.apache.hadoop.hbase.HRegionInfo.getRegionNameAsStringForDisplay(HRegionInfo,
 Configuration)
 As of release 2.0.0, this 
will be removed in HBase 3.0.0
  Use RegionInfoDisplay#getRegionNameAsStringForDisplay(RegionInfo, 
Configuration)
  over in hbase-server module.
 
 
-
+
 org.apache.hadoop.hbase.HRegionInfo.getRegionNameForDisplay(HRegionInfo,
 Configuration)
 As of release 2.0.0, this 
will be removed in HBase 3.0.0
  Use RegionInfoDisplay#getRegionNameForDisplay(RegionInfo, 
Configuration)
  over in hbase-server module.
 
 
-
+
 org.apache.hadoop.hbase.TableName.getRowComparator()
 The comparator is an 
internal property of the table. Should
  not have been exposed here
 
 
-
+
 org.apache.hadoop.hbase.client.Table.getRpcTimeout()
 Use getReadRpcTimeout or 
getWriteRpcTimeout instead
 
 
-
+
 org.apache.hadoop.hbase.rest.client.RemoteHTable.getRpcTimeout()
 
-
+
 org.apache.hadoop.hbase.client.Scan.getScanMetrics()
 Use ResultScanner.getScanMetrics()
 instead. And notice that, please do not
  use this method and ResultScanner.getScanMetrics()
 together, the metrics
  will be messed up.
 
 
-
+
 org.apache.hadoop.hbase.ServerName.getServerName(String,
 long)
 Since 2.0. Use ServerName.valueOf(String,
 long) instead.
 
 
-
+
 org.apache.hadoop.hbase.ServerName.getServerNameLessStartCode(String)
 Since 2.0. Use ServerName.getAddress()
 
 
-
+
 org.apache.hadoop.hbase.ServerName.getServerStartcodeFromServerName(String)
 Since 2.0. Use instance of 
ServerName to pull out start code.
 
 
-
+
 org.apache.hadoop.hbase.HRegionInfo.getShortNameToLog(HRegionInfo...)
 As of release 2.0.0, this 
will be removed in HBase 3.0.0
  Use RegionInfo.getShortNameToLog(RegionInfo...).
 
 
-
+
 org.apache.hadoop.hbase.HRegionInfo.getShortNameToLog(ListHRegionInfo)
 As of release 2.0.0, this 
will be removed in HBase 3.0.0
  Use RegionInfo.getShortNameToLog(List))}.
 
 
-
+
 org.apache.hadoop.hbase.util.Bytes.getSize()
 use Bytes.getLength()
 instead
 
 
-
+
 org.apache.hadoop.hbase.io.ImmutableBytesWritable.getSize()
 use ImmutableBytesWritable.getLength()
 instead
 
 
-
+
 org.apache.hadoop.hbase.HRegionInfo.getStartKey(byte[])
 As of release 2.0.0, this 
will be removed in HBase 3.0.0
  Use RegionInfo.getStartKey(byte[]).
 
 
-
+
 org.apache.hadoop.hbase.HRegionInfo.getStartKeyForDisplay(HRegionInfo,
 Configuration)
 As of release 2.0.0, this 
will be removed in HBase 3.0.0
  Use RegionInfoDisplay#getStartKeyForDisplay(RegionInfo, 
Configuration)
  over in hbase-server module.
 
 
-
+
 org.apache.hadoop.hbase.ServerLoad.getStorefileIndexSizeInMB()
 As of 

[41/51] [partial] hbase-site git commit: Published site at .

2017-09-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/67deb422/apidocs/src-html/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.html
--
diff --git 
a/apidocs/src-html/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.html
 
b/apidocs/src-html/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.html
index 15e99d2..6e8b661 100644
--- 
a/apidocs/src-html/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.html
+++ 
b/apidocs/src-html/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.html
@@ -591,804 +591,809 @@
 583return this;
 584  }
 585
-586  /**
-587   * An ModifyableFamilyDescriptor 
contains information about a column family such as the
-588   * number of versions, compression 
settings, etc.
-589   *
-590   * It is used as input when creating a 
table or adding a column.
-591   * TODO: make this package-private 
after removing the HColumnDescriptor
-592   */
-593  @InterfaceAudience.Private
-594  public static class 
ModifyableColumnFamilyDescriptor
-595  implements ColumnFamilyDescriptor, 
ComparableModifyableColumnFamilyDescriptor {
-596
-597// Column family name
-598private final byte[] name;
-599
-600// Column metadata
-601private final MapBytes, Bytes 
values = new HashMap();
-602
-603/**
-604 * A map which holds the 
configuration specific to the column family. The
-605 * keys of the map have the same 
names as config keys and override the
-606 * defaults with cf-specific 
settings. Example usage may be for compactions,
-607 * etc.
-608 */
-609private final MapString, 
String configuration = new HashMap();
-610
-611/**
-612 * Construct a column descriptor 
specifying only the family name The other
-613 * attributes are defaulted.
-614 *
-615 * @param name Column family name. 
Must be 'printable' -- digit or
-616 * letter -- and may not contain a 
code:/code
-617 * TODO: make this private after the 
HCD is removed.
-618 */
-619@InterfaceAudience.Private
-620public 
ModifyableColumnFamilyDescriptor(final byte[] name) {
-621  this(isLegalColumnFamilyName(name), 
getDefaultValuesBytes(), Collections.emptyMap());
-622}
-623
-624/**
-625 * Constructor. Makes a deep copy of 
the supplied descriptor.
-626 * TODO: make this private after the 
HCD is removed.
-627 * @param desc The descriptor.
-628 */
-629@InterfaceAudience.Private
-630public 
ModifyableColumnFamilyDescriptor(ColumnFamilyDescriptor desc) {
-631  this(desc.getName(), 
desc.getValues(), desc.getConfiguration());
-632}
-633
-634private 
ModifyableColumnFamilyDescriptor(byte[] name, MapBytes, Bytes values, 
MapString, String config) {
-635  this.name = name;
-636  this.values.putAll(values);
-637  
this.configuration.putAll(config);
-638}
-639
-640@Override
-641public byte[] getName() {
-642  return Bytes.copy(name);
+586  public ColumnFamilyDescriptorBuilder 
setValue(final String key, final String value) {
+587desc.setValue(key, value);
+588return this;
+589  }
+590
+591  /**
+592   * An ModifyableFamilyDescriptor 
contains information about a column family such as the
+593   * number of versions, compression 
settings, etc.
+594   *
+595   * It is used as input when creating a 
table or adding a column.
+596   * TODO: make this package-private 
after removing the HColumnDescriptor
+597   */
+598  @InterfaceAudience.Private
+599  public static class 
ModifyableColumnFamilyDescriptor
+600  implements ColumnFamilyDescriptor, 
ComparableModifyableColumnFamilyDescriptor {
+601
+602// Column family name
+603private final byte[] name;
+604
+605// Column metadata
+606private final MapBytes, Bytes 
values = new HashMap();
+607
+608/**
+609 * A map which holds the 
configuration specific to the column family. The
+610 * keys of the map have the same 
names as config keys and override the
+611 * defaults with cf-specific 
settings. Example usage may be for compactions,
+612 * etc.
+613 */
+614private final MapString, 
String configuration = new HashMap();
+615
+616/**
+617 * Construct a column descriptor 
specifying only the family name The other
+618 * attributes are defaulted.
+619 *
+620 * @param name Column family name. 
Must be 'printable' -- digit or
+621 * letter -- and may not contain a 
code:/code
+622 * TODO: make this private after the 
HCD is removed.
+623 */
+624@InterfaceAudience.Private
+625public 
ModifyableColumnFamilyDescriptor(final byte[] name) {
+626  this(isLegalColumnFamilyName(name), 
getDefaultValuesBytes(), Collections.emptyMap());
+627}
+628
+629/**
+630 * Constructor. Makes a deep copy of 
the supplied descriptor.
+631 * TODO: make this private after the 
HCD is removed.
+632 * @param desc The descriptor.
+633 */

[11/51] [partial] hbase-site git commit: Published site at .

2017-09-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/67deb422/devapidocs/org/apache/hadoop/hbase/class-use/ZooKeeperConnectionException.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/class-use/ZooKeeperConnectionException.html
 
b/devapidocs/org/apache/hadoop/hbase/class-use/ZooKeeperConnectionException.html
index cefa1c3..daab2c9 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/class-use/ZooKeeperConnectionException.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/class-use/ZooKeeperConnectionException.html
@@ -221,8 +221,8 @@
 
 
 private static void
-HBaseFsckRepair.forceOfflineInZK(Adminadmin,
-HRegionInforegion)
+HBaseFsckRepair.forceOfflineInZK(Adminadmin,
+RegionInforegion)
 In 0.90, this forces an HRI offline by setting the 
RegionTransitionData
  in ZK to have HBCK_CODE_NAME as the server.
 



[21/51] [partial] hbase-site git commit: Published site at .

2017-09-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/67deb422/devapidocs/org/apache/hadoop/hbase/MetaTableAccessor.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/MetaTableAccessor.html 
b/devapidocs/org/apache/hadoop/hbase/MetaTableAccessor.html
index f29f0a2..234a25c 100644
--- a/devapidocs/org/apache/hadoop/hbase/MetaTableAccessor.html
+++ b/devapidocs/org/apache/hadoop/hbase/MetaTableAccessor.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":9,"i1":9,"i2":9,"i3":9,"i4":9,"i5":9,"i6":9,"i7":9,"i8":9,"i9":9,"i10":9,"i11":9,"i12":41,"i13":9,"i14":9,"i15":9,"i16":9,"i17":9,"i18":9,"i19":9,"i20":9,"i21":9,"i22":9,"i23":9,"i24":9,"i25":9,"i26":9,"i27":9,"i28":9,"i29":9,"i30":9,"i31":9,"i32":9,"i33":9,"i34":9,"i35":9,"i36":9,"i37":9,"i38":41,"i39":9,"i40":9,"i41":9,"i42":9,"i43":9,"i44":9,"i45":9,"i46":9,"i47":9,"i48":9,"i49":9,"i50":9,"i51":41,"i52":9,"i53":9,"i54":9,"i55":9,"i56":9,"i57":9,"i58":9,"i59":9,"i60":9,"i61":9,"i62":9,"i63":9,"i64":9,"i65":9,"i66":9,"i67":9,"i68":9,"i69":9,"i70":9,"i71":9,"i72":9,"i73":9,"i74":9,"i75":9,"i76":9,"i77":9,"i78":9,"i79":9,"i80":9,"i81":9,"i82":9,"i83":9,"i84":9,"i85":9,"i86":9,"i87":9,"i88":9,"i89":9,"i90":9,"i91":9,"i92":9,"i93":9,"i94":9,"i95":9,"i96":9,"i97":9,"i98":9,"i99":9,"i100":9,"i101":9,"i102":9,"i103":9,"i104":9,"i105":9,"i106":9};
+var methods = 
{"i0":9,"i1":9,"i2":9,"i3":9,"i4":9,"i5":9,"i6":9,"i7":9,"i8":9,"i9":9,"i10":9,"i11":9,"i12":41,"i13":9,"i14":9,"i15":9,"i16":9,"i17":9,"i18":9,"i19":9,"i20":9,"i21":9,"i22":9,"i23":9,"i24":9,"i25":9,"i26":9,"i27":9,"i28":9,"i29":9,"i30":9,"i31":9,"i32":9,"i33":9,"i34":9,"i35":9,"i36":41,"i37":9,"i38":9,"i39":9,"i40":9,"i41":9,"i42":9,"i43":9,"i44":9,"i45":9,"i46":9,"i47":9,"i48":9,"i49":9,"i50":9,"i51":41,"i52":9,"i53":9,"i54":9,"i55":9,"i56":9,"i57":9,"i58":9,"i59":9,"i60":9,"i61":9,"i62":9,"i63":9,"i64":9,"i65":9,"i66":9,"i67":9,"i68":9,"i69":9,"i70":9,"i71":9,"i72":9,"i73":9,"i74":9,"i75":9,"i76":9,"i77":9,"i78":9,"i79":9,"i80":9,"i81":9,"i82":9,"i83":9,"i84":9,"i85":9,"i86":9,"i87":9,"i88":9,"i89":9,"i90":9,"i91":9,"i92":9,"i93":9,"i94":9,"i95":9,"i96":9,"i97":9,"i98":9,"i99":9,"i100":9,"i101":9,"i102":9,"i103":9,"i104":9,"i105":9,"i106":9};
 var tabs = {65535:["t0","All Methods"],1:["t1","Static 
Methods"],8:["t4","Concrete Methods"],32:["t6","Deprecated Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -110,7 +110,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Private
-public class MetaTableAccessor
+public class MetaTableAccessor
 extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
 Read/write operations on region and assignment information 
store in
  hbase:meta.
@@ -262,8 +262,8 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 static void
-addDaughter(Connectionconnection,
-   HRegionInforegionInfo,
+addDaughter(Connectionconnection,
+   RegionInforegionInfo,
ServerNamesn,
longopenSeqNum)
 Adds a daughter region entry to meta.
@@ -271,9 +271,9 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 static Put
-addDaughtersToPut(Putput,
- HRegionInfosplitA,
- HRegionInfosplitB)
+addDaughtersToPut(Putput,
+ RegionInfosplitA,
+ RegionInfosplitB)
 Adds split daughters to the Put
 
 
@@ -292,13 +292,13 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 static Put
-addRegionInfo(Putp,
- HRegionInfohri)
+addRegionInfo(Putp,
+ RegionInfohri)
 
 
 static void
 addRegionsToMeta(Connectionconnection,
-http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListHRegionInforegionInfos,
+http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListRegionInforegionInfos,
 intregionReplication)
 Adds a hbase:meta row for each of the specified new 
regions.
 
@@ -306,7 +306,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 static void
 addRegionsToMeta(Connectionconnection,
-http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListHRegionInforegionInfos,
+http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListRegionInforegionInfos,
 intregionReplication,
 longts)
 Adds a hbase:meta row for each of the specified new 
regions.
@@ -314,33 +314,33 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 static void
-addRegionToMeta(Connectionconnection,
-  

[31/51] [partial] hbase-site git commit: Published site at .

2017-09-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/67deb422/apidocs/src-html/org/apache/hadoop/hbase/util/RegionMover.html
--
diff --git a/apidocs/src-html/org/apache/hadoop/hbase/util/RegionMover.html 
b/apidocs/src-html/org/apache/hadoop/hbase/util/RegionMover.html
index 341b8f6..7cdce1e 100644
--- a/apidocs/src-html/org/apache/hadoop/hbase/util/RegionMover.html
+++ b/apidocs/src-html/org/apache/hadoop/hbase/util/RegionMover.html
@@ -51,27 +51,27 @@
 043import 
java.util.concurrent.TimeoutException;
 044
 045import 
org.apache.commons.cli.CommandLine;
-046import 
org.apache.hadoop.conf.Configuration;
-047import org.apache.commons.logging.Log;
-048import 
org.apache.commons.logging.LogFactory;
-049import 
org.apache.hadoop.hbase.HBaseConfiguration;
-050import 
org.apache.hadoop.hbase.HConstants;
-051import 
org.apache.hadoop.hbase.HRegionInfo;
+046import org.apache.commons.logging.Log;
+047import 
org.apache.commons.logging.LogFactory;
+048import 
org.apache.hadoop.conf.Configuration;
+049import 
org.apache.hadoop.hbase.ClusterStatus.Option;
+050import 
org.apache.hadoop.hbase.HBaseConfiguration;
+051import 
org.apache.hadoop.hbase.HConstants;
 052import 
org.apache.hadoop.hbase.ServerName;
 053import 
org.apache.hadoop.hbase.TableName;
-054import 
org.apache.yetus.audience.InterfaceAudience;
-055import 
org.apache.hadoop.hbase.ClusterStatus.Option;
-056import 
org.apache.hadoop.hbase.client.Admin;
-057import 
org.apache.hadoop.hbase.client.Connection;
-058import 
org.apache.hadoop.hbase.client.ConnectionFactory;
-059import 
org.apache.hadoop.hbase.client.Get;
-060import 
org.apache.hadoop.hbase.client.Result;
-061import 
org.apache.hadoop.hbase.client.ResultScanner;
-062import 
org.apache.hadoop.hbase.client.Scan;
-063import 
org.apache.hadoop.hbase.client.Table;
-064import 
org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter;
-065import 
org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
-066import 
org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
+054import 
org.apache.hadoop.hbase.client.Admin;
+055import 
org.apache.hadoop.hbase.client.Connection;
+056import 
org.apache.hadoop.hbase.client.ConnectionFactory;
+057import 
org.apache.hadoop.hbase.client.Get;
+058import 
org.apache.hadoop.hbase.client.RegionInfo;
+059import 
org.apache.hadoop.hbase.client.Result;
+060import 
org.apache.hadoop.hbase.client.ResultScanner;
+061import 
org.apache.hadoop.hbase.client.Scan;
+062import 
org.apache.hadoop.hbase.client.Table;
+063import 
org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter;
+064import 
org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
+065import 
org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
+066import 
org.apache.yetus.audience.InterfaceAudience;
 067
 068/**
 069 * Tool for loading/unloading regions 
to/from given regionserver This tool can be run from Command
@@ -261,7 +261,7 @@
 253public Boolean call() throws 
IOException {
 254  Connection conn = 
ConnectionFactory.createConnection(rm.conf);
 255  try {
-256ListHRegionInfo 
regionsToMove = readRegionsFromFile(rm.filename);
+256ListRegionInfo 
regionsToMove = readRegionsFromFile(rm.filename);
 257if (regionsToMove.isEmpty()) {
 258  LOG.info("No regions to 
load.Exiting");
 259  return true;
@@ -321,7 +321,7 @@
 313
 314  private class Unload implements 
CallableBoolean {
 315
-316ListHRegionInfo movedRegions 
= Collections.synchronizedList(new ArrayListHRegionInfo());
+316ListRegionInfo movedRegions = 
Collections.synchronizedList(new ArrayListRegionInfo());
 317private RegionMover rm;
 318
 319public Unload(RegionMover rm) {
@@ -374,9 +374,9 @@
 366  }
 367
 368  private void loadRegions(Admin admin, 
String hostname, int port,
-369  ListHRegionInfo 
regionsToMove, boolean ack) throws Exception {
+369  ListRegionInfo 
regionsToMove, boolean ack) throws Exception {
 370String server = null;
-371ListHRegionInfo movedRegions 
= Collections.synchronizedList(new ArrayListHRegionInfo());
+371ListRegionInfo movedRegions = 
Collections.synchronizedList(new ArrayListRegionInfo());
 372int maxWaitInSeconds =
 373
admin.getConfiguration().getInt(SERVERSTART_WAIT_MAX_KEY, 
DEFAULT_SERVERSTART_WAIT_MAX);
 374long maxWait = 
EnvironmentEdgeManager.currentTime() + maxWaitInSeconds * 1000;
@@ -410,7 +410,7 @@
 402ListFutureBoolean 
taskList = new ArrayList();
 403int counter = 0;
 404while (counter  
regionsToMove.size()) {
-405  HRegionInfo region = 
regionsToMove.get(counter);
+405  RegionInfo region = 
regionsToMove.get(counter);
 406  String currentServer = 
getServerNameForRegion(admin, region);
 407  if (currentServer == null) {
 408LOG.warn("Could not get server 
for Region:" + region.getEncodedName() + " moving on");
@@ -470,8 +470,8 @@
 462  
@edu.umd.cs.findbugs.annotations.SuppressWarnings(value="DLS_DEAD_LOCAL_STORE",
 463  

[40/51] [partial] hbase-site git commit: Published site at .

2017-09-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/67deb422/apidocs/src-html/org/apache/hadoop/hbase/client/HTableMultiplexer.HTableMultiplexerStatus.html
--
diff --git 
a/apidocs/src-html/org/apache/hadoop/hbase/client/HTableMultiplexer.HTableMultiplexerStatus.html
 
b/apidocs/src-html/org/apache/hadoop/hbase/client/HTableMultiplexer.HTableMultiplexerStatus.html
index c9037ad..c4a4d8f 100644
--- 
a/apidocs/src-html/org/apache/hadoop/hbase/client/HTableMultiplexer.HTableMultiplexerStatus.html
+++ 
b/apidocs/src-html/org/apache/hadoop/hbase/client/HTableMultiplexer.HTableMultiplexerStatus.html
@@ -27,651 +27,650 @@
 019 */
 020package org.apache.hadoop.hbase.client;
 021
-022import 
org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
-023import 
org.apache.hadoop.hbase.shaded.com.google.common.util.concurrent.ThreadFactoryBuilder;
-024
-025import java.io.IOException;
-026import 
java.util.AbstractMap.SimpleEntry;
-027import java.util.ArrayList;
-028import java.util.Collections;
-029import java.util.HashMap;
-030import java.util.List;
-031import java.util.Map;
-032import 
java.util.concurrent.ConcurrentHashMap;
-033import 
java.util.concurrent.ExecutorService;
-034import java.util.concurrent.Executors;
-035import 
java.util.concurrent.LinkedBlockingQueue;
-036import 
java.util.concurrent.ScheduledExecutorService;
-037import java.util.concurrent.TimeUnit;
-038import 
java.util.concurrent.atomic.AtomicInteger;
-039import 
java.util.concurrent.atomic.AtomicLong;
-040
-041import org.apache.commons.logging.Log;
-042import 
org.apache.commons.logging.LogFactory;
-043import 
org.apache.hadoop.conf.Configuration;
-044import 
org.apache.hadoop.hbase.HBaseConfiguration;
-045import 
org.apache.hadoop.hbase.HConstants;
-046import 
org.apache.hadoop.hbase.HRegionInfo;
-047import 
org.apache.hadoop.hbase.HRegionLocation;
-048import 
org.apache.hadoop.hbase.ServerName;
-049import 
org.apache.hadoop.hbase.TableName;
-050import 
org.apache.yetus.audience.InterfaceAudience;
-051import 
org.apache.hadoop.hbase.ipc.RpcControllerFactory;
-052import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-053
-054/**
-055 * HTableMultiplexer provides a 
thread-safe non blocking PUT API across all the tables.
-056 * Each put will be sharded into 
different buffer queues based on its destination region server.
-057 * So each region server buffer queue 
will only have the puts which share the same destination.
-058 * And each queue will have a flush 
worker thread to flush the puts request to the region server.
-059 * If any queue is full, the 
HTableMultiplexer starts to drop the Put requests for that
-060 * particular queue.
-061 *
-062 * Also all the puts will be retried as a 
configuration number before dropping.
-063 * And the HTableMultiplexer can report 
the number of buffered requests and the number of the
-064 * failed (dropped) requests in total or 
on per region server basis.
-065 *
-066 * This class is thread safe.
-067 */
-068@InterfaceAudience.Public
-069public class HTableMultiplexer {
-070  private static final Log LOG = 
LogFactory.getLog(HTableMultiplexer.class.getName());
-071
-072  public static final String 
TABLE_MULTIPLEXER_FLUSH_PERIOD_MS =
-073  
"hbase.tablemultiplexer.flush.period.ms";
-074  public static final String 
TABLE_MULTIPLEXER_INIT_THREADS = "hbase.tablemultiplexer.init.threads";
-075  public static final String 
TABLE_MULTIPLEXER_MAX_RETRIES_IN_QUEUE =
-076  
"hbase.client.max.retries.in.queue";
-077
-078  /** The map between each region server 
to its flush worker */
-079  private final MapHRegionLocation, 
FlushWorker serverToFlushWorkerMap =
-080  new ConcurrentHashMap();
-081
-082  private final Configuration 
workerConf;
-083  private final ClusterConnection conn;
-084  private final ExecutorService pool;
-085  private final int maxAttempts;
-086  private final int 
perRegionServerBufferQueueSize;
-087  private final int maxKeyValueSize;
-088  private final ScheduledExecutorService 
executor;
-089  private final long flushPeriod;
-090
-091  /**
-092   * @param conf The HBaseConfiguration
-093   * @param 
perRegionServerBufferQueueSize determines the max number of the buffered Put 
ops for
-094   *  each region server before 
dropping the request.
-095   */
-096  public HTableMultiplexer(Configuration 
conf, int perRegionServerBufferQueueSize)
-097  throws IOException {
-098
this(ConnectionFactory.createConnection(conf), conf, 
perRegionServerBufferQueueSize);
-099  }
-100
-101  /**
-102   * @param conn The HBase connection.
-103   * @param conf The HBase 
configuration
-104   * @param 
perRegionServerBufferQueueSize determines the max number of the buffered Put 
ops for
-105   *  each region server before 
dropping the request.
-106   */
-107  public HTableMultiplexer(Connection 
conn, Configuration conf,
-108  int perRegionServerBufferQueueSize) 
{
-109 

[03/51] [partial] hbase-site git commit: Published site at .

2017-09-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/67deb422/devapidocs/org/apache/hadoop/hbase/client/HRegionLocator.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/HRegionLocator.html 
b/devapidocs/org/apache/hadoop/hbase/client/HRegionLocator.html
index 5934934..5084df6 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/HRegionLocator.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/HRegionLocator.html
@@ -115,7 +115,7 @@ var activeTableTab = "activeTableTab";
 
 @InterfaceAudience.Private
  @InterfaceStability.Stable
-public class HRegionLocator
+public class HRegionLocator
 extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
 implements RegionLocator
 An implementation of RegionLocator. Used to view 
region location information for a single
@@ -271,7 +271,7 @@ implements 
 
 tableName
-private finalTableName tableName
+private finalTableName tableName
 
 
 
@@ -280,7 +280,7 @@ implements 
 
 connection
-private finalClusterConnection connection
+private finalClusterConnection connection
 
 
 
@@ -297,7 +297,7 @@ implements 
 
 HRegionLocator
-publicHRegionLocator(TableNametableName,
+publicHRegionLocator(TableNametableName,
   ClusterConnectionconnection)
 
 
@@ -315,7 +315,7 @@ implements 
 
 close
-publicvoidclose()
+publicvoidclose()
throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 
 Specified by:
@@ -333,7 +333,7 @@ implements 
 
 getRegionLocation
-publicHRegionLocationgetRegionLocation(byte[]row)
+publicHRegionLocationgetRegionLocation(byte[]row)
   throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 Finds the region on which the given row is being served. 
Does not reload the cache.
 
@@ -354,7 +354,7 @@ implements 
 
 getRegionLocation
-publicHRegionLocationgetRegionLocation(byte[]row,
+publicHRegionLocationgetRegionLocation(byte[]row,
  booleanreload)
   throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 Finds the region on which the given row is being 
served.
@@ -377,7 +377,7 @@ implements 
 
 getAllRegionLocations
-publichttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListHRegionLocationgetAllRegionLocations()
+publichttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListHRegionLocationgetAllRegionLocations()
 throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 Description copied from 
interface:RegionLocator
 Retrieves all of the regions associated with this 
table.
@@ -397,7 +397,7 @@ implements 
 
 getStartKeys
-publicbyte[][]getStartKeys()
+publicbyte[][]getStartKeys()
   throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 Gets the starting row key for every region in the currently 
open table.
  
@@ -418,7 +418,7 @@ implements 
 
 getEndKeys
-publicbyte[][]getEndKeys()
+publicbyte[][]getEndKeys()
 throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 Gets the ending row key for every region in the currently 
open table.
  
@@ -439,7 +439,7 @@ implements 
 
 getStartEndKeys
-publicPairbyte[][],byte[][]getStartEndKeys()
+publicPairbyte[][],byte[][]getStartEndKeys()
 throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 Gets the starting and ending row keys for every region in 
the currently
  open table.
@@ -461,7 +461,7 @@ implements 
 
 getStartEndKeys
-Pairbyte[][],byte[][]getStartEndKeys(http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListRegionLocationsregions)
+Pairbyte[][],byte[][]getStartEndKeys(http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListRegionLocationsregions)
 
 
 
@@ -470,7 +470,7 @@ implements 
 
 getName
-publicTableNamegetName()
+publicTableNamegetName()
 Description copied from 
interface:RegionLocator
 Gets the fully qualified table name instance of this 
table.
 
@@ -485,7 +485,7 @@ implements 
 
 listRegionLocations

[44/51] [partial] hbase-site git commit: Published site at .

2017-09-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/67deb422/apidocs/src-html/org/apache/hadoop/hbase/HRegionInfo.html
--
diff --git a/apidocs/src-html/org/apache/hadoop/hbase/HRegionInfo.html 
b/apidocs/src-html/org/apache/hadoop/hbase/HRegionInfo.html
index 05331ef..332eacd 100644
--- a/apidocs/src-html/org/apache/hadoop/hbase/HRegionInfo.html
+++ b/apidocs/src-html/org/apache/hadoop/hbase/HRegionInfo.html
@@ -36,996 +36,997 @@
 028import org.apache.commons.logging.Log;
 029import 
org.apache.commons.logging.LogFactory;
 030import 
org.apache.hadoop.conf.Configuration;
-031import 
org.apache.hadoop.hbase.client.RegionInfoBuilder;
-032import 
org.apache.yetus.audience.InterfaceAudience;
-033import 
org.apache.hadoop.hbase.client.RegionInfo;
-034import 
org.apache.hadoop.hbase.KeyValue.KVComparator;
+031import 
org.apache.hadoop.hbase.KeyValue.KVComparator;
+032import 
org.apache.hadoop.hbase.client.RegionInfo;
+033import 
org.apache.hadoop.hbase.client.RegionInfoBuilder;
+034import 
org.apache.hadoop.hbase.client.RegionInfoDisplay;
 035import 
org.apache.hadoop.hbase.exceptions.DeserializationException;
 036import 
org.apache.hadoop.hbase.master.RegionState;
-037import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-038import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
-039import 
org.apache.hadoop.hbase.util.Bytes;
-040import 
org.apache.hadoop.hbase.client.RegionInfoDisplay;
-041import 
org.apache.hadoop.io.DataInputBuffer;
-042
-043/**
-044 * Information about a region. A region 
is a range of keys in the whole keyspace of a table, an
-045 * identifier (a timestamp) for 
differentiating between subset ranges (after region split)
-046 * and a replicaId for differentiating 
the instance for the same range and some status information
-047 * about the region.
-048 *
-049 * The region has a unique name which 
consists of the following fields:
-050 * ul
-051 * li tableName   : The name of 
the table /li
-052 * li startKey: The startKey 
for the region. /li
-053 * li regionId: A timestamp 
when the region is created. /li
-054 * li replicaId   : An id 
starting from 0 to differentiate replicas of the same region range
-055 * but hosted in separated servers. The 
same region range can be hosted in multiple locations./li
-056 * li encodedName : An MD5 
encoded string for the region name./li
-057 * /ul
-058 *
-059 * br Other than the fields in 
the region name, region info contains:
-060 * ul
-061 * li endKey  : the endKey 
for the region (exclusive) /li
-062 * li split   : Whether the 
region is split /li
-063 * li offline : Whether the 
region is offline /li
-064 * /ul
-065 *
-066 * In 0.98 or before, a list of table's 
regions would fully cover the total keyspace, and at any
-067 * point in time, a row key always 
belongs to a single region, which is hosted in a single server.
-068 * In 0.99+, a region can have multiple 
instances (called replicas), and thus a range (or row) can
-069 * correspond to multiple HRegionInfo's. 
These HRI's share the same fields however except the
-070 * replicaId field. If the replicaId is 
not set, it defaults to 0, which is compatible with the
-071 * previous behavior of a range 
corresponding to 1 region.
-072 * @deprecated As of release 2.0.0, this 
will be removed in HBase 3.0.0.
-073 * use {@link 
RegionInfoBuilder} to build {@link RegionInfo}.
-074 */
-075@Deprecated
-076@InterfaceAudience.Public
-077public class HRegionInfo implements 
RegionInfo, ComparableHRegionInfo {
-078  private static final Log LOG = 
LogFactory.getLog(HRegionInfo.class);
-079
-080  /**
-081   * The new format for a region name 
contains its encodedName at the end.
-082   * The encoded name also serves as the 
directory name for the region
-083   * in the filesystem.
-084   *
-085   * New region name format:
-086   *
lt;tablename,,lt;startkey,lt;regionIdTimestamp.lt;encodedName.
-087   * where,
-088   *lt;encodedName is a hex 
version of the MD5 hash of
-089   *
lt;tablename,lt;startkey,lt;regionIdTimestamp
-090   *
-091   * The old region name format:
-092   *
lt;tablename,lt;startkey,lt;regionIdTimestamp
-093   * For region names in the old format, 
the encoded name is a 32-bit
-094   * JenkinsHash integer value (in its 
decimal notation, string form).
-095   *p
-096   * **NOTE**
-097   *
-098   * The first hbase:meta region, and 
regions created by an older
-099   * version of HBase (0.20 or prior) 
will continue to use the
-100   * old region name format.
-101   */
-102
-103  /** A non-capture group so that this 
can be embedded. */
-104  public static final String 
ENCODED_REGION_NAME_REGEX = RegionInfoBuilder.ENCODED_REGION_NAME_REGEX;
-105
-106  private static final int MAX_REPLICA_ID 
= 0x;
-107
-108  /**
-109   * @param regionName
-110   * @return the encodedName
-111   * @deprecated As of release 2.0.0, 
this will be removed in HBase 3.0.0

[10/51] [partial] hbase-site git commit: Published site at .

2017-09-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/67deb422/devapidocs/org/apache/hadoop/hbase/client/AsyncAdmin.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/AsyncAdmin.html 
b/devapidocs/org/apache/hadoop/hbase/client/AsyncAdmin.html
index a38b3b6..18a03c2 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/AsyncAdmin.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/AsyncAdmin.html
@@ -106,7 +106,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Public
-public interface AsyncAdmin
+public interface AsyncAdmin
 The asynchronous administrative API for HBase.
  
  This feature is still under development, so marked as IA.Private. Will change 
to public when
@@ -458,7 +458,7 @@ public interface 
-http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListHRegionInfo
+http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListRegionInfo
 getOnlineRegions(ServerNameserverName)
 Get all the online regions on a region server.
 
@@ -509,7 +509,7 @@ public interface 
-http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListHRegionInfo
+http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListRegionInfo
 getTableRegions(TableNametableName)
 Get the regions of a given table.
 
@@ -960,7 +960,7 @@ public interface 
 
 tableExists
-http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/lang/Boolean.html?is-external=true;
 title="class or interface in java.lang">BooleantableExists(TableNametableName)
+http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/lang/Boolean.html?is-external=true;
 title="class or interface in java.lang">BooleantableExists(TableNametableName)
 
 Parameters:
 tableName - Table to check.
@@ -976,7 +976,7 @@ public interface 
 
 listTables
-defaulthttp://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListTableDescriptorlistTables()
+defaulthttp://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListTableDescriptorlistTables()
 List all the userspace tables.
 
 Returns:
@@ -992,7 +992,7 @@ public interface 
 
 listTables
-http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListTableDescriptorlistTables(http://docs.oracle.com/javase/8/docs/api/java/util/Optional.html?is-external=true;
 title="class or interface in java.util">Optionalhttp://docs.oracle.com/javase/8/docs/api/java/util/regex/Pattern.html?is-external=true;
 title="class or interface in java.util.regex">Patternpattern,
+http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListTableDescriptorlistTables(http://docs.oracle.com/javase/8/docs/api/java/util/Optional.html?is-external=true;
 title="class or interface in java.util">Optionalhttp://docs.oracle.com/javase/8/docs/api/java/util/regex/Pattern.html?is-external=true;
 title="class or 

[04/51] [partial] hbase-site git commit: Published site at .

2017-09-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/67deb422/devapidocs/org/apache/hadoop/hbase/client/HBaseAdmin.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/HBaseAdmin.html 
b/devapidocs/org/apache/hadoop/hbase/client/HBaseAdmin.html
index 69f9351..4f5f7e0 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/HBaseAdmin.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/HBaseAdmin.html
@@ -115,7 +115,7 @@ var activeTableTab = "activeTableTab";
 
 @InterfaceAudience.Private
  @InterfaceStability.Evolving
-public class HBaseAdmin
+public class HBaseAdmin
 extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
 implements Admin
 HBaseAdmin is no longer a client API. It is marked 
InterfaceAudience.Private indicating that
@@ -530,8 +530,8 @@ implements 
 
 private void
-compact(ServerNamesn,
-   HRegionInfohri,
+compact(ServerNamesn,
+   RegionInfohri,
booleanmajor,
byte[]family)
 
@@ -1052,7 +1052,7 @@ implements 
 
 
-(package private) PairHRegionInfo,ServerName
+(package private) PairRegionInfo,ServerName
 getRegion(byte[]regionName)
 
 
@@ -1779,7 +1779,7 @@ implements 
 
 (package private) http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/Future.html?is-external=true;
 title="class or interface in java.util.concurrent">Futurehttp://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true;
 title="class or interface in java.lang">Void
-splitRegionAsync(HRegionInfohri,
+splitRegionAsync(RegionInfohri,
 byte[]splitPoint)
 
 
@@ -1891,7 +1891,7 @@ implements 
 
 LOG
-private static finalorg.apache.commons.logging.Log LOG
+private static finalorg.apache.commons.logging.Log LOG
 
 
 
@@ -1900,7 +1900,7 @@ implements 
 
 ZK_IDENTIFIER_PREFIX
-private static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String ZK_IDENTIFIER_PREFIX
+private static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String ZK_IDENTIFIER_PREFIX
 
 See Also:
 Constant
 Field Values
@@ -1913,7 +1913,7 @@ implements 
 
 connection
-privateClusterConnection connection
+privateClusterConnection connection
 
 
 
@@ -1922,7 +1922,7 @@ implements 
 
 conf
-private volatileorg.apache.hadoop.conf.Configuration conf
+private volatileorg.apache.hadoop.conf.Configuration conf
 
 
 
@@ -1931,7 +1931,7 @@ implements 
 
 pause
-private finallong pause
+private finallong pause
 
 
 
@@ -1940,7 +1940,7 @@ implements 
 
 numRetries
-private finalint numRetries
+private finalint numRetries
 
 
 
@@ -1949,7 +1949,7 @@ implements 
 
 syncWaitTimeout
-private finalint syncWaitTimeout
+private finalint syncWaitTimeout
 
 
 
@@ -1958,7 +1958,7 @@ implements 
 
 aborted
-privateboolean aborted
+privateboolean aborted
 
 
 
@@ -1967,7 +1967,7 @@ implements 
 
 operationTimeout
-privateint operationTimeout
+privateint operationTimeout
 
 
 
@@ -1976,7 +1976,7 @@ implements 
 
 rpcTimeout
-privateint rpcTimeout
+privateint rpcTimeout
 
 
 
@@ -1985,7 +1985,7 @@ implements 
 
 rpcCallerFactory
-privateRpcRetryingCallerFactory 
rpcCallerFactory
+privateRpcRetryingCallerFactory 
rpcCallerFactory
 
 
 
@@ -1994,7 +1994,7 @@ implements 
 
 rpcControllerFactory
-privateRpcControllerFactory rpcControllerFactory
+privateRpcControllerFactory rpcControllerFactory
 
 
 
@@ -2003,7 +2003,7 @@ implements 
 
 ng
-privateNonceGenerator ng
+privateNonceGenerator ng
 
 
 
@@ -2020,7 +2020,7 @@ implements 
 
 HBaseAdmin
-HBaseAdmin(ClusterConnectionconnection)
+HBaseAdmin(ClusterConnectionconnection)
 throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 
 Throws:
@@ -2042,7 +2042,7 @@ implements 
 
 getOperationTimeout
-publicintgetOperationTimeout()
+publicintgetOperationTimeout()
 
 Specified by:
 getOperationTimeoutin
 interfaceAdmin
@@ -2055,7 +2055,7 @@ implements 
 
 abort
-publicvoidabort(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringwhy,
+publicvoidabort(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringwhy,
   http://docs.oracle.com/javase/8/docs/api/java/lang/Throwable.html?is-external=true;
 title="class or interface in java.lang">Throwablee)
 Description copied from 
interface:Abortable
 Abort the server or client.
@@ -2076,7 +2076,7 @@ implements 
 
 isAborted
-publicbooleanisAborted()
+publicbooleanisAborted()
 Description copied from 
interface:Abortable
 Check if the server or client was aborted.
 
@@ -2095,7 +2095,7 @@ implements 
 
 abortProcedure
-publicbooleanabortProcedure(longprocId,

[06/51] [partial] hbase-site git commit: Published site at .

2017-09-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/67deb422/devapidocs/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.html 
b/devapidocs/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.html
index 8762bf1..ea38a87 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":10,"i1":9,"i2":10,"i3":9,"i4":9,"i5":10,"i6":9,"i7":9,"i8":9,"i9":9,"i10":9,"i11":9,"i12":9,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":9};
+var methods = 
{"i0":10,"i1":9,"i2":10,"i3":9,"i4":9,"i5":10,"i6":9,"i7":9,"i8":9,"i9":9,"i10":9,"i11":9,"i12":9,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":9};
 var tabs = {65535:["t0","All Methods"],1:["t1","Static 
Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -760,6 +760,11 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 Bytesvalue)
 
 
+ColumnFamilyDescriptorBuilder
+setValue(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringkey,
+http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in 
java.lang">Stringvalue)
+
+
 static byte[]
 toByteArray(ColumnFamilyDescriptordesc)
 
@@ -2262,13 +2267,23 @@ public static finalhttp://docs.oracle.com/javase/8/docs/api/java/
 
 
 
-
+
 
 setValue
 publicColumnFamilyDescriptorBuildersetValue(byte[]key,
   byte[]value)
 
 
+
+
+
+
+
+setValue
+publicColumnFamilyDescriptorBuildersetValue(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringkey,
+  http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringvalue)
+
+
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/67deb422/devapidocs/org/apache/hadoop/hbase/client/FlushRegionCallable.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/FlushRegionCallable.html 
b/devapidocs/org/apache/hadoop/hbase/client/FlushRegionCallable.html
index 7b4f2d8..005cc37 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/FlushRegionCallable.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/FlushRegionCallable.html
@@ -178,9 +178,9 @@ extends Constructor and Description
 
 
-FlushRegionCallable(ClusterConnectionconnection,
+FlushRegionCallable(ClusterConnectionconnection,
RpcControllerFactoryrpcControllerFactory,
-   HRegionInforegionInfo,
+   RegionInforegionInfo,
booleanwriteFlushWalMarker)
 
 
@@ -305,7 +305,7 @@ extends 
+
 
 
 
@@ -313,7 +313,7 @@ extends FlushRegionCallable(ClusterConnectionconnection,
RpcControllerFactoryrpcControllerFactory,
-   HRegionInforegionInfo,
+   RegionInforegionInfo,
booleanwriteFlushWalMarker)
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/67deb422/devapidocs/org/apache/hadoop/hbase/client/HBaseAdmin.AbortProcedureFuture.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/HBaseAdmin.AbortProcedureFuture.html
 
b/devapidocs/org/apache/hadoop/hbase/client/HBaseAdmin.AbortProcedureFuture.html
index 34a07f8..cb212ae 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/client/HBaseAdmin.AbortProcedureFuture.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/client/HBaseAdmin.AbortProcedureFuture.html
@@ -122,7 +122,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-private static class HBaseAdmin.AbortProcedureFuture
+private static class HBaseAdmin.AbortProcedureFuture
 extends HBaseAdmin.ProcedureFuturehttp://docs.oracle.com/javase/8/docs/api/java/lang/Boolean.html?is-external=true;
 title="class or interface in java.lang">Boolean
 
 
@@ -235,7 +235,7 @@ extends 
 
 isAbortInProgress

[20/51] [partial] hbase-site git commit: Published site at .

2017-09-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/67deb422/devapidocs/org/apache/hadoop/hbase/RegionLocations.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/RegionLocations.html 
b/devapidocs/org/apache/hadoop/hbase/RegionLocations.html
index fda500f..f6034ce 100644
--- a/devapidocs/org/apache/hadoop/hbase/RegionLocations.html
+++ b/devapidocs/org/apache/hadoop/hbase/RegionLocations.html
@@ -110,7 +110,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Private
-public class RegionLocations
+public class RegionLocations
 extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
 Container for holding a list of HRegionLocation's that correspond to 
the
  same range. The list is indexed by the replicaId. This is an immutable list,
@@ -304,7 +304,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 numNonNullElements
-private finalint numNonNullElements
+private finalint numNonNullElements
 
 
 
@@ -313,7 +313,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 locations
-private finalHRegionLocation[] locations
+private finalHRegionLocation[] locations
 
 
 
@@ -330,7 +330,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 RegionLocations
-publicRegionLocations(HRegionLocation...locations)
+publicRegionLocations(HRegionLocation...locations)
 Constructs the region location list. The locations array 
should
  contain all the locations for known replicas for the region, and should be
  sorted in replicaId ascending order, although it can contain nulls indicating 
replicaIds
@@ -347,7 +347,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 RegionLocations
-publicRegionLocations(http://docs.oracle.com/javase/8/docs/api/java/util/Collection.html?is-external=true;
 title="class or interface in java.util">CollectionHRegionLocationlocations)
+publicRegionLocations(http://docs.oracle.com/javase/8/docs/api/java/util/Collection.html?is-external=true;
 title="class or interface in java.util">CollectionHRegionLocationlocations)
 
 
 
@@ -364,7 +364,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 size
-publicintsize()
+publicintsize()
 Returns the size of the list even if some of the elements
  might be null.
 
@@ -379,7 +379,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 numNonNullElements
-publicintnumNonNullElements()
+publicintnumNonNullElements()
 Returns the size of not-null locations
 
 Returns:
@@ -393,7 +393,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 isEmpty
-publicbooleanisEmpty()
+publicbooleanisEmpty()
 Returns whether there are non-null elements in the 
list
 
 Returns:
@@ -407,7 +407,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 removeByServer
-publicRegionLocationsremoveByServer(ServerNameserverName)
+publicRegionLocationsremoveByServer(ServerNameserverName)
 Returns a new RegionLocations with the locations removed 
(set to null)
  which have the destination server as given.
 
@@ -425,7 +425,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 remove
-publicRegionLocationsremove(HRegionLocationlocation)
+publicRegionLocationsremove(HRegionLocationlocation)
 Removes the given location from the list
 
 Parameters:
@@ -442,7 +442,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 remove
-publicRegionLocationsremove(intreplicaId)
+publicRegionLocationsremove(intreplicaId)
 Removes location of the given replicaId from the list
 
 Parameters:
@@ -459,7 +459,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 mergeLocations
-publicRegionLocationsmergeLocations(RegionLocationsother)
+publicRegionLocationsmergeLocations(RegionLocationsother)
 Merges this RegionLocations list with the given list 
assuming
  same range, and keeping the most up to date version of the
  HRegionLocation entries from either list according to seqNum. If seqNums
@@ -479,7 +479,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 selectRegionLocation
-privateHRegionLocationselectRegionLocation(HRegionLocationoldLocation,
+privateHRegionLocationselectRegionLocation(HRegionLocationoldLocation,
  HRegionLocationlocation,
  booleancheckForEquals,
  booleanforce)
@@ -491,7 +491,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 updateLocation
-publicRegionLocationsupdateLocation(HRegionLocationlocation,
+publicRegionLocationsupdateLocation(HRegionLocationlocation,
   booleancheckForEquals,

hbase-site git commit: INFRA-10751 Empty commit

2017-09-28 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 67deb422f -> d33b6ba3f


INFRA-10751 Empty commit


Project: http://git-wip-us.apache.org/repos/asf/hbase-site/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase-site/commit/d33b6ba3
Tree: http://git-wip-us.apache.org/repos/asf/hbase-site/tree/d33b6ba3
Diff: http://git-wip-us.apache.org/repos/asf/hbase-site/diff/d33b6ba3

Branch: refs/heads/asf-site
Commit: d33b6ba3f1250b89dc4b2a608214a23f896ef46b
Parents: 67deb42
Author: jenkins 
Authored: Thu Sep 28 15:14:30 2017 +
Committer: jenkins 
Committed: Thu Sep 28 15:14:30 2017 +

--

--




[13/51] [partial] hbase-site git commit: Published site at .

2017-09-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/67deb422/devapidocs/org/apache/hadoop/hbase/class-use/Stoppable.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/class-use/Stoppable.html 
b/devapidocs/org/apache/hadoop/hbase/class-use/Stoppable.html
index d6e4738..123d15e 100644
--- a/devapidocs/org/apache/hadoop/hbase/class-use/Stoppable.html
+++ b/devapidocs/org/apache/hadoop/hbase/class-use/Stoppable.html
@@ -701,12 +701,6 @@
 
 
 interface
-OnlineRegions
-Interface to Map of online regions.
-
-
-
-interface
 RegionServerServices
 Services provided by HRegionServer
 



[32/51] [partial] hbase-site git commit: Published site at .

2017-09-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/67deb422/apidocs/src-html/org/apache/hadoop/hbase/snapshot/SnapshotInfo.html
--
diff --git 
a/apidocs/src-html/org/apache/hadoop/hbase/snapshot/SnapshotInfo.html 
b/apidocs/src-html/org/apache/hadoop/hbase/snapshot/SnapshotInfo.html
index 251d4fe..6c97088 100644
--- a/apidocs/src-html/org/apache/hadoop/hbase/snapshot/SnapshotInfo.html
+++ b/apidocs/src-html/org/apache/hadoop/hbase/snapshot/SnapshotInfo.html
@@ -26,8 +26,8 @@
 018
 019package 
org.apache.hadoop.hbase.snapshot;
 020
-021import java.io.IOException;
-022import java.io.FileNotFoundException;
+021import java.io.FileNotFoundException;
+022import java.io.IOException;
 023import java.net.URI;
 024import java.text.SimpleDateFormat;
 025import java.util.ArrayList;
@@ -44,679 +44,677 @@
 036import org.apache.commons.cli.Option;
 037import org.apache.commons.logging.Log;
 038import 
org.apache.commons.logging.LogFactory;
-039
-040import org.apache.hadoop.fs.Path;
-041import org.apache.hadoop.fs.FileStatus;
-042import org.apache.hadoop.fs.FileSystem;
-043import 
org.apache.yetus.audience.InterfaceAudience;
-044import 
org.apache.hadoop.hbase.client.SnapshotDescription;
-045import 
org.apache.hadoop.hbase.HRegionInfo;
-046import 
org.apache.hadoop.hbase.TableName;
-047import 
org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos;
+039import 
org.apache.hadoop.conf.Configuration;
+040import org.apache.hadoop.fs.FileStatus;
+041import org.apache.hadoop.fs.FileSystem;
+042import org.apache.hadoop.fs.Path;
+043import 
org.apache.hadoop.hbase.TableName;
+044import 
org.apache.hadoop.hbase.client.RegionInfo;
+045import 
org.apache.hadoop.hbase.client.SnapshotDescription;
+046import 
org.apache.hadoop.hbase.io.HFileLink;
+047import 
org.apache.hadoop.hbase.io.WALLink;
 048import 
org.apache.hadoop.hbase.util.AbstractHBaseTool;
-049import 
org.apache.hadoop.util.StringUtils;
-050
-051import 
org.apache.hadoop.conf.Configuration;
-052import 
org.apache.hadoop.hbase.io.HFileLink;
-053import 
org.apache.hadoop.hbase.io.WALLink;
-054import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-055import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
-056import 
org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotRegionManifest;
-057import 
org.apache.hadoop.hbase.util.FSUtils;
-058
-059/**
-060 * Tool for dumping snapshot 
information.
-061 * ol
-062 * li Table Descriptor
-063 * li Snapshot creation time, 
type, format version, ...
-064 * li List of hfiles and wals
-065 * li Stats about hfiles and logs 
sizes, percentage of shared with the source table, ...
-066 * /ol
-067 */
-068@InterfaceAudience.Public
-069public final class SnapshotInfo extends 
AbstractHBaseTool {
-070  private static final Log LOG = 
LogFactory.getLog(SnapshotInfo.class);
-071
-072  static final class Options {
-073static final Option SNAPSHOT = new 
Option(null, "snapshot", true, "Snapshot to examine.");
-074static final Option REMOTE_DIR = new 
Option(null, "remote-dir", true,
-075"Root directory that contains the 
snapshots.");
-076static final Option LIST_SNAPSHOTS = 
new Option(null, "list-snapshots", false,
-077"List all the available snapshots 
and exit.");
-078static final Option FILES = new 
Option(null, "files", false, "Files and logs list.");
-079static final Option STATS = new 
Option(null, "stats", false, "Files and logs stats.");
-080static final Option SCHEMA = new 
Option(null, "schema", false,
-081"Describe the snapshotted 
table.");
-082static final Option SIZE_IN_BYTES = 
new Option(null, "size-in-bytes", false,
-083"Print the size of the files in 
bytes.");
-084  }
-085
-086  /**
-087   * Statistics about the snapshot
-088   * ol
-089   * li How many store files and 
logs are in the archive
-090   * li How many store files and 
logs are shared with the table
-091   * li Total store files and 
logs size and shared amount
-092   * /ol
-093   */
-094  public static class SnapshotStats {
-095/** Information about the file 
referenced by the snapshot */
-096static class FileInfo {
-097  private final boolean corrupted;
-098  private final boolean inArchive;
-099  private final long size;
-100
-101  FileInfo(final boolean inArchive, 
final long size, final boolean corrupted) {
-102this.corrupted = corrupted;
-103this.inArchive = inArchive;
-104this.size = size;
-105  }
-106
-107  /** @return true if the file is in 
the archive */
-108  public boolean inArchive() {
-109return this.inArchive;
-110  }
-111
-112  /** @return true if the file is 
corrupted */
-113  public boolean isCorrupted() {
-114return this.corrupted;
-115  }
-116
-117  /** @return true if the file is 
missing */
-118  public boolean isMissing() {
-119return this.size  0;
-120   

[42/51] [partial] hbase-site git commit: Published site at .

2017-09-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/67deb422/apidocs/src-html/org/apache/hadoop/hbase/client/AsyncAdmin.html
--
diff --git a/apidocs/src-html/org/apache/hadoop/hbase/client/AsyncAdmin.html 
b/apidocs/src-html/org/apache/hadoop/hbase/client/AsyncAdmin.html
index 365becb..ad90124 100644
--- a/apidocs/src-html/org/apache/hadoop/hbase/client/AsyncAdmin.html
+++ b/apidocs/src-html/org/apache/hadoop/hbase/client/AsyncAdmin.html
@@ -25,9 +25,9 @@
 017 */
 018package org.apache.hadoop.hbase.client;
 019
-020import java.util.List;
-021import java.util.Collection;
-022import java.util.EnumSet;
+020import java.util.Collection;
+021import java.util.EnumSet;
+022import java.util.List;
 023import java.util.Map;
 024import java.util.Optional;
 025import java.util.Set;
@@ -37,1107 +37,1106 @@
 029
 030import 
org.apache.hadoop.hbase.ClusterStatus;
 031import 
org.apache.hadoop.hbase.ClusterStatus.Option;
-032import 
org.apache.hadoop.hbase.HRegionInfo;
+032import 
org.apache.hadoop.hbase.NamespaceDescriptor;
 033import 
org.apache.hadoop.hbase.RegionLoad;
 034import 
org.apache.hadoop.hbase.ServerName;
-035import 
org.apache.hadoop.hbase.NamespaceDescriptor;
-036import 
org.apache.hadoop.hbase.TableName;
-037import 
org.apache.yetus.audience.InterfaceAudience;
-038import 
org.apache.hadoop.hbase.quotas.QuotaFilter;
-039import 
org.apache.hadoop.hbase.quotas.QuotaSettings;
-040import 
org.apache.hadoop.hbase.client.RawAsyncTable.CoprocessorCallable;
-041import 
org.apache.hadoop.hbase.client.replication.TableCFs;
-042import 
org.apache.hadoop.hbase.client.security.SecurityCapability;
-043import 
org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-044import 
org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
-045import 
org.apache.hadoop.hbase.util.Pair;
-046
-047import com.google.protobuf.RpcChannel;
-048
-049/**
-050 * The asynchronous administrative API 
for HBase.
-051 * p
-052 * This feature is still under 
development, so marked as IA.Private. Will change to public when
-053 * done. Use it with caution.
-054 * @since 2.0.0
-055 */
-056@InterfaceAudience.Public
-057public interface AsyncAdmin {
-058
-059  /**
-060   * @param tableName Table to check.
-061   * @return True if table exists 
already. The return value will be wrapped by a
-062   * {@link CompletableFuture}.
-063   */
-064  CompletableFutureBoolean 
tableExists(TableName tableName);
-065
-066  /**
-067   * List all the userspace tables.
-068   * @return - returns a list of 
TableDescriptors wrapped by a {@link CompletableFuture}.
-069   * @see #listTables(Optional, 
boolean)
-070   */
-071  default 
CompletableFutureListTableDescriptor listTables() {
-072return listTables(Optional.empty(), 
false);
-073  }
-074
-075  /**
-076   * List all the tables matching the 
given pattern.
-077   * @param pattern The compiled regular 
expression to match against
-078   * @param includeSysTables False to 
match only against userspace tables
-079   * @return - returns a list of 
TableDescriptors wrapped by a {@link CompletableFuture}.
-080   */
-081  
CompletableFutureListTableDescriptor 
listTables(OptionalPattern pattern,
-082  boolean includeSysTables);
-083
-084  /**
-085   * List all of the names of userspace 
tables.
-086   * @return a list of table names 
wrapped by a {@link CompletableFuture}.
-087   * @see #listTableNames(Optional, 
boolean)
-088   */
-089  default 
CompletableFutureListTableName listTableNames() {
-090return 
listTableNames(Optional.empty(), false);
-091  }
-092
-093  /**
-094   * List all of the names of userspace 
tables.
-095   * @param pattern The regular 
expression to match against
-096   * @param includeSysTables False to 
match only against userspace tables
-097   * @return a list of table names 
wrapped by a {@link CompletableFuture}.
-098   */
-099  
CompletableFutureListTableName 
listTableNames(OptionalPattern pattern,
-100  boolean includeSysTables);
-101
-102  /**
-103   * Method for getting the 
tableDescriptor
-104   * @param tableName as a {@link 
TableName}
-105   * @return the read-only 
tableDescriptor wrapped by a {@link CompletableFuture}.
-106   */
-107  
CompletableFutureTableDescriptor getTableDescriptor(TableName 
tableName);
-108
-109  /**
-110   * Creates a new table.
-111   * @param desc table descriptor for 
table
-112   */
-113  default CompletableFutureVoid 
createTable(TableDescriptor desc) {
-114return createTable(desc, 
Optional.empty());
-115  }
-116
-117  /**
-118   * Creates a new table with the 
specified number of regions. The start key specified will become
-119   * the end key of the first region of 
the table, and the end key specified will become the start
-120   * key of the last region of the table 
(the first region has a null start key and the last region
-121   * has a null end key). BigInteger math 
will be used to divide the key range specified into enough

[37/51] [partial] hbase-site git commit: Published site at .

2017-09-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/67deb422/apidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncTable.CoprocessorCallback.html
--
diff --git 
a/apidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncTable.CoprocessorCallback.html
 
b/apidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncTable.CoprocessorCallback.html
index 523d98a..22eba3b 100644
--- 
a/apidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncTable.CoprocessorCallback.html
+++ 
b/apidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncTable.CoprocessorCallback.html
@@ -25,236 +25,235 @@
 017 */
 018package org.apache.hadoop.hbase.client;
 019
-020import com.google.protobuf.RpcCallback;
-021import com.google.protobuf.RpcChannel;
-022import 
com.google.protobuf.RpcController;
-023
-024import 
java.util.concurrent.CompletableFuture;
-025import java.util.function.Function;
-026
-027import 
org.apache.hadoop.hbase.HRegionInfo;
-028import 
org.apache.yetus.audience.InterfaceAudience;
-029
-030/**
-031 * A low level asynchronous table.
-032 * p
-033 * The implementation is required to be 
thread safe.
-034 * p
-035 * The returned {@code CompletableFuture} 
will be finished directly in the rpc framework's callback
-036 * thread, so typically you should not do 
any time consuming work inside these methods, otherwise
-037 * you will be likely to block at least 
one connection to RS(even more if the rpc framework uses
-038 * NIO).
-039 * p
-040 * So, only experts that want to build 
high performance service should use this interface directly,
-041 * especially for the {@link #scan(Scan, 
RawScanResultConsumer)} below.
-042 * p
-043 * TODO: For now the only difference 
between this interface and {@link AsyncTable} is the scan
-044 * method. The {@link 
RawScanResultConsumer} exposes the implementation details of a 
scan(heartbeat)
-045 * so it is not suitable for a normal 
user. If it is still the only difference after we implement
-046 * most features of AsyncTable, we can 
think about merge these two interfaces.
-047 * @since 2.0.0
-048 */
-049@InterfaceAudience.Public
-050public interface RawAsyncTable extends 
AsyncTableBase {
-051
-052  /**
-053   * The basic scan API uses the observer 
pattern. All results that match the given scan object will
-054   * be passed to the given {@code 
consumer} by calling {@code RawScanResultConsumer.onNext}.
-055   * {@code 
RawScanResultConsumer.onComplete} means the scan is finished, and
-056   * {@code 
RawScanResultConsumer.onError} means we hit an unrecoverable error and the scan 
is
-057   * terminated. {@code 
RawScanResultConsumer.onHeartbeat} means the RS is still working but we can
-058   * not get a valid result to call 
{@code RawScanResultConsumer.onNext}. This is usually because
-059   * the matched results are too sparse, 
for example, a filter which almost filters out everything
-060   * is specified.
-061   * p
-062   * Notice that, the methods of the 
given {@code consumer} will be called directly in the rpc
-063   * framework's callback thread, so 
typically you should not do any time consuming work inside
-064   * these methods, otherwise you will be 
likely to block at least one connection to RS(even more if
-065   * the rpc framework uses NIO).
-066   * @param scan A configured {@link 
Scan} object.
-067   * @param consumer the consumer used to 
receive results.
-068   */
-069  void scan(Scan scan, 
RawScanResultConsumer consumer);
-070
-071  /**
-072   * Delegate to a protobuf rpc call.
-073   * p
-074   * Usually, it is just a simple lambda 
expression, like:
-075   *
-076   * pre
-077   * code
-078   * (stub, controller, rpcCallback) 
- {
-079   *   XXXRequest request = ...; // 
prepare the request
-080   *   stub.xxx(controller, request, 
rpcCallback);
-081   * }
-082   * /code
-083   * /pre
-084   *
-085   * And if you can prepare the {@code 
request} before calling the coprocessorService method, the
-086   * lambda expression will be:
-087   *
-088   * pre
-089   * code
-090   * (stub, controller, rpcCallback) 
- stub.xxx(controller, request, rpcCallback)
-091   * /code
-092   * /pre
-093   */
-094  @InterfaceAudience.Public
-095  @FunctionalInterface
-096  interface CoprocessorCallableS, 
R {
-097
-098/**
-099 * Represent the actual protobuf rpc 
call.
-100 * @param stub the asynchronous 
stub
-101 * @param controller the rpc 
controller, has already been prepared for you
-102 * @param rpcCallback the rpc 
callback, has already been prepared for you
-103 */
-104void call(S stub, RpcController 
controller, RpcCallbackR rpcCallback);
-105  }
-106
-107  /**
-108   * Execute the given coprocessor call 
on the region which contains the given {@code row}.
-109   * p
-110   * The {@code stubMaker} is just a 
delegation to the {@code newStub} call. Usually it is only a
-111   * one line lambda expression, like:
-112   *
-113   * pre
-114   * code
-115   * channel - 

[51/51] [partial] hbase-site git commit: Published site at .

2017-09-28 Thread git-site-role
Published site at .


Project: http://git-wip-us.apache.org/repos/asf/hbase-site/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase-site/commit/67deb422
Tree: http://git-wip-us.apache.org/repos/asf/hbase-site/tree/67deb422
Diff: http://git-wip-us.apache.org/repos/asf/hbase-site/diff/67deb422

Branch: refs/heads/asf-site
Commit: 67deb422f5a3ca37d01439e2ccd3e6e25aa4e995
Parents: 50f0a57
Author: jenkins 
Authored: Thu Sep 28 15:13:34 2017 +
Committer: jenkins 
Committed: Thu Sep 28 15:13:34 2017 +

--
 acid-semantics.html | 4 +-
 apache_hbase_reference_guide.pdf| 4 +-
 apidocs/deprecated-list.html|   213 +-
 apidocs/index-all.html  |28 +-
 .../org/apache/hadoop/hbase/HRegionInfo.html|   122 +-
 .../apache/hadoop/hbase/HRegionLocation.html|77 +-
 .../apache/hadoop/hbase/HTableDescriptor.html   |61 +-
 .../hadoop/hbase/class-use/HRegionInfo.html |45 +-
 .../hadoop/hbase/class-use/ServerName.html  | 6 +-
 .../hadoop/hbase/class-use/TableName.html   | 2 +-
 .../apache/hadoop/hbase/client/AsyncAdmin.html  |   264 +-
 .../client/ColumnFamilyDescriptorBuilder.html   |19 +-
 ...ableMultiplexer.HTableMultiplexerStatus.html |20 +-
 .../hadoop/hbase/client/HTableMultiplexer.html  |28 +-
 .../hadoop/hbase/client/IsolationLevel.html | 4 +-
 .../RawAsyncTable.CoprocessorCallable.html  | 4 +-
 .../RawAsyncTable.CoprocessorCallback.html  |36 +-
 .../hadoop/hbase/client/RawAsyncTable.html  |10 +-
 .../apache/hadoop/hbase/client/RegionInfo.html  |70 +-
 .../hadoop/hbase/client/Scan.ReadType.html  | 4 +-
 .../ColumnFamilyDescriptorBuilder.html  | 5 +
 .../hbase/client/class-use/RegionInfo.html  |60 +-
 .../hbase/coprocessor/package-summary.html  | 5 +-
 .../hbase/mapreduce/HFileOutputFormat2.html |20 +-
 .../mapreduce/TableSnapshotInputFormat.html |10 +-
 .../apache/hadoop/hbase/package-summary.html| 2 +-
 .../org/apache/hadoop/hbase/package-use.html| 4 +-
 .../hadoop/hbase/snapshot/ExportSnapshot.html   |22 +-
 .../hadoop/hbase/snapshot/SnapshotInfo.html |22 +-
 .../org/apache/hadoop/hbase/HRegionInfo.html|  1975 +-
 .../apache/hadoop/hbase/HRegionLocation.html|   214 +-
 .../apache/hadoop/hbase/HTableDescriptor.html   |   397 +-
 .../apache/hadoop/hbase/client/AsyncAdmin.html  |  2203 +-
 .../client/ColumnFamilyDescriptorBuilder.html   |  1481 +-
 ...ableMultiplexer.HTableMultiplexerStatus.html |  1289 +-
 .../hadoop/hbase/client/HTableMultiplexer.html  |  1289 +-
 .../RawAsyncTable.CoprocessorCallable.html  |   459 +-
 .../RawAsyncTable.CoprocessorCallback.html  |   459 +-
 .../hadoop/hbase/client/RawAsyncTable.html  |   459 +-
 .../apache/hadoop/hbase/client/RegionInfo.html  |  1461 +-
 .../hbase/mapreduce/HFileOutputFormat2.html |   218 +-
 .../mapreduce/TableSnapshotInputFormat.html |   364 +-
 .../hadoop/hbase/snapshot/ExportSnapshot.html   |  2149 +-
 .../hadoop/hbase/snapshot/SnapshotInfo.html |  1338 +-
 .../apache/hadoop/hbase/util/RegionMover.html   |90 +-
 book.html   | 2 +-
 bulk-loads.html | 4 +-
 checkstyle-aggregate.html   | 23198 -
 checkstyle.rss  |   406 +-
 coc.html| 4 +-
 cygwin.html | 4 +-
 dependencies.html   | 4 +-
 dependency-convergence.html | 4 +-
 dependency-info.html| 4 +-
 dependency-management.html  | 4 +-
 devapidocs/allclasses-frame.html|25 +-
 devapidocs/allclasses-noframe.html  |25 +-
 devapidocs/constant-values.html |29 +-
 devapidocs/deprecated-list.html |   291 +-
 devapidocs/index-all.html   |  2030 +-
 .../org/apache/hadoop/hbase/Abortable.html  |11 +-
 ...Accessor.MetaTableRawScanResultConsumer.html |18 +-
 .../hadoop/hbase/AsyncMetaTableAccessor.html|84 +-
 .../apache/hadoop/hbase/Coprocessor.State.html  |18 +-
 .../org/apache/hadoop/hbase/Coprocessor.html|56 +-
 .../hadoop/hbase/CoprocessorEnvironment.html|53 +-
 .../org/apache/hadoop/hbase/HRegionInfo.html|   196 +-
 .../apache/hadoop/hbase/HRegionLocation.html|85 +-
 .../apache/hadoop/hbase/HTableDescriptor.html   |61 +-
 .../org/apache/hadoop/hbase/JMXListener.html|69 +-
 .../MetaTableAccessor.CloseableVisitor.html | 2 +-
 .../MetaTableAccessor.CollectAllVisitor.html| 6 +-
 

[16/51] [partial] hbase-site git commit: Published site at .

2017-09-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/67deb422/devapidocs/org/apache/hadoop/hbase/class-use/HRegionInfo.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/class-use/HRegionInfo.html 
b/devapidocs/org/apache/hadoop/hbase/class-use/HRegionInfo.html
index a316414..b94fd89 100644
--- a/devapidocs/org/apache/hadoop/hbase/class-use/HRegionInfo.html
+++ b/devapidocs/org/apache/hadoop/hbase/class-use/HRegionInfo.html
@@ -87,53 +87,12 @@
 
 
 
-org.apache.hadoop.hbase.backup
-
-
-
-org.apache.hadoop.hbase.backup.util
-
-
-
 org.apache.hadoop.hbase.client
 
 Provides HBase Client
 
 
 
-org.apache.hadoop.hbase.client.coprocessor
-
-Provides client classes for invoking Coprocessor RPC 
protocols
-
-
- Overview
- Example 
Usage
-
-
-
-
-org.apache.hadoop.hbase.client.locking
-
-
-
-org.apache.hadoop.hbase.coordination
-
-
-
-org.apache.hadoop.hbase.coprocessor
-
-Table of Contents
-
-
-
-org.apache.hadoop.hbase.favored
-
-
-
-org.apache.hadoop.hbase.io
-
-
-
 org.apache.hadoop.hbase.mapred
 
 Provides HBase http://wiki.apache.org/hadoop/HadoopMapReduce;>MapReduce
@@ -148,89 +107,13 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 
 
-org.apache.hadoop.hbase.master
-
-
-
-org.apache.hadoop.hbase.master.assignment
-
-
-
-org.apache.hadoop.hbase.master.balancer
-
-
-
-org.apache.hadoop.hbase.master.locking
-
-
-
-org.apache.hadoop.hbase.master.normalizer
-
-
-
-org.apache.hadoop.hbase.master.procedure
-
-
-
-org.apache.hadoop.hbase.master.snapshot
-
-
-
-org.apache.hadoop.hbase.mob
-
-
-
-org.apache.hadoop.hbase.namespace
-
-
-
-org.apache.hadoop.hbase.quotas
-
-
-
-org.apache.hadoop.hbase.regionserver
-
-
-
-org.apache.hadoop.hbase.regionserver.handler
-
-
-
 org.apache.hadoop.hbase.regionserver.wal
 
 
 
-org.apache.hadoop.hbase.replication.regionserver
-
-
-
-org.apache.hadoop.hbase.rsgroup
-
-
-
-org.apache.hadoop.hbase.snapshot
-
-
-
-org.apache.hadoop.hbase.tmpl.regionserver
-
-
-
-org.apache.hadoop.hbase.tool
-
-
-
 org.apache.hadoop.hbase.util
 
 
-
-org.apache.hadoop.hbase.wal
-
-
-
-org.apache.hadoop.hbase.zookeeper
-
-
 
 
 
@@ -254,10 +137,6 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 HRegionInfo for first meta region
 
 
-
-private HRegionInfo
-HRegionLocation.regionInfo
-
 
 
 
@@ -278,32 +157,14 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 
 
-static HRegionInfo
-MetaTableAccessor.getClosestRegionInfo(Connectionconnection,
-TableNametableName,
-byte[]row)
-
-
-static HRegionInfo
-MetaTableAccessor.getHRegionInfo(Resultdata)
-Returns HRegionInfo object from the column
- HConstants.CATALOG_FAMILY:HConstants.REGIONINFO_QUALIFIER of the catalog
- table Result.
-
-
-
-private static HRegionInfo
-MetaTableAccessor.getHRegionInfo(Resultr,
-  byte[]qualifier)
-Returns the HRegionInfo object from the column HConstants.CATALOG_FAMILY
 and
- qualifier of the catalog table result.
+HRegionInfo
+HRegionLocation.getRegionInfo()
+Deprecated.
+Since 2.0.0. Will remove 
in 3.0.0. Use HRegionLocation.getRegion()}
 instead.
+
 
 
 
-HRegionInfo
-HRegionLocation.getRegionInfo()
-
-
 static HRegionInfo
 HRegionInfo.parseFrom(byte[]bytes)
 Deprecated.
@@ -312,7 +173,7 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 
 
-
+
 static HRegionInfo
 HRegionInfo.parseFrom(byte[]bytes,
  intoffset,
@@ -323,7 +184,7 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 
 
-
+
 static HRegionInfo
 HRegionInfo.parseFrom(http://docs.oracle.com/javase/8/docs/api/java/io/DataInputStream.html?is-external=true;
 title="class or interface in java.io">DataInputStreamin)
 Deprecated.
@@ -332,7 +193,7 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 
 
-
+
 static HRegionInfo
 HRegionInfo.parseFromOrNull(byte[]bytes)
 Deprecated.
@@ -341,7 +202,7 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 
 
-
+
 static HRegionInfo
 HRegionInfo.parseFromOrNull(byte[]bytes,
intoffset,
@@ -352,12 +213,6 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 
 
-
-static HRegionInfo
-MetaTableAccessor.parseRegionInfoFromRegionName(byte[]regionName)
-Returns an HRI parsed from this regionName.
-
-
 
 
 
@@ -368,114 +223,6 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 
 
-static http://docs.oracle.com/javase/8/docs/api/java/util/NavigableMap.html?is-external=true;
 title="class or interface in java.util">NavigableMapHRegionInfo,ServerName
-MetaTableAccessor.allTableRegions(Connectionconnection,
-   TableNametableName)
-Deprecated.
-use MetaTableAccessor.getTableRegionsAndLocations(org.apache.hadoop.hbase.client.Connection,
 org.apache.hadoop.hbase.TableName), region can have multiple 
locations
-
-
-
-

[07/51] [partial] hbase-site git commit: Published site at .

2017-09-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/67deb422/devapidocs/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor.html
 
b/devapidocs/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor.html
index 422a3da..81299c1 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor.html
@@ -118,7 +118,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Private
-public static class ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor
+public static class ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor
 extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
 implements ColumnFamilyDescriptor, http://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true;
 title="class or interface in java.lang">ComparableColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor
 An ModifyableFamilyDescriptor contains information about a 
column family such as the
@@ -627,7 +627,7 @@ implements 
 
 name
-private finalbyte[] name
+private finalbyte[] name
 
 
 
@@ -636,7 +636,7 @@ implements 
 
 values
-private finalhttp://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">MapBytes,Bytes values
+private finalhttp://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">MapBytes,Bytes values
 
 
 
@@ -645,7 +645,7 @@ implements 
 
 configuration
-private finalhttp://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">Maphttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String,http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String configuration
+private finalhttp://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">Maphttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String,http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String configuration
 A map which holds the configuration specific to the column 
family. The
  keys of the map have the same names as config keys and override the
  defaults with cf-specific settings. Example usage may be for compactions,
@@ -667,7 +667,7 @@ implements 
 ModifyableColumnFamilyDescriptor
 @InterfaceAudience.Private
-publicModifyableColumnFamilyDescriptor(byte[]name)
+publicModifyableColumnFamilyDescriptor(byte[]name)
 Construct a column descriptor specifying only the family 
name The other
  attributes are defaulted.
 
@@ -685,7 +685,7 @@ public
 ModifyableColumnFamilyDescriptor
 @InterfaceAudience.Private
-publicModifyableColumnFamilyDescriptor(ColumnFamilyDescriptordesc)
+publicModifyableColumnFamilyDescriptor(ColumnFamilyDescriptordesc)
 Constructor. Makes a deep copy of the supplied descriptor.
  TODO: make this private after the HCD is removed.
 
@@ -700,7 +700,7 @@ public
 
 ModifyableColumnFamilyDescriptor
-privateModifyableColumnFamilyDescriptor(byte[]name,
+privateModifyableColumnFamilyDescriptor(byte[]name,
  http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">MapBytes,Bytesvalues,
  http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">Maphttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String,http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringconfig)
 
@@ -719,7 +719,7 @@ public
 
 getName
-publicbyte[]getName()
+publicbyte[]getName()
 
 Specified by:
 getNamein
 interfaceColumnFamilyDescriptor
@@ -734,7 +734,7 @@ public
 
 getNameAsString
-publichttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringgetNameAsString()
+publichttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringgetNameAsString()
 
 Specified by:
 getNameAsStringin
 interfaceColumnFamilyDescriptor
@@ 

[23/51] [partial] hbase-site git commit: Published site at .

2017-09-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/67deb422/devapidocs/org/apache/hadoop/hbase/HRegionLocation.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/HRegionLocation.html 
b/devapidocs/org/apache/hadoop/hbase/HRegionLocation.html
index 863b818..2b43774 100644
--- a/devapidocs/org/apache/hadoop/hbase/HRegionLocation.html
+++ b/devapidocs/org/apache/hadoop/hbase/HRegionLocation.html
@@ -18,8 +18,8 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10};
-var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],8:["t4","Concrete Methods"]};
+var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":42,"i7":10,"i8":10,"i9":10,"i10":10};
+var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],8:["t4","Concrete Methods"],32:["t6","Deprecated Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
 var tableTab = "tableTab";
@@ -114,10 +114,10 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Public
-public class HRegionLocation
+public class HRegionLocation
 extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
 implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true;
 title="class or interface in java.lang">ComparableHRegionLocation
-Data structure to hold HRegionInfo and the address for the 
hosting
+Data structure to hold RegionInfo and the address for the 
hosting
  HRegionServer.  Immutable.  Comparable, but we compare the 'location' only:
  i.e. the hostname and port, and *not* the regioninfo.  This means two
  instances are the same if they refer to the same 'location' (the same
@@ -147,7 +147,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 Field and Description
 
 
-private HRegionInfo
+private RegionInfo
 regionInfo
 
 
@@ -173,11 +173,11 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 Constructor and Description
 
 
-HRegionLocation(HRegionInforegionInfo,
+HRegionLocation(RegionInforegionInfo,
ServerNameserverName)
 
 
-HRegionLocation(HRegionInforegionInfo,
+HRegionLocation(RegionInforegionInfo,
ServerNameserverName,
longseqNum)
 
@@ -191,7 +191,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 
 Method Summary
 
-All MethodsInstance MethodsConcrete Methods
+All MethodsInstance MethodsConcrete MethodsDeprecated Methods
 
 Modifier and Type
 Method and Description
@@ -217,22 +217,30 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 getPort()
 
 
-HRegionInfo
-getRegionInfo()
+RegionInfo
+getRegion()
 
 
+HRegionInfo
+getRegionInfo()
+Deprecated.
+Since 2.0.0. Will remove 
in 3.0.0. Use getRegion()}
 instead.
+
+
+
+
 long
 getSeqNum()
 
-
+
 ServerName
 getServerName()
 
-
+
 int
 hashCode()
 
-
+
 http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
 toString()
 
@@ -264,7 +272,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 
 
 regionInfo
-private finalHRegionInfo regionInfo
+private finalRegionInfo regionInfo
 
 
 
@@ -273,7 +281,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 
 
 serverName
-private finalServerName serverName
+private finalServerName serverName
 
 
 
@@ -282,7 +290,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 
 
 seqNum
-private finallong seqNum
+private finallong seqNum
 
 
 
@@ -293,23 +301,23 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 
 
 Constructor Detail
-
+
 
 
 
 
 HRegionLocation
-publicHRegionLocation(HRegionInforegionInfo,
+publicHRegionLocation(RegionInforegionInfo,
ServerNameserverName)
 
 
-
+
 
 
 
 
 HRegionLocation
-publicHRegionLocation(HRegionInforegionInfo,
+publicHRegionLocation(RegionInforegionInfo,
ServerNameserverName,
longseqNum)
 
@@ -328,7 +336,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 
 
 toString
-publichttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringtoString()
+publichttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringtoString()
 
 Overrides:
 http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#toString--;
 title="class or interface in java.lang">toStringin 
classhttp://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
@@ -343,7 +351,7 @@ implements 

[27/51] [partial] hbase-site git commit: Published site at .

2017-09-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/67deb422/devapidocs/index-all.html
--
diff --git a/devapidocs/index-all.html b/devapidocs/index-all.html
index 84915a2..e4f6817 100644
--- a/devapidocs/index-all.html
+++ b/devapidocs/index-all.html
@@ -292,7 +292,7 @@
 
 abortRequested
 - Variable in class org.apache.hadoop.hbase.regionserver.HRegionServer
 
-abortServer(CoprocessorEnvironment,
 Throwable) - Method in class org.apache.hadoop.hbase.coprocessor.CoprocessorHost
+abortServer(E,
 Throwable) - Method in class org.apache.hadoop.hbase.coprocessor.CoprocessorHost
 
 abortServer(String,
 Throwable) - Method in class org.apache.hadoop.hbase.coprocessor.CoprocessorHost
 
@@ -464,7 +464,7 @@
 
 Base class for all the Region procedures that want to use a 
StateMachine.
 
-AbstractStateMachineRegionProcedure(MasterProcedureEnv,
 HRegionInfo) - Constructor for class 
org.apache.hadoop.hbase.master.procedure.AbstractStateMachineRegionProcedure
+AbstractStateMachineRegionProcedure(MasterProcedureEnv,
 RegionInfo) - Constructor for class 
org.apache.hadoop.hbase.master.procedure.AbstractStateMachineRegionProcedure
 
 AbstractStateMachineRegionProcedure()
 - Constructor for class org.apache.hadoop.hbase.master.procedure.AbstractStateMachineRegionProcedure
 
@@ -547,7 +547,7 @@
 
 acceptInboundMessage(Object)
 - Method in class org.apache.hadoop.hbase.client.ClusterStatusListener.MulticastListener.ClusterStatusHandler
 
-acceptPlan(HashMapHRegionInfo,
 RegionStates.RegionStateNode, MapServerName, 
ListHRegionInfo) - Method in class 
org.apache.hadoop.hbase.master.assignment.AssignmentManager
+acceptPlan(HashMapRegionInfo,
 RegionStates.RegionStateNode, MapServerName, 
ListRegionInfo) - Method in class 
org.apache.hadoop.hbase.master.assignment.AssignmentManager
 
 access(long)
 - Method in class org.apache.hadoop.hbase.io.hfile.bucket.BucketCache.BucketEntry
 
@@ -1264,7 +1264,7 @@
 
 Add this table to the tracker and then read a watch on that 
node.
 
-addAssignment(HRegionInfo,
 ServerName) - Method in class org.apache.hadoop.hbase.master.SnapshotOfRegionAssignmentFromMeta
+addAssignment(RegionInfo,
 ServerName) - Method in class org.apache.hadoop.hbase.master.SnapshotOfRegionAssignmentFromMeta
 
 addAttribute(String,
 Object) - Method in class org.apache.hadoop.hbase.rest.model.ColumnSchemaModel
 
@@ -1318,8 +1318,6 @@
 
 addChangedReaderObserver(ChangedReadersObserver)
 - Method in class org.apache.hadoop.hbase.regionserver.HStore
 
-addChangedReaderObserver(ChangedReadersObserver)
 - Method in interface org.apache.hadoop.hbase.regionserver.Store
-
 addChild(TokenizerNode)
 - Method in class org.apache.hadoop.hbase.codec.prefixtree.encode.tokenize.TokenizerNode
 
 addChildExp(ExpressionNode)
 - Method in class org.apache.hadoop.hbase.security.visibility.expression.NonLeafExpressionNode
@@ -1537,11 +1535,11 @@
 
 addCurrentScanners(List?
 extends KeyValueScanner) - Method in class 
org.apache.hadoop.hbase.regionserver.StoreScanner
 
-addDaughter(Connection,
 HRegionInfo, ServerName, long) - Static method in class 
org.apache.hadoop.hbase.MetaTableAccessor
+addDaughter(Connection,
 RegionInfo, ServerName, long) - Static method in class 
org.apache.hadoop.hbase.MetaTableAccessor
 
 Adds a daughter region entry to meta.
 
-addDaughtersToPut(Put,
 HRegionInfo, HRegionInfo) - Static method in class 
org.apache.hadoop.hbase.MetaTableAccessor
+addDaughtersToPut(Put,
 RegionInfo, RegionInfo) - Static method in class 
org.apache.hadoop.hbase.MetaTableAccessor
 
 Adds split daughters to the Put
 
@@ -1933,9 +1931,9 @@
 Add Dropwizard-Metrics rate information to a 
Hadoop-Metrics2 record builder, converting the
  rates to the appropriate unit.
 
-addMobRegion(HRegionInfo)
 - Method in class org.apache.hadoop.hbase.snapshot.SnapshotManifest
+addMobRegion(RegionInfo)
 - Method in class org.apache.hadoop.hbase.snapshot.SnapshotManifest
 
-addMobRegion(HRegionInfo,
 SnapshotManifest.RegionVisitor) - Method in class 
org.apache.hadoop.hbase.snapshot.SnapshotManifest
+addMobRegion(RegionInfo,
 SnapshotManifest.RegionVisitor) - Method in class 
org.apache.hadoop.hbase.snapshot.SnapshotManifest
 
 addMultiException(MultiException,
 Exception) - Method in class org.apache.hadoop.hbase.http.HttpServer
 
@@ -2032,7 +2030,7 @@
 
 addPeerToHFileRefs(String)
 - Method in class org.apache.hadoop.hbase.replication.TableBasedReplicationQueuesImpl
 
-addPrimaryAssignment(HRegionInfo,
 ServerName) - Method in class org.apache.hadoop.hbase.master.SnapshotOfRegionAssignmentFromMeta
+addPrimaryAssignment(RegionInfo,
 ServerName) - Method in class org.apache.hadoop.hbase.master.SnapshotOfRegionAssignmentFromMeta
 
 addProperty(String,
 String) - Method in class org.apache.hadoop.hbase.rest.model.NamespacesInstanceModel
 
@@ -2062,13 +2060,19 @@
 
 addRegion(int[],
 int) - Method in class org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster
 

[19/51] [partial] hbase-site git commit: Published site at .

2017-09-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/67deb422/devapidocs/org/apache/hadoop/hbase/backup/HFileArchiver.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/backup/HFileArchiver.html 
b/devapidocs/org/apache/hadoop/hbase/backup/HFileArchiver.html
index 9be298a..2db8dcc 100644
--- a/devapidocs/org/apache/hadoop/hbase/backup/HFileArchiver.html
+++ b/devapidocs/org/apache/hadoop/hbase/backup/HFileArchiver.html
@@ -241,9 +241,9 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 static void
-archiveFamily(org.apache.hadoop.fs.FileSystemfs,
+archiveFamily(org.apache.hadoop.fs.FileSystemfs,
  org.apache.hadoop.conf.Configurationconf,
- HRegionInfoparent,
+ RegionInfoparent,
  org.apache.hadoop.fs.PathtableDir,
  byte[]family)
 Remove from the specified region the store files of the 
specified column family,
@@ -252,9 +252,9 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 static void
-archiveFamilyByFamilyDir(org.apache.hadoop.fs.FileSystemfs,
+archiveFamilyByFamilyDir(org.apache.hadoop.fs.FileSystemfs,
 org.apache.hadoop.conf.Configurationconf,
-HRegionInfoparent,
+RegionInfoparent,
 org.apache.hadoop.fs.PathfamilyDir,
 byte[]family)
 Removes from the specified region the store files of the 
specified column family,
@@ -263,9 +263,9 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 static void
-archiveRegion(org.apache.hadoop.conf.Configurationconf,
+archiveRegion(org.apache.hadoop.conf.Configurationconf,
  org.apache.hadoop.fs.FileSystemfs,
- HRegionInfoinfo)
+ RegionInfoinfo)
 Cleans up all the files for a HRegion by archiving the 
HFiles to the
  archive directory
 
@@ -281,9 +281,9 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 static void
-archiveStoreFile(org.apache.hadoop.conf.Configurationconf,
+archiveStoreFile(org.apache.hadoop.conf.Configurationconf,
 org.apache.hadoop.fs.FileSystemfs,
-HRegionInforegionInfo,
+RegionInforegionInfo,
 org.apache.hadoop.fs.PathtableDir,
 byte[]family,
 org.apache.hadoop.fs.PathstoreFile)
@@ -292,9 +292,9 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 static void
-archiveStoreFiles(org.apache.hadoop.conf.Configurationconf,
+archiveStoreFiles(org.apache.hadoop.conf.Configurationconf,
  org.apache.hadoop.fs.FileSystemfs,
- HRegionInforegionInfo,
+ RegionInforegionInfo,
  org.apache.hadoop.fs.PathtableDir,
  byte[]family,
  http://docs.oracle.com/javase/8/docs/api/java/util/Collection.html?is-external=true;
 title="class or interface in java.util">CollectionHStoreFilecompactedFiles)
@@ -316,9 +316,9 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 static boolean
-exists(org.apache.hadoop.conf.Configurationconf,
+exists(org.apache.hadoop.conf.Configurationconf,
   org.apache.hadoop.fs.FileSystemfs,
-  HRegionInfoinfo)
+  RegionInfoinfo)
 
 
 private static http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListHFileArchiver.File
@@ -430,7 +430,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 Method Detail
-
+
 
 
 
@@ -438,7 +438,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 exists
 public staticbooleanexists(org.apache.hadoop.conf.Configurationconf,
  org.apache.hadoop.fs.FileSystemfs,
- HRegionInfoinfo)
+ RegionInfoinfo)
   throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 
 Returns:
@@ -448,7 +448,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 
-
+
 
 
 
@@ -456,7 +456,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 archiveRegion
 public staticvoidarchiveRegion(org.apache.hadoop.conf.Configurationconf,
  org.apache.hadoop.fs.FileSystemfs,
- HRegionInfoinfo)
+ RegionInfoinfo)
   throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 Cleans up all the files for a HRegion by archiving the 
HFiles to the
  archive directory
@@ -464,7 +464,7 @@ extends 

[48/51] [partial] hbase-site git commit: Published site at .

2017-09-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/67deb422/apidocs/org/apache/hadoop/hbase/HTableDescriptor.html
--
diff --git a/apidocs/org/apache/hadoop/hbase/HTableDescriptor.html 
b/apidocs/org/apache/hadoop/hbase/HTableDescriptor.html
index e6d66dc..3aacd19 100644
--- a/apidocs/org/apache/hadoop/hbase/HTableDescriptor.html
+++ b/apidocs/org/apache/hadoop/hbase/HTableDescriptor.html
@@ -1219,7 +1219,7 @@ implements 
 
 NAMESPACE_FAMILY_INFO
-public static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String NAMESPACE_FAMILY_INFO
+public static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String NAMESPACE_FAMILY_INFO
 Deprecated.
 
 See Also:
@@ -1233,7 +1233,7 @@ implements 
 
 NAMESPACE_FAMILY_INFO_BYTES
-public static finalbyte[] NAMESPACE_FAMILY_INFO_BYTES
+public static finalbyte[] NAMESPACE_FAMILY_INFO_BYTES
 Deprecated.
 
 
@@ -1243,7 +1243,7 @@ implements 
 
 NAMESPACE_COL_DESC_BYTES
-public static finalbyte[] NAMESPACE_COL_DESC_BYTES
+public static finalbyte[] NAMESPACE_COL_DESC_BYTES
 Deprecated.
 
 
@@ -1253,7 +1253,7 @@ implements 
 
 NAMESPACE_TABLEDESC
-public static finalHTableDescriptor NAMESPACE_TABLEDESC
+public static finalHTableDescriptor NAMESPACE_TABLEDESC
 Deprecated.
 Table descriptor for namespace table
 
@@ -2244,12 +2244,11 @@ public
 
 addCoprocessor
-publicHTableDescriptoraddCoprocessor(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringclassName)
+publicHTableDescriptoraddCoprocessor(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringclassName)
 throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 Deprecated.
 Add a table coprocessor to this table. The coprocessor
- type must be org.apache.hadoop.hbase.coprocessor.RegionObserver
- or Endpoint.
+ type must be org.apache.hadoop.hbase.coprocessor.RegionCoprocessor.
  It won't check if the class can be loaded or not.
  Whether a coprocessor is loadable or not will be determined when
  a region is opened.
@@ -2267,15 +2266,14 @@ public
 
 addCoprocessor
-publicHTableDescriptoraddCoprocessor(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringclassName,
+publicHTableDescriptoraddCoprocessor(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringclassName,

org.apache.hadoop.fs.PathjarFilePath,
intpriority,
http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">Maphttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String,http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringkvs)
 throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 Deprecated.
 Add a table coprocessor to this table. The coprocessor
- type must be org.apache.hadoop.hbase.coprocessor.RegionObserver
- or Endpoint.
+ type must be org.apache.hadoop.hbase.coprocessor.RegionCoprocessor.
  It won't check if the class can be loaded or not.
  Whether a coprocessor is loadable or not will be determined when
  a region is opened.
@@ -2297,12 +2295,11 @@ public
 
 addCoprocessorWithSpec
-publicHTableDescriptoraddCoprocessorWithSpec(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringspecStr)
+publicHTableDescriptoraddCoprocessorWithSpec(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringspecStr)
 throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 Deprecated.
 Add a table coprocessor to this table. The coprocessor
- type must be org.apache.hadoop.hbase.coprocessor.RegionObserver
- or Endpoint.
+ type must be org.apache.hadoop.hbase.coprocessor.RegionCoprocessor.
  It won't check if the class can be loaded or not.
  Whether a coprocessor is loadable or not will be determined when
  a region is opened.
@@ -2321,7 +2318,7 @@ public
 
 hasCoprocessor

[01/51] [partial] hbase-site git commit: Published site at .

2017-09-28 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 50f0a5731 -> 67deb422f


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/67deb422/devapidocs/org/apache/hadoop/hbase/client/RegionInfo.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/RegionInfo.html 
b/devapidocs/org/apache/hadoop/hbase/client/RegionInfo.html
index 56d899b..3a9dd60 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/RegionInfo.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/RegionInfo.html
@@ -106,7 +106,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Public
-public interface RegionInfo
+public interface RegionInfo
 Information about a region. A region is a range of keys in 
the whole keyspace
  of a table, an identifier (a timestamp) for differentiating between subset
  ranges (after region split) and a replicaId for differentiating the instance
@@ -441,7 +441,7 @@ public interface 
 ENC_SEPARATOR
 @InterfaceAudience.Private
-static finalint ENC_SEPARATOR
+static finalint ENC_SEPARATOR
 Separator used to demarcate the encodedName in a region name
  in the new format. See description on new format above.
 
@@ -457,7 +457,7 @@ static finalint 
 MD5_HEX_LENGTH
 @InterfaceAudience.Private
-static finalint MD5_HEX_LENGTH
+static finalint MD5_HEX_LENGTH
 
 See Also:
 Constant
 Field Values
@@ -471,7 +471,7 @@ static finalint 
 DEFAULT_REPLICA_ID
 @InterfaceAudience.Private
-static finalint DEFAULT_REPLICA_ID
+static finalint DEFAULT_REPLICA_ID
 
 See Also:
 Constant
 Field Values
@@ -485,7 +485,7 @@ static finalint 
 REPLICA_ID_FORMAT
 @InterfaceAudience.Private
-static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String REPLICA_ID_FORMAT
+static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String REPLICA_ID_FORMAT
 to keep appended int's sorted in string format. Only allows 
2 bytes
  to be sorted for replicaId.
 
@@ -501,7 +501,7 @@ static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/St
 
 REPLICA_ID_DELIMITER
 @InterfaceAudience.Private
-static finalbyte REPLICA_ID_DELIMITER
+static finalbyte REPLICA_ID_DELIMITER
 
 See Also:
 Constant
 Field Values
@@ -515,7 +515,7 @@ static finalbyte 
 INVALID_REGION_NAME_FORMAT_MESSAGE
 @InterfaceAudience.Private
-static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String INVALID_REGION_NAME_FORMAT_MESSAGE
+static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String INVALID_REGION_NAME_FORMAT_MESSAGE
 
 See Also:
 Constant
 Field Values
@@ -529,7 +529,7 @@ static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/St
 
 COMPARATOR
 @InterfaceAudience.Private
-static finalhttp://docs.oracle.com/javase/8/docs/api/java/util/Comparator.html?is-external=true;
 title="class or interface in java.util">ComparatorRegionInfo COMPARATOR
+static finalhttp://docs.oracle.com/javase/8/docs/api/java/util/Comparator.html?is-external=true;
 title="class or interface in java.util">ComparatorRegionInfo COMPARATOR
 
 
 
@@ -546,7 +546,7 @@ static finalhttp://docs.oracle.com/javase/8/docs/api/java/util/Co
 
 
 getShortNameToLog
-http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringgetShortNameToLog()
+http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringgetShortNameToLog()
 
 Returns:
 Return a short, printable name for this region
@@ -560,7 +560,7 @@ static finalhttp://docs.oracle.com/javase/8/docs/api/java/util/Co
 
 
 getRegionId
-longgetRegionId()
+longgetRegionId()
 
 Returns:
 the regionId.
@@ -573,7 +573,7 @@ static finalhttp://docs.oracle.com/javase/8/docs/api/java/util/Co
 
 
 getRegionName
-byte[]getRegionName()
+byte[]getRegionName()
 
 Returns:
 the regionName as an array of bytes.
@@ -588,7 +588,7 @@ static finalhttp://docs.oracle.com/javase/8/docs/api/java/util/Co
 
 
 getRegionNameAsString
-http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringgetRegionNameAsString()
+http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringgetRegionNameAsString()
 
 Returns:
 Region name as a String for use in logging, etc.
@@ -601,7 +601,7 @@ static finalhttp://docs.oracle.com/javase/8/docs/api/java/util/Co
 
 
 getEncodedName
-http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringgetEncodedName()
+http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or 

[46/51] [partial] hbase-site git commit: Published site at .

2017-09-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/67deb422/apidocs/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.html
--
diff --git 
a/apidocs/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.html 
b/apidocs/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.html
index 58cd077..b3153ef 100644
--- a/apidocs/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.html
+++ b/apidocs/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":10,"i1":9,"i2":10,"i3":9,"i4":10,"i5":9,"i6":9,"i7":9,"i8":9,"i9":9,"i10":9,"i11":9,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":9};
+var methods = 
{"i0":10,"i1":9,"i2":10,"i3":9,"i4":10,"i5":9,"i6":9,"i7":9,"i8":9,"i9":9,"i10":9,"i11":9,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":9};
 var tabs = {65535:["t0","All Methods"],1:["t1","Static 
Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -460,6 +460,11 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 Bytesvalue)
 
 
+ColumnFamilyDescriptorBuilder
+setValue(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringkey,
+http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in 
java.lang">Stringvalue)
+
+
 static byte[]
 toByteArray(ColumnFamilyDescriptordesc)
 
@@ -1224,13 +1229,23 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 
-
+
 
 setValue
 publicColumnFamilyDescriptorBuildersetValue(byte[]key,
   byte[]value)
 
 
+
+
+
+
+
+setValue
+publicColumnFamilyDescriptorBuildersetValue(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringkey,
+  http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringvalue)
+
+
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/67deb422/apidocs/org/apache/hadoop/hbase/client/HTableMultiplexer.HTableMultiplexerStatus.html
--
diff --git 
a/apidocs/org/apache/hadoop/hbase/client/HTableMultiplexer.HTableMultiplexerStatus.html
 
b/apidocs/org/apache/hadoop/hbase/client/HTableMultiplexer.HTableMultiplexerStatus.html
index 2868934..b7c7a33 100644
--- 
a/apidocs/org/apache/hadoop/hbase/client/HTableMultiplexer.HTableMultiplexerStatus.html
+++ 
b/apidocs/org/apache/hadoop/hbase/client/HTableMultiplexer.HTableMultiplexerStatus.html
@@ -114,7 +114,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Public
-public static class HTableMultiplexer.HTableMultiplexerStatus
+public static class HTableMultiplexer.HTableMultiplexerStatus
 extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
 HTableMultiplexerStatus keeps track of the current status 
of the HTableMultiplexer.
  report the number of buffered requests and the number of the failed (dropped) 
requests
@@ -214,7 +214,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 HTableMultiplexerStatus
-publicHTableMultiplexerStatus(http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">MapHRegionLocation,org.apache.hadoop.hbase.client.HTableMultiplexer.FlushWorkerserverToFlushWorkerMap)
+publicHTableMultiplexerStatus(http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">MapHRegionLocation,org.apache.hadoop.hbase.client.HTableMultiplexer.FlushWorkerserverToFlushWorkerMap)
 
 
 
@@ -231,7 +231,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 getTotalBufferedCounter
-publiclonggetTotalBufferedCounter()
+publiclonggetTotalBufferedCounter()
 
 
 
@@ -240,7 +240,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 getTotalFailedCounter
-publiclonggetTotalFailedCounter()
+publiclonggetTotalFailedCounter()
 
 
 
@@ -249,7 +249,7 @@ extends 

[02/51] [partial] hbase-site git commit: Published site at .

2017-09-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/67deb422/devapidocs/org/apache/hadoop/hbase/client/HTableMultiplexer.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/HTableMultiplexer.html 
b/devapidocs/org/apache/hadoop/hbase/client/HTableMultiplexer.html
index f083030..aa20629 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/HTableMultiplexer.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/HTableMultiplexer.html
@@ -110,7 +110,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Public
-public class HTableMultiplexer
+public class HTableMultiplexer
 extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
 HTableMultiplexer provides a thread-safe non blocking PUT 
API across all the tables.
  Each put will be sharded into different buffer queues based on its 
destination region server.
@@ -365,7 +365,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 LOG
-private static finalorg.apache.commons.logging.Log LOG
+private static finalorg.apache.commons.logging.Log LOG
 
 
 
@@ -374,7 +374,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 TABLE_MULTIPLEXER_FLUSH_PERIOD_MS
-public static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String TABLE_MULTIPLEXER_FLUSH_PERIOD_MS
+public static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String TABLE_MULTIPLEXER_FLUSH_PERIOD_MS
 
 See Also:
 Constant
 Field Values
@@ -387,7 +387,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 TABLE_MULTIPLEXER_INIT_THREADS
-public static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String TABLE_MULTIPLEXER_INIT_THREADS
+public static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String TABLE_MULTIPLEXER_INIT_THREADS
 
 See Also:
 Constant
 Field Values
@@ -400,7 +400,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 TABLE_MULTIPLEXER_MAX_RETRIES_IN_QUEUE
-public static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String TABLE_MULTIPLEXER_MAX_RETRIES_IN_QUEUE
+public static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String TABLE_MULTIPLEXER_MAX_RETRIES_IN_QUEUE
 
 See Also:
 Constant
 Field Values
@@ -413,7 +413,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 serverToFlushWorkerMap
-private finalhttp://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">MapHRegionLocation,HTableMultiplexer.FlushWorker serverToFlushWorkerMap
+private finalhttp://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">MapHRegionLocation,HTableMultiplexer.FlushWorker serverToFlushWorkerMap
 The map between each region server to its flush worker
 
 
@@ -423,7 +423,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 workerConf
-private finalorg.apache.hadoop.conf.Configuration workerConf
+private finalorg.apache.hadoop.conf.Configuration workerConf
 
 
 
@@ -432,7 +432,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 conn
-private finalClusterConnection conn
+private finalClusterConnection conn
 
 
 
@@ -441,7 +441,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 pool
-private finalhttp://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ExecutorService.html?is-external=true;
 title="class or interface in java.util.concurrent">ExecutorService pool
+private finalhttp://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ExecutorService.html?is-external=true;
 title="class or interface in java.util.concurrent">ExecutorService pool
 
 
 
@@ -450,7 +450,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 maxAttempts
-private finalint maxAttempts
+private finalint maxAttempts
 
 
 
@@ -459,7 +459,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 perRegionServerBufferQueueSize
-private finalint perRegionServerBufferQueueSize
+private finalint perRegionServerBufferQueueSize
 
 
 
@@ -468,7 +468,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 maxKeyValueSize
-private finalint maxKeyValueSize
+private finalint maxKeyValueSize
 
 
 
@@ -477,7 +477,7 @@ extends 

[14/51] [partial] hbase-site git commit: Published site at .

2017-09-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/67deb422/devapidocs/org/apache/hadoop/hbase/class-use/ServerName.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/class-use/ServerName.html 
b/devapidocs/org/apache/hadoop/hbase/class-use/ServerName.html
index c08b707..7de2eb8 100644
--- a/devapidocs/org/apache/hadoop/hbase/class-use/ServerName.html
+++ b/devapidocs/org/apache/hadoop/hbase/class-use/ServerName.html
@@ -342,7 +342,7 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 
 
-static http://docs.oracle.com/javase/8/docs/api/java/util/NavigableMap.html?is-external=true;
 title="class or interface in java.util">NavigableMapHRegionInfo,ServerName
+static http://docs.oracle.com/javase/8/docs/api/java/util/NavigableMap.html?is-external=true;
 title="class or interface in java.util">NavigableMapRegionInfo,ServerName
 MetaTableAccessor.allTableRegions(Connectionconnection,
TableNametableName)
 Deprecated.
@@ -359,7 +359,7 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 ClusterStatus.getDeadServerNames()
 
 
-static PairHRegionInfo,ServerName
+static PairRegionInfo,ServerName
 MetaTableAccessor.getRegion(Connectionconnection,
  byte[]regionName)
 Deprecated.
@@ -379,14 +379,14 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 ClusterStatus.getServers()
 
 
-static http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListPairHRegionInfo,ServerName
+static http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListPairRegionInfo,ServerName
 MetaTableAccessor.getTableRegionsAndLocations(Connectionconnection,
TableNametableName)
 Do not use this method to get meta table regions, use 
methods in MetaTableLocator instead.
 
 
 
-static http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListPairHRegionInfo,ServerName
+static http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListPairRegionInfo,ServerName
 MetaTableAccessor.getTableRegionsAndLocations(Connectionconnection,
TableNametableName,
booleanexcludeOfflinedSplitParents)
@@ -394,7 +394,7 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 
 
-private static http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListPairHRegionInfo,ServerName
+private static http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListPairRegionInfo,ServerName
 AsyncMetaTableAccessor.getTableRegionsAndLocations(RawAsyncTablemetaTable,
http://docs.oracle.com/javase/8/docs/api/java/util/Optional.html?is-external=true;
 title="class or interface in java.util">OptionalTableNametableName,
booleanexcludeOfflinedSplitParents)
@@ -412,8 +412,8 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 
 static void
-MetaTableAccessor.addDaughter(Connectionconnection,
-   HRegionInforegionInfo,
+MetaTableAccessor.addDaughter(Connectionconnection,
+   RegionInforegionInfo,
ServerNamesn,
longopenSeqNum)
 Adds a daughter region entry to meta.
@@ -436,7 +436,7 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 ClusterStatus.getLoad(ServerNamesn)
 
 
-static http://docs.oracle.com/javase/8/docs/api/java/util/NavigableMap.html?is-external=true;
 title="class or interface in java.util">NavigableMapHRegionInfo,Result
+static http://docs.oracle.com/javase/8/docs/api/java/util/NavigableMap.html?is-external=true;
 title="class or interface in java.util">NavigableMapRegionInfo,Result
 MetaTableAccessor.getServerUserRegions(Connectionconnection,
 ServerNameserverName)
 
@@ -467,10 +467,10 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 
 static void
-MetaTableAccessor.mergeRegions(Connectionconnection,
-HRegionInfomergedRegion,
-HRegionInforegionA,
-HRegionInforegionB,
+MetaTableAccessor.mergeRegions(Connectionconnection,
+RegionInfomergedRegion,
+RegionInforegionA,
+RegionInforegionB,
 

[43/51] [partial] hbase-site git commit: Published site at .

2017-09-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/67deb422/apidocs/src-html/org/apache/hadoop/hbase/HRegionLocation.html
--
diff --git a/apidocs/src-html/org/apache/hadoop/hbase/HRegionLocation.html 
b/apidocs/src-html/org/apache/hadoop/hbase/HRegionLocation.html
index 3298628..a7220e3 100644
--- a/apidocs/src-html/org/apache/hadoop/hbase/HRegionLocation.html
+++ b/apidocs/src-html/org/apache/hadoop/hbase/HRegionLocation.html
@@ -26,106 +26,120 @@
 018 */
 019package org.apache.hadoop.hbase;
 020
-021import 
org.apache.yetus.audience.InterfaceAudience;
-022import 
org.apache.hadoop.hbase.util.Addressing;
-023
-024/**
-025 * Data structure to hold HRegionInfo and 
the address for the hosting
-026 * HRegionServer.  Immutable.  
Comparable, but we compare the 'location' only:
-027 * i.e. the hostname and port, and *not* 
the regioninfo.  This means two
-028 * instances are the same if they refer 
to the same 'location' (the same
-029 * hostname and port), though they may be 
carrying different regions.
-030 *
-031 * On a big cluster, each client will 
have thousands of instances of this object, often
-032 *  100 000 of them if not million. It's 
important to keep the object size as small
-033 *  as possible.
-034 *
-035 * brThis interface has been 
marked InterfaceAudience.Public in 0.96 and 0.98.
-036 */
-037@InterfaceAudience.Public
-038public class HRegionLocation implements 
ComparableHRegionLocation {
-039  private final HRegionInfo regionInfo;
-040  private final ServerName serverName;
-041  private final long seqNum;
-042
-043  public HRegionLocation(HRegionInfo 
regionInfo, ServerName serverName) {
-044this(regionInfo, serverName, 
HConstants.NO_SEQNUM);
-045  }
-046
-047  public HRegionLocation(HRegionInfo 
regionInfo, ServerName serverName, long seqNum) {
-048this.regionInfo = regionInfo;
-049this.serverName = serverName;
-050this.seqNum = seqNum;
-051  }
-052
-053  /**
-054   * @see java.lang.Object#toString()
-055   */
-056  @Override
-057  public String toString() {
-058return "region=" + (this.regionInfo 
== null ? "null" : this.regionInfo.getRegionNameAsString())
-059+ ", hostname=" + this.serverName 
+ ", seqNum=" + seqNum;
-060  }
-061
-062  /**
-063   * @see 
java.lang.Object#equals(java.lang.Object)
-064   */
-065  @Override
-066  public boolean equals(Object o) {
-067if (this == o) {
-068  return true;
-069}
-070if (o == null) {
-071  return false;
-072}
-073if (!(o instanceof HRegionLocation)) 
{
-074  return false;
-075}
-076return 
this.compareTo((HRegionLocation)o) == 0;
-077  }
-078
-079  /**
-080   * @see java.lang.Object#hashCode()
-081   */
-082  @Override
-083  public int hashCode() {
-084return this.serverName.hashCode();
-085  }
-086
-087  /** @return HRegionInfo */
-088  public HRegionInfo getRegionInfo(){
-089return regionInfo;
-090  }
-091
-092  public String getHostname() {
-093return 
this.serverName.getHostname();
-094  }
-095
-096  public int getPort() {
-097return this.serverName.getPort();
-098  }
-099
-100  public long getSeqNum() {
-101return seqNum;
-102  }
-103
-104  /**
-105   * @return String made of hostname and 
port formatted as
-106   * per {@link 
Addressing#createHostAndPortStr(String, int)}
-107   */
-108  public String getHostnamePort() {
-109return 
Addressing.createHostAndPortStr(this.getHostname(), this.getPort());
-110  }
-111
-112  public ServerName getServerName() {
-113return serverName;
-114  }
-115
-116  @Override
-117  public int compareTo(HRegionLocation o) 
{
-118return 
serverName.compareTo(o.getServerName());
-119  }
-120}
+021import 
org.apache.hadoop.hbase.client.ImmutableHRegionInfo;
+022import 
org.apache.hadoop.hbase.client.RegionInfo;
+023import 
org.apache.hadoop.hbase.util.Addressing;
+024import 
org.apache.yetus.audience.InterfaceAudience;
+025
+026/**
+027 * Data structure to hold RegionInfo and 
the address for the hosting
+028 * HRegionServer.  Immutable.  
Comparable, but we compare the 'location' only:
+029 * i.e. the hostname and port, and *not* 
the regioninfo.  This means two
+030 * instances are the same if they refer 
to the same 'location' (the same
+031 * hostname and port), though they may be 
carrying different regions.
+032 *
+033 * On a big cluster, each client will 
have thousands of instances of this object, often
+034 *  100 000 of them if not million. It's 
important to keep the object size as small
+035 *  as possible.
+036 *
+037 * brThis interface has been 
marked InterfaceAudience.Public in 0.96 and 0.98.
+038 */
+039@InterfaceAudience.Public
+040public class HRegionLocation implements 
ComparableHRegionLocation {
+041  private final RegionInfo regionInfo;
+042  private final ServerName serverName;
+043  private final long seqNum;
+044
+045  public HRegionLocation(RegionInfo 
regionInfo, ServerName serverName) {
+046this(regionInfo, 

[30/51] [partial] hbase-site git commit: Published site at .

2017-09-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/67deb422/checkstyle-aggregate.html
--
diff --git a/checkstyle-aggregate.html b/checkstyle-aggregate.html
index f3683e1..a9c1c27 100644
--- a/checkstyle-aggregate.html
+++ b/checkstyle-aggregate.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  Checkstyle Results
 
@@ -286,10 +286,10 @@
 Warnings
 Errors
 
-2042
+2051
 0
 0
-14018
+13704
 
 Files
 
@@ -317,7 +317,7 @@
 org/apache/hadoop/hbase/AsyncMetaTableAccessor.java
 0
 0
-54
+52
 
 org/apache/hadoop/hbase/AuthUtil.java
 0
@@ -409,160 +409,160 @@
 0
 1
 
+org/apache/hadoop/hbase/Coprocessor.java
+0
+0
+1
+
 org/apache/hadoop/hbase/CoprocessorEnvironment.java
 0
 0
 3
-
+
 org/apache/hadoop/hbase/DoNotRetryIOException.java
 0
 0
 3
-
+
 org/apache/hadoop/hbase/DroppedSnapshotException.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/ExtendedCell.java
 0
 0
 2
-
+
 org/apache/hadoop/hbase/ExtendedCellBuilderImpl.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/HBaseConfiguration.java
 0
 0
 7
-
+
 org/apache/hadoop/hbase/HColumnDescriptor.java
 0
 0
 42
-
+
 org/apache/hadoop/hbase/HConstants.java
 0
 0
 2
-
+
 org/apache/hadoop/hbase/HRegionInfo.java
 0
 0
-58
-
+56
+
 org/apache/hadoop/hbase/HRegionLocation.java
 0
 0
-2
-
+1
+
 org/apache/hadoop/hbase/HTableDescriptor.java
 0
 0
 38
-
+
 org/apache/hadoop/hbase/HealthChecker.java
 0
 0
 17
-
+
 org/apache/hadoop/hbase/IndividualBytesFieldCell.java
 0
 0
 12
-
+
 org/apache/hadoop/hbase/JMXListener.java
 0
 0
-3
-
+5
+
 org/apache/hadoop/hbase/JitterScheduledThreadPoolExecutorImpl.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/KeyValue.java
 0
 0
 136
-
+
 org/apache/hadoop/hbase/KeyValueTestUtil.java
 0
 0
 10
-
+
 org/apache/hadoop/hbase/KeyValueUtil.java
 0
 0
 31
-
+
 org/apache/hadoop/hbase/LocalHBaseCluster.java
 0
 0
 24
-
+
 org/apache/hadoop/hbase/MetaMutationAnnotation.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/MetaTableAccessor.java
 0
 0
-121
-
+120
+
 org/apache/hadoop/hbase/NamespaceDescriptor.java
 0
 0
 4
-
+
 org/apache/hadoop/hbase/NoTagsByteBufferKeyValue.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/NoTagsKeyValue.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/NotAllMetaRegionsOnlineException.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/NotServingRegionException.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/RegionLoad.java
 0
 0
 2
-
+
 org/apache/hadoop/hbase/RegionLocations.java
 0
 0
-11
-
+12
+
 org/apache/hadoop/hbase/RegionStateListener.java
 0
 0
 2
-
+
 org/apache/hadoop/hbase/ScheduledChore.java
 0
 0
 6
-
-org/apache/hadoop/hbase/Server.java
-0
-0
-1
 
 org/apache/hadoop/hbase/ServerLoad.java
 0
@@ -857,7 +857,7 @@
 org/apache/hadoop/hbase/backup/util/BackupUtils.java
 0
 0
-6
+4
 
 org/apache/hadoop/hbase/backup/util/RestoreTool.java
 0
@@ -892,7 +892,7 @@
 org/apache/hadoop/hbase/client/AsyncAdmin.java
 0
 0
-22
+18
 
 org/apache/hadoop/hbase/client/AsyncAdminBuilder.java
 0
@@ -932,7 +932,7 @@
 org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java
 0
 0
-4
+2
 
 org/apache/hadoop/hbase/client/AsyncMasterRequestRpcRetryingCaller.java
 0
@@ -1064,825 +1064,825 @@
 0
 3
 
-org/apache/hadoop/hbase/client/ClientSideRegionScanner.java
-0
-0
-1
-
 org/apache/hadoop/hbase/client/ClientSimpleScanner.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/client/ClientUtil.java
 0
 0
 2
-
+
 org/apache/hadoop/hbase/client/ClusterConnection.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/client/ClusterStatusListener.java
 0
 0
 3
-
+
 org/apache/hadoop/hbase/client/ColumnFamilyDescriptor.java
 0
 0
 13
-
+
 org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.java
 0
 0
 56
-
+
 org/apache/hadoop/hbase/client/CompactType.java
 0
 0
 2
-
+
 org/apache/hadoop/hbase/client/CompleteScanResultCache.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/client/ConnectionConfiguration.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/client/ConnectionFactory.java
 0
 0
 3
-
+
 org/apache/hadoop/hbase/client/ConnectionImplementation.java
 0
 0
-9
-
+8
+
 org/apache/hadoop/hbase/client/ConnectionUtils.java
 0
 0
 2
-
+
 org/apache/hadoop/hbase/client/CoprocessorHConnection.java
 0
 0
 4
-
+
 org/apache/hadoop/hbase/client/DelayingRunner.java
 0
 0
 4
-
+
 org/apache/hadoop/hbase/client/Delete.java
 0
 0
 9
-
+
 org/apache/hadoop/hbase/client/FlushRegionCallable.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/client/Get.java
 0
 0
 10
-
+
 org/apache/hadoop/hbase/client/HBaseAdmin.java
 0
 0
-111
-
+110
+
 org/apache/hadoop/hbase/client/HRegionLocator.java
 0
 0
-3
-
+2
+
 org/apache/hadoop/hbase/client/HTable.java
 0
 0
 64
-
+
 org/apache/hadoop/hbase/client/HTableMultiplexer.java
 0
 0
-7
-
+6
+
 org/apache/hadoop/hbase/client/HTableWrapper.java
 0
 0
 9
-
+
 org/apache/hadoop/hbase/client/ImmutableHColumnDescriptor.java
 0
 0
 2
-
+
 org/apache/hadoop/hbase/client/ImmutableHRegionInfo.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/client/ImmutableHTableDescriptor.java
 0
 0
 4
-
+
 

[09/51] [partial] hbase-site git commit: Published site at .

2017-09-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/67deb422/devapidocs/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.html 
b/devapidocs/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.html
index fa5efa8..1e2cb92 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.html
@@ -114,7 +114,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Private
-public class AsyncHBaseAdmin
+public class AsyncHBaseAdmin
 extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
 implements AsyncAdmin
 The implementation of AsyncAdmin.
@@ -468,7 +468,7 @@ implements 
-http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListHRegionInfo
+http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListRegionInfo
 getOnlineRegions(ServerNameserverName)
 Get all the online regions on a region server.
 
@@ -509,7 +509,7 @@ implements 
-http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListHRegionInfo
+http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListRegionInfo
 getTableRegions(TableNametableName)
 Get the regions of a given table.
 
@@ -917,7 +917,7 @@ implements 
 
 LOG
-private static finalorg.apache.commons.logging.Log LOG
+private static finalorg.apache.commons.logging.Log LOG
 
 
 
@@ -926,7 +926,7 @@ implements 
 
 rawAdmin
-private finalRawAsyncHBaseAdmin rawAdmin
+private finalRawAsyncHBaseAdmin rawAdmin
 
 
 
@@ -935,7 +935,7 @@ implements 
 
 pool
-private finalhttp://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ExecutorService.html?is-external=true;
 title="class or interface in java.util.concurrent">ExecutorService pool
+private finalhttp://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ExecutorService.html?is-external=true;
 title="class or interface in java.util.concurrent">ExecutorService pool
 
 
 
@@ -952,7 +952,7 @@ implements 
 
 AsyncHBaseAdmin
-AsyncHBaseAdmin(RawAsyncHBaseAdminrawAdmin,
+AsyncHBaseAdmin(RawAsyncHBaseAdminrawAdmin,
 http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ExecutorService.html?is-external=true;
 title="class or interface in 
java.util.concurrent">ExecutorServicepool)
 
 
@@ -970,7 +970,7 @@ implements 
 
 wrap
-privateThttp://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in 
java.util.concurrent">CompletableFutureTwrap(http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in 
java.util.concurrent">CompletableFutureTfuture)
+privateThttp://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in 
java.util.concurrent">CompletableFutureTwrap(http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in 
java.util.concurrent">CompletableFutureTfuture)
 
 
 
@@ -979,7 +979,7 @@ implements 
 
 tableExists
-publichttp://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/lang/Boolean.html?is-external=true;
 title="class or interface in java.lang">BooleantableExists(TableNametableName)
+publichttp://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/lang/Boolean.html?is-external=true;
 title="class or interface in java.lang">BooleantableExists(TableNametableName)
 
 Specified by:
 tableExistsin
 interfaceAsyncAdmin
@@ -997,7 +997,7 @@ implements 
 
 listTables

[17/51] [partial] hbase-site git commit: Published site at .

2017-09-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/67deb422/devapidocs/org/apache/hadoop/hbase/class-use/HBaseIOException.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/class-use/HBaseIOException.html 
b/devapidocs/org/apache/hadoop/hbase/class-use/HBaseIOException.html
index e65c773..7404231 100644
--- a/devapidocs/org/apache/hadoop/hbase/class-use/HBaseIOException.html
+++ b/devapidocs/org/apache/hadoop/hbase/class-use/HBaseIOException.html
@@ -510,7 +510,7 @@
 
 http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListRegionPlan
 FavoredNodeLoadBalancer.balanceCluster(TableNametableName,
-  http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">MapServerName,http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListHRegionInfoclusterState)
+  http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">MapServerName,http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListRegionInfoclusterState)
 
 
 void
@@ -522,12 +522,12 @@
 
 
 ServerName
-FavoredNodeLoadBalancer.randomAssignment(HRegionInforegionInfo,
+FavoredNodeLoadBalancer.randomAssignment(RegionInforegionInfo,
 http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListServerNameservers)
 
 
-http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">MapServerName,http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListHRegionInfo
-FavoredNodeLoadBalancer.roundRobinAssignment(http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListHRegionInforegions,
+http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">MapServerName,http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListRegionInfo
+FavoredNodeLoadBalancer.roundRobinAssignment(http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListRegionInforegions,
 http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListServerNameservers)
 
 
@@ -673,14 +673,14 @@
 
 
 http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListRegionPlan
-LoadBalancer.balanceCluster(http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">MapServerName,http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListHRegionInfoclusterState)
+LoadBalancer.balanceCluster(http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">MapServerName,http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListRegionInfoclusterState)
 Perform the major balance operation
 
 
 
 http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListRegionPlan
 LoadBalancer.balanceCluster(TableNametableName,
-  http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">MapServerName,http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListHRegionInfoclusterState)
+  http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">MapServerName,http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListRegionInfoclusterState)
 Perform the major balance operation
 
 
@@ -697,21 +697,21 @@
 
 
 ServerName
-LoadBalancer.randomAssignment(HRegionInforegionInfo,
+LoadBalancer.randomAssignment(RegionInforegionInfo,
 http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListServerNameservers)
 Get a random region server from the list
 
 
 
-http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">MapServerName,http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or 

[39/51] [partial] hbase-site git commit: Published site at .

2017-09-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/67deb422/apidocs/src-html/org/apache/hadoop/hbase/client/HTableMultiplexer.html
--
diff --git 
a/apidocs/src-html/org/apache/hadoop/hbase/client/HTableMultiplexer.html 
b/apidocs/src-html/org/apache/hadoop/hbase/client/HTableMultiplexer.html
index c9037ad..c4a4d8f 100644
--- a/apidocs/src-html/org/apache/hadoop/hbase/client/HTableMultiplexer.html
+++ b/apidocs/src-html/org/apache/hadoop/hbase/client/HTableMultiplexer.html
@@ -27,651 +27,650 @@
 019 */
 020package org.apache.hadoop.hbase.client;
 021
-022import 
org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
-023import 
org.apache.hadoop.hbase.shaded.com.google.common.util.concurrent.ThreadFactoryBuilder;
-024
-025import java.io.IOException;
-026import 
java.util.AbstractMap.SimpleEntry;
-027import java.util.ArrayList;
-028import java.util.Collections;
-029import java.util.HashMap;
-030import java.util.List;
-031import java.util.Map;
-032import 
java.util.concurrent.ConcurrentHashMap;
-033import 
java.util.concurrent.ExecutorService;
-034import java.util.concurrent.Executors;
-035import 
java.util.concurrent.LinkedBlockingQueue;
-036import 
java.util.concurrent.ScheduledExecutorService;
-037import java.util.concurrent.TimeUnit;
-038import 
java.util.concurrent.atomic.AtomicInteger;
-039import 
java.util.concurrent.atomic.AtomicLong;
-040
-041import org.apache.commons.logging.Log;
-042import 
org.apache.commons.logging.LogFactory;
-043import 
org.apache.hadoop.conf.Configuration;
-044import 
org.apache.hadoop.hbase.HBaseConfiguration;
-045import 
org.apache.hadoop.hbase.HConstants;
-046import 
org.apache.hadoop.hbase.HRegionInfo;
-047import 
org.apache.hadoop.hbase.HRegionLocation;
-048import 
org.apache.hadoop.hbase.ServerName;
-049import 
org.apache.hadoop.hbase.TableName;
-050import 
org.apache.yetus.audience.InterfaceAudience;
-051import 
org.apache.hadoop.hbase.ipc.RpcControllerFactory;
-052import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-053
-054/**
-055 * HTableMultiplexer provides a 
thread-safe non blocking PUT API across all the tables.
-056 * Each put will be sharded into 
different buffer queues based on its destination region server.
-057 * So each region server buffer queue 
will only have the puts which share the same destination.
-058 * And each queue will have a flush 
worker thread to flush the puts request to the region server.
-059 * If any queue is full, the 
HTableMultiplexer starts to drop the Put requests for that
-060 * particular queue.
-061 *
-062 * Also all the puts will be retried as a 
configuration number before dropping.
-063 * And the HTableMultiplexer can report 
the number of buffered requests and the number of the
-064 * failed (dropped) requests in total or 
on per region server basis.
-065 *
-066 * This class is thread safe.
-067 */
-068@InterfaceAudience.Public
-069public class HTableMultiplexer {
-070  private static final Log LOG = 
LogFactory.getLog(HTableMultiplexer.class.getName());
-071
-072  public static final String 
TABLE_MULTIPLEXER_FLUSH_PERIOD_MS =
-073  
"hbase.tablemultiplexer.flush.period.ms";
-074  public static final String 
TABLE_MULTIPLEXER_INIT_THREADS = "hbase.tablemultiplexer.init.threads";
-075  public static final String 
TABLE_MULTIPLEXER_MAX_RETRIES_IN_QUEUE =
-076  
"hbase.client.max.retries.in.queue";
-077
-078  /** The map between each region server 
to its flush worker */
-079  private final MapHRegionLocation, 
FlushWorker serverToFlushWorkerMap =
-080  new ConcurrentHashMap();
-081
-082  private final Configuration 
workerConf;
-083  private final ClusterConnection conn;
-084  private final ExecutorService pool;
-085  private final int maxAttempts;
-086  private final int 
perRegionServerBufferQueueSize;
-087  private final int maxKeyValueSize;
-088  private final ScheduledExecutorService 
executor;
-089  private final long flushPeriod;
-090
-091  /**
-092   * @param conf The HBaseConfiguration
-093   * @param 
perRegionServerBufferQueueSize determines the max number of the buffered Put 
ops for
-094   *  each region server before 
dropping the request.
-095   */
-096  public HTableMultiplexer(Configuration 
conf, int perRegionServerBufferQueueSize)
-097  throws IOException {
-098
this(ConnectionFactory.createConnection(conf), conf, 
perRegionServerBufferQueueSize);
-099  }
-100
-101  /**
-102   * @param conn The HBase connection.
-103   * @param conf The HBase 
configuration
-104   * @param 
perRegionServerBufferQueueSize determines the max number of the buffered Put 
ops for
-105   *  each region server before 
dropping the request.
-106   */
-107  public HTableMultiplexer(Connection 
conn, Configuration conf,
-108  int perRegionServerBufferQueueSize) 
{
-109this.conn = (ClusterConnection) 
conn;
-110this.pool = 
HTable.getDefaultExecutor(conf);
-111// how many times 

[49/51] [partial] hbase-site git commit: Published site at .

2017-09-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/67deb422/apidocs/org/apache/hadoop/hbase/HRegionInfo.html
--
diff --git a/apidocs/org/apache/hadoop/hbase/HRegionInfo.html 
b/apidocs/org/apache/hadoop/hbase/HRegionInfo.html
index 838060f..b303c7b 100644
--- a/apidocs/org/apache/hadoop/hbase/HRegionInfo.html
+++ b/apidocs/org/apache/hadoop/hbase/HRegionInfo.html
@@ -119,7 +119,7 @@ var activeTableTab = "activeTableTab";
 
 http://docs.oracle.com/javase/8/docs/api/java/lang/Deprecated.html?is-external=true;
 title="class or interface in java.lang">@Deprecated
  @InterfaceAudience.Public
-public class HRegionInfo
+public class HRegionInfo
 extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
 implements RegionInfo, http://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true;
 title="class or interface in java.lang">ComparableHRegionInfo
 Information about a region. A region is a range of keys in 
the whole keyspace of a table, an
@@ -215,24 +215,24 @@ implements Constructor and Description
 
 
-HRegionInfo(HRegionInfoother)
-Deprecated.
-Costruct a copy of another HRegionInfo
-
-
-
 HRegionInfo(HRegionInfoother,
intreplicaId)
 Deprecated.
 
 
-
+
 HRegionInfo(longregionId,
TableNametableName,
intreplicaId)
 Deprecated.
 
 
+
+HRegionInfo(RegionInfoother)
+Deprecated.
+Costruct a copy of another HRegionInfo
+
+
 
 HRegionInfo(TableNametableName)
 Deprecated.
@@ -619,7 +619,7 @@ implements 
 
 ENCODED_REGION_NAME_REGEX
-public static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String ENCODED_REGION_NAME_REGEX
+public static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String ENCODED_REGION_NAME_REGEX
 Deprecated.
 A non-capture group so that this can be embedded.
 
@@ -634,7 +634,7 @@ implements 
 
 NO_HASH
-public static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String NO_HASH
+public static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String NO_HASH
 Deprecated.
 
 
@@ -644,7 +644,7 @@ implements 
 
 HIDDEN_END_KEY
-public static finalbyte[] HIDDEN_END_KEY
+public static finalbyte[] HIDDEN_END_KEY
 Deprecated.
 
 
@@ -654,7 +654,7 @@ implements 
 
 HIDDEN_START_KEY
-public static finalbyte[] HIDDEN_START_KEY
+public static finalbyte[] HIDDEN_START_KEY
 Deprecated.
 
 
@@ -664,7 +664,7 @@ implements 
 
 FIRST_META_REGIONINFO
-public static finalHRegionInfo FIRST_META_REGIONINFO
+public static finalHRegionInfo FIRST_META_REGIONINFO
 Deprecated.
 HRegionInfo for first meta region
 
@@ -683,7 +683,7 @@ implements 
 
 HRegionInfo
-publicHRegionInfo(longregionId,
+publicHRegionInfo(longregionId,
TableNametableName,
intreplicaId)
 Deprecated.
@@ -695,7 +695,7 @@ implements 
 
 HRegionInfo
-publicHRegionInfo(TableNametableName)
+publicHRegionInfo(TableNametableName)
 Deprecated.
 
 
@@ -705,7 +705,7 @@ implements 
 
 HRegionInfo
-publicHRegionInfo(TableNametableName,
+publicHRegionInfo(TableNametableName,
byte[]startKey,
byte[]endKey)
 throws http://docs.oracle.com/javase/8/docs/api/java/lang/IllegalArgumentException.html?is-external=true;
 title="class or interface in java.lang">IllegalArgumentException
@@ -727,7 +727,7 @@ implements 
 
 HRegionInfo
-publicHRegionInfo(TableNametableName,
+publicHRegionInfo(TableNametableName,
byte[]startKey,
byte[]endKey,
booleansplit)
@@ -752,7 +752,7 @@ implements 
 
 HRegionInfo
-publicHRegionInfo(TableNametableName,
+publicHRegionInfo(TableNametableName,
byte[]startKey,
byte[]endKey,
booleansplit,
@@ -779,7 +779,7 @@ implements 
 
 HRegionInfo
-publicHRegionInfo(TableNametableName,
+publicHRegionInfo(TableNametableName,
byte[]startKey,
byte[]endKey,
booleansplit,
@@ -802,13 +802,13 @@ implements 
 
 
-
+
 
 
 
 
 HRegionInfo
-publicHRegionInfo(HRegionInfoother)
+publicHRegionInfo(RegionInfoother)
 Deprecated.
 Costruct a copy of another HRegionInfo
 
@@ -823,7 +823,7 @@ implements 
 
 HRegionInfo
-publicHRegionInfo(HRegionInfoother,
+publicHRegionInfo(HRegionInfoother,
intreplicaId)
 Deprecated.
 
@@ -843,7 +843,7 @@ implements 
 encodeRegionName
 http://docs.oracle.com/javase/8/docs/api/java/lang/Deprecated.html?is-external=true;
 title="class or interface in java.lang">@Deprecated
-public 

[28/51] [partial] hbase-site git commit: Published site at .

2017-09-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/67deb422/devapidocs/deprecated-list.html
--
diff --git a/devapidocs/deprecated-list.html b/devapidocs/deprecated-list.html
index b44c65d..e160624 100644
--- a/devapidocs/deprecated-list.html
+++ b/devapidocs/deprecated-list.html
@@ -94,15 +94,25 @@
 
 
 
+org.apache.hadoop.hbase.coprocessor.CoprocessorService
+Since 2.0. Will be removed 
in 3.0
+
+
+
 org.apache.hadoop.hbase.SettableSequenceId
 as of 2.0 and will be 
removed in 3.0. Use ExtendedCell 
instead
 
 
-
+
 org.apache.hadoop.hbase.SettableTimestamp
 as of 2.0 and will be 
removed in 3.0. Use ExtendedCell 
instead
 
 
+
+org.apache.hadoop.hbase.coprocessor.SingletonCoprocessorService
+Since 2.0. Will be removed 
in 3.0
+
+
 
 
 
@@ -119,87 +129,90 @@
 
 
 
+org.apache.hadoop.hbase.coprocessor.CoprocessorServiceBackwardCompatiblity
+
+
 org.apache.hadoop.hbase.util.Counter
 use http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/LongAdder.html?is-external=true;
 title="class or interface in 
java.util.concurrent.atomic">LongAdder instead.
 
 
-
+
 org.apache.hadoop.hbase.filter.FirstKeyValueMatchingQualifiersFilter
 Deprecated in 2.0. See 
HBASE-13347
 
 
-
+
 org.apache.hadoop.hbase.HColumnDescriptor
 
-
+
 org.apache.hadoop.hbase.HRegionInfo
 As of release 2.0.0, this 
will be removed in HBase 3.0.0.
  use RegionInfoBuilder to build RegionInfo.
 
 
-
+
 org.apache.hadoop.hbase.HTableDescriptor
 As of release 2.0.0, this 
will be removed in HBase 3.0.0.
  Use TableDescriptorBuilder to 
build HTableDescriptor.
 
 
-
+
 org.apache.hadoop.hbase.client.ImmutableHColumnDescriptor
 
-
+
 org.apache.hadoop.hbase.client.ImmutableHRegionInfo
 
-
+
 org.apache.hadoop.hbase.client.ImmutableHTableDescriptor
 
-
+
 org.apache.hadoop.hbase.KeyValue.KVComparator
 : Use CellComparator.
 
 
-
+
 org.apache.hadoop.hbase.KeyValue.MetaComparator
 : CellComparator.META_COMPARATOR
 to be used
 
 
-
+
 org.apache.hadoop.hbase.KeyValue.RawBytesComparator
 Not to be used for any 
comparsions
 
 
-
+
 org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles
 As of release 2.0.0, this 
will be removed in HBase 3.0.0. Use
  LoadIncrementalHFiles 
instead.
 
 
-
+
 org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles.LoadQueueItem
 As of release 2.0.0, this 
will be removed in HBase 3.0.0. Use
  LoadIncrementalHFiles.LoadQueueItem
 instead.
 
 
-
+
 org.apache.hadoop.hbase.client.replication.ReplicationAdmin
 use Admin instead.
 
 
-
+
 org.apache.hadoop.hbase.security.access.SecureBulkLoadEndpoint
 As of release 2.0.0, this 
will be removed in HBase 3.0.0
 
 
-
+
 org.apache.hadoop.hbase.client.UnmodifyableHRegionInfo
 As of release 2.0.0, this 
will be removed in HBase 3.0.0.
 
 
-
+
 org.apache.hadoop.hbase.zookeeper.ZKLeaderManager
 Not used
 
 
-
+
 org.apache.hadoop.hbase.zookeeper.ZKUtil.NodeAndData
 Unused
 
@@ -549,172 +562,167 @@
 org.apache.hadoop.hbase.client.HBaseAdmin.closeRegionWithEncodedRegionName(String,
 String)
 
 
-org.apache.hadoop.hbase.regionserver.Store.compact(CompactionContext,
 ThroughputController)
-see 
compact(CompactionContext, ThroughputController, User)
-
-
-
 org.apache.hadoop.hbase.filter.CompareFilter.compareFamily(CompareFilter.CompareOp,
 ByteArrayComparable, Cell)
 Since 2.0.0. Will be 
removed in 3.0.0.
  Use CompareFilter.compareFamily(CompareOperator,
 ByteArrayComparable, Cell)
 
 
-
+
 org.apache.hadoop.hbase.KeyValue.RawBytesComparator.compareFlatKey(byte[],
 int, int, byte[], int, int)
 Since 0.99.2.
 
 
-
+
 org.apache.hadoop.hbase.filter.CompareFilter.compareQualifier(CompareFilter.CompareOp,
 ByteArrayComparable, Cell)
 Since 2.0.0. Will be 
removed in 3.0.0.
  Use CompareFilter.compareQualifier(CompareOperator,
 ByteArrayComparable, Cell)
 
 
-
+
 org.apache.hadoop.hbase.filter.CompareFilter.compareRow(CompareFilter.CompareOp,
 ByteArrayComparable, Cell)
 Since 2.0.0. Will be 
removed in 3.0.0.
  Use CompareFilter.compareRow(CompareOperator,
 ByteArrayComparable, Cell)
 
 
-
+
 org.apache.hadoop.hbase.filter.CompareFilter.compareValue(CompareFilter.CompareOp,
 ByteArrayComparable, Cell)
 Since 2.0.0. Will be 
removed in 3.0.0.
  Use CompareFilter.compareValue(CompareOperator,
 ByteArrayComparable, Cell)
 
 
-
+
 org.apache.hadoop.hbase.HRegionInfo.convert(HBaseProtos.RegionInfo)
 As of release 2.0.0, this 
will be removed in HBase 3.0.0
  Use toRegionInfo(HBaseProtos.RegionInfo)
  in 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil.
 
 
-
+
 org.apache.hadoop.hbase.HRegionInfo.convert(HRegionInfo)
 As of release 2.0.0, this 
will be removed in HBase 3.0.0
- Use toProtoRegionInfo(org.apache.hadoop.hbase.client.RegionInfo)
+ Use toRegionInfo(org.apache.hadoop.hbase.client.RegionInfo)
  in 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil.
 
 
-

[05/51] [partial] hbase-site git commit: Published site at .

2017-09-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/67deb422/devapidocs/org/apache/hadoop/hbase/client/HBaseAdmin.ProcedureFuture.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/HBaseAdmin.ProcedureFuture.html 
b/devapidocs/org/apache/hadoop/hbase/client/HBaseAdmin.ProcedureFuture.html
index bbf26c4..9a7404b 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/HBaseAdmin.ProcedureFuture.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/HBaseAdmin.ProcedureFuture.html
@@ -123,7 +123,7 @@ var activeTableTab = "activeTableTab";
 
 @InterfaceAudience.Private
  @InterfaceStability.Evolving
-protected static class HBaseAdmin.ProcedureFutureV
+protected static class HBaseAdmin.ProcedureFutureV
 extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
 implements http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/Future.html?is-external=true;
 title="class or interface in java.util.concurrent">FutureV
 Future that waits on a procedure result.
@@ -328,7 +328,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/util/concurren
 
 
 exception
-privatehttp://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ExecutionException.html?is-external=true;
 title="class or interface in java.util.concurrent">ExecutionException exception
+privatehttp://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ExecutionException.html?is-external=true;
 title="class or interface in java.util.concurrent">ExecutionException exception
 
 
 
@@ -337,7 +337,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/util/concurren
 
 
 procResultFound
-privateboolean procResultFound
+privateboolean procResultFound
 
 
 
@@ -346,7 +346,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/util/concurren
 
 
 done
-privateboolean done
+privateboolean done
 
 
 
@@ -355,7 +355,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/util/concurren
 
 
 cancelled
-privateboolean cancelled
+privateboolean cancelled
 
 
 
@@ -364,7 +364,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/util/concurren
 
 
 result
-privateV result
+privateV result
 
 
 
@@ -373,7 +373,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/util/concurren
 
 
 admin
-private finalHBaseAdmin admin
+private finalHBaseAdmin admin
 
 
 
@@ -382,7 +382,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/util/concurren
 
 
 procId
-private finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/Long.html?is-external=true;
 title="class or interface in java.lang">Long procId
+private finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/Long.html?is-external=true;
 title="class or interface in java.lang">Long procId
 
 
 
@@ -399,7 +399,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/util/concurren
 
 
 ProcedureFuture
-publicProcedureFuture(HBaseAdminadmin,
+publicProcedureFuture(HBaseAdminadmin,
http://docs.oracle.com/javase/8/docs/api/java/lang/Long.html?is-external=true;
 title="class or interface in java.lang">LongprocId)
 
 
@@ -417,7 +417,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/util/concurren
 
 
 cancel
-publicbooleancancel(booleanmayInterruptIfRunning)
+publicbooleancancel(booleanmayInterruptIfRunning)
 
 Specified by:
 http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/Future.html?is-external=true#cancel-boolean-;
 title="class or interface in java.util.concurrent">cancelin 
interfacehttp://docs.oracle.com/javase/8/docs/api/java/util/concurrent/Future.html?is-external=true;
 title="class or interface in java.util.concurrent">FutureV
@@ -430,7 +430,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/util/concurren
 
 
 isCancelled
-publicbooleanisCancelled()
+publicbooleanisCancelled()
 
 Specified by:
 http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/Future.html?is-external=true#isCancelled--;
 title="class or interface in 
java.util.concurrent">isCancelledin interfacehttp://docs.oracle.com/javase/8/docs/api/java/util/concurrent/Future.html?is-external=true;
 title="class or interface in java.util.concurrent">FutureV
@@ -443,7 +443,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/util/concurren
 
 
 abortProcedureResult
-protectedorg.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponseabortProcedureResult(org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequestrequest)
+protectedorg.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponseabortProcedureResult(org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequestrequest)

   throws 

[35/51] [partial] hbase-site git commit: Published site at .

2017-09-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/67deb422/apidocs/src-html/org/apache/hadoop/hbase/client/RegionInfo.html
--
diff --git a/apidocs/src-html/org/apache/hadoop/hbase/client/RegionInfo.html 
b/apidocs/src-html/org/apache/hadoop/hbase/client/RegionInfo.html
index cdbdf4d..07af8e5 100644
--- a/apidocs/src-html/org/apache/hadoop/hbase/client/RegionInfo.html
+++ b/apidocs/src-html/org/apache/hadoop/hbase/client/RegionInfo.html
@@ -26,737 +26,738 @@
 018 */
 019package org.apache.hadoop.hbase.client;
 020
-021import 
org.apache.hadoop.hbase.HConstants;
-022import 
org.apache.hadoop.hbase.TableName;
-023import 
org.apache.hadoop.hbase.exceptions.DeserializationException;
-024import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-025import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
-026import 
org.apache.hadoop.hbase.util.Bytes;
-027import 
org.apache.hadoop.hbase.util.MD5Hash;
-028import 
org.apache.hadoop.io.DataInputBuffer;
-029import 
org.apache.hadoop.util.StringUtils;
-030import 
org.apache.yetus.audience.InterfaceAudience;
-031
-032import java.io.DataInputStream;
-033import java.io.IOException;
-034import java.util.ArrayList;
-035import java.util.Arrays;
-036import java.util.Comparator;
-037import java.util.List;
-038import java.util.stream.Collectors;
-039import 
org.apache.hadoop.hbase.util.ByteArrayHashKey;
-040import 
org.apache.hadoop.hbase.util.HashKey;
-041import 
org.apache.hadoop.hbase.util.JenkinsHash;
-042
-043/**
-044 * Information about a region. A region 
is a range of keys in the whole keyspace
-045 * of a table, an identifier (a 
timestamp) for differentiating between subset
-046 * ranges (after region split) and a 
replicaId for differentiating the instance
-047 * for the same range and some status 
information about the region.
-048 *
-049 * The region has a unique name which 
consists of the following fields:
-050 * ul
-051 * li tableName   : The name of 
the table /li
-052 * li startKey: The startKey 
for the region. /li
-053 * li regionId: A timestamp 
when the region is created. /li
-054 * li replicaId   : An id 
starting from 0 to differentiate replicas of the
-055 * same region range but hosted in 
separated servers. The same region range can
-056 * be hosted in multiple 
locations./li
-057 * li encodedName : An MD5 
encoded string for the region name./li
-058 * /ul
-059 *
-060 * br Other than the fields in 
the region name, region info contains:
-061 * ul
-062 * li endKey  : the endKey 
for the region (exclusive) /li
-063 * li split   : Whether the 
region is split /li
-064 * li offline : Whether the 
region is offline /li
-065 * /ul
-066 *
-067 */
-068@InterfaceAudience.Public
-069public interface RegionInfo {
-070  /**
-071   * Separator used to demarcate the 
encodedName in a region name
-072   * in the new format. See description 
on new format above.
-073   */
-074  @InterfaceAudience.Private
-075  int ENC_SEPARATOR = '.';
-076
-077  @InterfaceAudience.Private
-078  int MD5_HEX_LENGTH = 32;
-079
-080  @InterfaceAudience.Private
-081  int DEFAULT_REPLICA_ID = 0;
-082
-083  /**
-084   * to keep appended int's sorted in 
string format. Only allows 2 bytes
-085   * to be sorted for replicaId.
-086   */
-087  @InterfaceAudience.Private
-088  String REPLICA_ID_FORMAT = "%04X";
-089
-090  @InterfaceAudience.Private
-091  byte REPLICA_ID_DELIMITER = 
(byte)'_';
-092
-093  @InterfaceAudience.Private
-094  String 
INVALID_REGION_NAME_FORMAT_MESSAGE = "Invalid regionName format";
-095
-096  @InterfaceAudience.Private
-097  ComparatorRegionInfo 
COMPARATOR
-098= (RegionInfo lhs, RegionInfo rhs) 
- {
-099  if (rhs == null) {
-100return 1;
-101  }
-102
-103  // Are regions of same table?
-104  int result = 
lhs.getTable().compareTo(rhs.getTable());
-105  if (result != 0) {
-106return result;
-107  }
-108
-109  // Compare start keys.
-110  result = 
Bytes.compareTo(lhs.getStartKey(), rhs.getStartKey());
-111  if (result != 0) {
-112return result;
-113  }
-114
-115  // Compare end keys.
-116  result = 
Bytes.compareTo(lhs.getEndKey(), rhs.getEndKey());
-117
-118  if (result != 0) {
-119if (lhs.getStartKey().length != 
0
-120 
lhs.getEndKey().length == 0) {
-121return 1; // this is last 
region
-122}
-123if (rhs.getStartKey().length != 
0
-124 
rhs.getEndKey().length == 0) {
-125return -1; // o is the last 
region
-126}
-127return result;
-128  }
-129
-130  // regionId is usually milli 
timestamp -- this defines older stamps
-131  // to be "smaller" than newer 
stamps in sort order.
-132  if (lhs.getRegionId()  
rhs.getRegionId()) {
-133return 1;
-134  } else if (lhs.getRegionId()  
rhs.getRegionId()) {
-135return -1;
-136  }
-137
-138  int 

[45/51] [partial] hbase-site git commit: Published site at .

2017-09-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/67deb422/apidocs/org/apache/hadoop/hbase/client/class-use/RegionInfo.html
--
diff --git a/apidocs/org/apache/hadoop/hbase/client/class-use/RegionInfo.html 
b/apidocs/org/apache/hadoop/hbase/client/class-use/RegionInfo.html
index dd195a6..120b8c4 100644
--- a/apidocs/org/apache/hadoop/hbase/client/class-use/RegionInfo.html
+++ b/apidocs/org/apache/hadoop/hbase/client/class-use/RegionInfo.html
@@ -119,6 +119,42 @@
 
 
 
+
+Methods in org.apache.hadoop.hbase
 that return RegionInfo
+
+Modifier and Type
+Method and Description
+
+
+
+RegionInfo
+HRegionLocation.getRegion()
+
+
+
+
+Constructors in org.apache.hadoop.hbase
 with parameters of type RegionInfo
+
+Constructor and Description
+
+
+
+HRegionInfo(RegionInfoother)
+Deprecated.
+Costruct a copy of another HRegionInfo
+
+
+
+HRegionLocation(RegionInforegionInfo,
+   ServerNameserverName)
+
+
+HRegionLocation(RegionInforegionInfo,
+   ServerNameserverName,
+   longseqNum)
+
+
+
 
 
 
@@ -180,17 +216,29 @@
 
 
 
+http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListRegionInfo
+AsyncAdmin.getOnlineRegions(ServerNameserverName)
+Get all the online regions on a region server.
+
+
+
 http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListRegionInfo
 Admin.getRegions(ServerNameserverName)
 Get all the online regions on a region server.
 
 
-
+
 http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListRegionInfo
 Admin.getRegions(TableNametableName)
 Get the regions of a given table.
 
 
+
+http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListRegionInfo
+AsyncAdmin.getTableRegions(TableNametableName)
+Get the regions of a given table.
+
+
 
 static http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListRegionInfo
 RegionInfo.parseDelimitedFrom(byte[]bytes,
@@ -220,6 +268,16 @@
 RegionInfo.getShortNameToLog(RegionInfo...hris)
 
 
+void
+RawAsyncTable.CoprocessorCallback.onRegionComplete(RegionInforegion,
+Rresp)
+
+
+void
+RawAsyncTable.CoprocessorCallback.onRegionError(RegionInforegion,
+ http://docs.oracle.com/javase/8/docs/api/java/lang/Throwable.html?is-external=true;
 title="class or interface in 
java.lang">Throwableerror)
+
+
 static byte[]
 RegionInfo.toByteArray(RegionInfori)
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/67deb422/apidocs/org/apache/hadoop/hbase/coprocessor/package-summary.html
--
diff --git a/apidocs/org/apache/hadoop/hbase/coprocessor/package-summary.html 
b/apidocs/org/apache/hadoop/hbase/coprocessor/package-summary.html
index e1476a0..9289018 100644
--- a/apidocs/org/apache/hadoop/hbase/coprocessor/package-summary.html
+++ b/apidocs/org/apache/hadoop/hbase/coprocessor/package-summary.html
@@ -268,9 +268,8 @@ To implement an Endpoint, you need to:
  https://developers.google.com/protocol-buffers/docs/proto#services;>protocol
 buffer guide
  for more details on defining services.
  Generate the Service and Message code using the protoc compiler
- Implement the generated Service interface in your coprocessor class and 
implement the
- CoprocessorService interface.  The 
CoprocessorService.getService()
- method should return a reference to the Endpoint's protocol buffer Service 
instance.
+ Implement the generated Service interface and override get*Service() 
method in
+ relevant Coprocessor to return a reference to the Endpoint's protocol buffer 
Service instance.
 
 
 For a more detailed discussion of how to implement a coprocessor Endpoint, 
along with some sample

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/67deb422/apidocs/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.html
--
diff --git a/apidocs/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.html 
b/apidocs/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.html
index 8c50121..b783b22 100644
--- a/apidocs/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.html
+++ b/apidocs/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.html
@@ -124,7 +124,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Public
-public class 

[38/51] [partial] hbase-site git commit: Published site at .

2017-09-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/67deb422/apidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncTable.CoprocessorCallable.html
--
diff --git 
a/apidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncTable.CoprocessorCallable.html
 
b/apidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncTable.CoprocessorCallable.html
index 523d98a..22eba3b 100644
--- 
a/apidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncTable.CoprocessorCallable.html
+++ 
b/apidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncTable.CoprocessorCallable.html
@@ -25,236 +25,235 @@
 017 */
 018package org.apache.hadoop.hbase.client;
 019
-020import com.google.protobuf.RpcCallback;
-021import com.google.protobuf.RpcChannel;
-022import 
com.google.protobuf.RpcController;
-023
-024import 
java.util.concurrent.CompletableFuture;
-025import java.util.function.Function;
-026
-027import 
org.apache.hadoop.hbase.HRegionInfo;
-028import 
org.apache.yetus.audience.InterfaceAudience;
-029
-030/**
-031 * A low level asynchronous table.
-032 * p
-033 * The implementation is required to be 
thread safe.
-034 * p
-035 * The returned {@code CompletableFuture} 
will be finished directly in the rpc framework's callback
-036 * thread, so typically you should not do 
any time consuming work inside these methods, otherwise
-037 * you will be likely to block at least 
one connection to RS(even more if the rpc framework uses
-038 * NIO).
-039 * p
-040 * So, only experts that want to build 
high performance service should use this interface directly,
-041 * especially for the {@link #scan(Scan, 
RawScanResultConsumer)} below.
-042 * p
-043 * TODO: For now the only difference 
between this interface and {@link AsyncTable} is the scan
-044 * method. The {@link 
RawScanResultConsumer} exposes the implementation details of a 
scan(heartbeat)
-045 * so it is not suitable for a normal 
user. If it is still the only difference after we implement
-046 * most features of AsyncTable, we can 
think about merge these two interfaces.
-047 * @since 2.0.0
-048 */
-049@InterfaceAudience.Public
-050public interface RawAsyncTable extends 
AsyncTableBase {
-051
-052  /**
-053   * The basic scan API uses the observer 
pattern. All results that match the given scan object will
-054   * be passed to the given {@code 
consumer} by calling {@code RawScanResultConsumer.onNext}.
-055   * {@code 
RawScanResultConsumer.onComplete} means the scan is finished, and
-056   * {@code 
RawScanResultConsumer.onError} means we hit an unrecoverable error and the scan 
is
-057   * terminated. {@code 
RawScanResultConsumer.onHeartbeat} means the RS is still working but we can
-058   * not get a valid result to call 
{@code RawScanResultConsumer.onNext}. This is usually because
-059   * the matched results are too sparse, 
for example, a filter which almost filters out everything
-060   * is specified.
-061   * p
-062   * Notice that, the methods of the 
given {@code consumer} will be called directly in the rpc
-063   * framework's callback thread, so 
typically you should not do any time consuming work inside
-064   * these methods, otherwise you will be 
likely to block at least one connection to RS(even more if
-065   * the rpc framework uses NIO).
-066   * @param scan A configured {@link 
Scan} object.
-067   * @param consumer the consumer used to 
receive results.
-068   */
-069  void scan(Scan scan, 
RawScanResultConsumer consumer);
-070
-071  /**
-072   * Delegate to a protobuf rpc call.
-073   * p
-074   * Usually, it is just a simple lambda 
expression, like:
-075   *
-076   * pre
-077   * code
-078   * (stub, controller, rpcCallback) 
- {
-079   *   XXXRequest request = ...; // 
prepare the request
-080   *   stub.xxx(controller, request, 
rpcCallback);
-081   * }
-082   * /code
-083   * /pre
-084   *
-085   * And if you can prepare the {@code 
request} before calling the coprocessorService method, the
-086   * lambda expression will be:
-087   *
-088   * pre
-089   * code
-090   * (stub, controller, rpcCallback) 
- stub.xxx(controller, request, rpcCallback)
-091   * /code
-092   * /pre
-093   */
-094  @InterfaceAudience.Public
-095  @FunctionalInterface
-096  interface CoprocessorCallableS, 
R {
-097
-098/**
-099 * Represent the actual protobuf rpc 
call.
-100 * @param stub the asynchronous 
stub
-101 * @param controller the rpc 
controller, has already been prepared for you
-102 * @param rpcCallback the rpc 
callback, has already been prepared for you
-103 */
-104void call(S stub, RpcController 
controller, RpcCallbackR rpcCallback);
-105  }
-106
-107  /**
-108   * Execute the given coprocessor call 
on the region which contains the given {@code row}.
-109   * p
-110   * The {@code stubMaker} is just a 
delegation to the {@code newStub} call. Usually it is only a
-111   * one line lambda expression, like:
-112   *
-113   * pre
-114   * code
-115   * channel - 

[18/51] [partial] hbase-site git commit: Published site at .

2017-09-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/67deb422/devapidocs/org/apache/hadoop/hbase/class-use/CoordinatedStateException.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/class-use/CoordinatedStateException.html 
b/devapidocs/org/apache/hadoop/hbase/class-use/CoordinatedStateException.html
index a9f6318..42ccf90 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/class-use/CoordinatedStateException.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/class-use/CoordinatedStateException.html
@@ -135,7 +135,7 @@
 
 protected static void
 ProcedureSyncWait.waitRegionInTransition(MasterProcedureEnvenv,
-  http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListHRegionInforegions)
+  http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListRegionInforegions)
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/67deb422/devapidocs/org/apache/hadoop/hbase/class-use/Coprocessor.State.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/class-use/Coprocessor.State.html 
b/devapidocs/org/apache/hadoop/hbase/class-use/Coprocessor.State.html
index 0ab5251..3130a7e 100644
--- a/devapidocs/org/apache/hadoop/hbase/class-use/Coprocessor.State.html
+++ b/devapidocs/org/apache/hadoop/hbase/class-use/Coprocessor.State.html
@@ -137,7 +137,7 @@ the order they are declared.
 
 
 (package private) Coprocessor.State
-CoprocessorHost.Environment.state
+BaseEnvironment.state
 Current coprocessor state
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/67deb422/devapidocs/org/apache/hadoop/hbase/class-use/Coprocessor.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/class-use/Coprocessor.html 
b/devapidocs/org/apache/hadoop/hbase/class-use/Coprocessor.html
index c689ae7..31ec3cc 100644
--- a/devapidocs/org/apache/hadoop/hbase/class-use/Coprocessor.html
+++ b/devapidocs/org/apache/hadoop/hbase/class-use/Coprocessor.html
@@ -107,42 +107,30 @@
 
 
 
-org.apache.hadoop.hbase.master
-
-
-
 org.apache.hadoop.hbase.quotas
 
 
-
-org.apache.hadoop.hbase.regionserver
-
-
 
-org.apache.hadoop.hbase.regionserver.wal
-
-
-
 org.apache.hadoop.hbase.replication.regionserver
 
 
-
+
 org.apache.hadoop.hbase.rsgroup
 
 
-
+
 org.apache.hadoop.hbase.security.access
 
 
-
+
 org.apache.hadoop.hbase.security.token
 
 
-
+
 org.apache.hadoop.hbase.security.visibility
 
 
-
+
 org.apache.hadoop.hbase.tool
 
 
@@ -156,6 +144,21 @@
 
 Uses of Coprocessor in org.apache.hadoop.hbase
 
+Classes in org.apache.hadoop.hbase
 with type parameters of type Coprocessor
+
+Modifier and Type
+Interface and Description
+
+
+
+interface
+CoprocessorEnvironmentC 
extends Coprocessor
+Coprocessor environment state.
+
+
+
+
+
 Classes in org.apache.hadoop.hbase
 that implement Coprocessor
 
 Modifier and Type
@@ -174,19 +177,6 @@
 
 
 
-
-Methods in org.apache.hadoop.hbase
 that return Coprocessor
-
-Modifier and Type
-Method and Description
-
-
-
-Coprocessor
-CoprocessorEnvironment.getInstance()
-
-
-
 
 
 
@@ -232,51 +222,51 @@
 
 
 Uses of Coprocessor in org.apache.hadoop.hbase.coprocessor
-
-Subinterfaces of Coprocessor in org.apache.hadoop.hbase.coprocessor
+
+Classes in org.apache.hadoop.hbase.coprocessor
 with type parameters of type Coprocessor
 
 Modifier and Type
-Interface and Description
+Class and Description
 
 
 
-interface
-BulkLoadObserver
-Coprocessors implement this interface to observe and 
mediate bulk load operations.
+class
+BaseEnvironmentC 
extends Coprocessor
+Encapsulation of the environment of each coprocessor
 
 
 
-interface
-EndpointObserver
-Coprocessors implement this interface to observe and 
mediate endpoint invocations
- on a region.
+class
+CoprocessorHostC 
extends Coprocessor,E extends CoprocessorEnvironmentC
+Provides the common setup framework and runtime services 
for coprocessor
+ invocation from HBase services.
 
 
+
+
+
+Subinterfaces of Coprocessor in org.apache.hadoop.hbase.coprocessor
+
+Modifier and Type
+Interface and Description
+
+
 
 interface
-MasterObserver
-Defines coprocessor hooks for interacting with operations 
on the
- HMaster 
process.
-
+MasterCoprocessor
 
 
 interface
-RegionObserver
-Coprocessors implement this interface to observe and 
mediate client actions on the region.
-
+RegionCoprocessor
 
 
 interface
-RegionServerObserver
-Defines coprocessor hooks for interacting with operations 
on the
- HRegionServer 
process.
-
+RegionServerCoprocessor
 
 
 interface
-WALObserver
-It's provided to have a way for coprocessors to observe, 
rewrite,
- or skip WALEdits as they are being written to the WAL.
+WALCoprocessor
+WALCoprocessor don't support loading services using getService().
 
 
 
@@ 

[08/51] [partial] hbase-site git commit: Published site at .

2017-09-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/67deb422/devapidocs/org/apache/hadoop/hbase/client/ClientSideRegionScanner.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/ClientSideRegionScanner.html 
b/devapidocs/org/apache/hadoop/hbase/client/ClientSideRegionScanner.html
index 4daf7c7..fe2320d 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/ClientSideRegionScanner.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/ClientSideRegionScanner.html
@@ -119,7 +119,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Private
-public class ClientSideRegionScanner
+public class ClientSideRegionScanner
 extends AbstractClientScanner
 A client scanner for a region opened for read-only on the 
client side. Assumes region data
  is not changing.
@@ -179,11 +179,11 @@ extends Constructor and Description
 
 
-ClientSideRegionScanner(org.apache.hadoop.conf.Configurationconf,
+ClientSideRegionScanner(org.apache.hadoop.conf.Configurationconf,
org.apache.hadoop.fs.FileSystemfs,
org.apache.hadoop.fs.PathrootDir,
TableDescriptorhtd,
-   HRegionInfohri,
+   RegionInfohri,
Scanscan,
ScanMetricsscanMetrics)
 
@@ -269,7 +269,7 @@ extends 
 
 LOG
-private static finalorg.apache.commons.logging.Log LOG
+private static finalorg.apache.commons.logging.Log LOG
 
 
 
@@ -278,7 +278,7 @@ extends 
 
 region
-privateHRegion region
+privateHRegion region
 
 
 
@@ -287,7 +287,7 @@ extends 
 
 scanner
-RegionScanner scanner
+RegionScanner scanner
 
 
 
@@ -296,7 +296,7 @@ extends 
 
 values
-http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListCell values
+http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListCell values
 
 
 
@@ -307,17 +307,17 @@ extends 
+
 
 
 
 
 ClientSideRegionScanner
-publicClientSideRegionScanner(org.apache.hadoop.conf.Configurationconf,
+publicClientSideRegionScanner(org.apache.hadoop.conf.Configurationconf,
org.apache.hadoop.fs.FileSystemfs,
org.apache.hadoop.fs.PathrootDir,
TableDescriptorhtd,
-   HRegionInfohri,
+   RegionInfohri,
Scanscan,
ScanMetricsscanMetrics)
 throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
@@ -341,7 +341,7 @@ extends 
 
 next
-publicResultnext()
+publicResultnext()
 throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 Description copied from 
interface:ResultScanner
 Grab the next row's worth of values. The scanner will 
return a Result.
@@ -359,7 +359,7 @@ extends 
 
 close
-publicvoidclose()
+publicvoidclose()
 Description copied from 
interface:ResultScanner
 Closes the scanner and releases any resources it has 
allocated
 
@@ -370,7 +370,7 @@ extends 
 
 renewLease
-publicbooleanrenewLease()
+publicbooleanrenewLease()
 Description copied from 
interface:ResultScanner
 Allow the client to renew the scanner's lease on the 
server.
 



[47/51] [partial] hbase-site git commit: Published site at .

2017-09-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/67deb422/apidocs/org/apache/hadoop/hbase/client/AsyncAdmin.html
--
diff --git a/apidocs/org/apache/hadoop/hbase/client/AsyncAdmin.html 
b/apidocs/org/apache/hadoop/hbase/client/AsyncAdmin.html
index 6493b4c..159a8a7 100644
--- a/apidocs/org/apache/hadoop/hbase/client/AsyncAdmin.html
+++ b/apidocs/org/apache/hadoop/hbase/client/AsyncAdmin.html
@@ -102,7 +102,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Public
-public interface AsyncAdmin
+public interface AsyncAdmin
 The asynchronous administrative API for HBase.
  
  This feature is still under development, so marked as IA.Private. Will change 
to public when
@@ -454,7 +454,7 @@ public interface 
-http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListHRegionInfo
+http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListRegionInfo
 getOnlineRegions(ServerNameserverName)
 Get all the online regions on a region server.
 
@@ -505,7 +505,7 @@ public interface 
-http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListHRegionInfo
+http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListRegionInfo
 getTableRegions(TableNametableName)
 Get the regions of a given table.
 
@@ -956,7 +956,7 @@ public interface 
 
 tableExists
-http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/lang/Boolean.html?is-external=true;
 title="class or interface in java.lang">BooleantableExists(TableNametableName)
+http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/lang/Boolean.html?is-external=true;
 title="class or interface in java.lang">BooleantableExists(TableNametableName)
 
 Parameters:
 tableName - Table to check.
@@ -972,7 +972,7 @@ public interface 
 
 listTables
-defaulthttp://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListTableDescriptorlistTables()
+defaulthttp://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListTableDescriptorlistTables()
 List all the userspace tables.
 
 Returns:
@@ -988,7 +988,7 @@ public interface 
 
 listTables
-http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListTableDescriptorlistTables(http://docs.oracle.com/javase/8/docs/api/java/util/Optional.html?is-external=true;
 title="class or interface in java.util">Optionalhttp://docs.oracle.com/javase/8/docs/api/java/util/regex/Pattern.html?is-external=true;
 title="class or interface in java.util.regex">Patternpattern,
+http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListTableDescriptorlistTables(http://docs.oracle.com/javase/8/docs/api/java/util/Optional.html?is-external=true;
 title="class or interface in java.util">Optionalhttp://docs.oracle.com/javase/8/docs/api/java/util/regex/Pattern.html?is-external=true;
 title="class or interface in 

[22/51] [partial] hbase-site git commit: Published site at .

2017-09-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/67deb422/devapidocs/org/apache/hadoop/hbase/MetaTableAccessor.QueryType.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/MetaTableAccessor.QueryType.html 
b/devapidocs/org/apache/hadoop/hbase/MetaTableAccessor.QueryType.html
index 8a583cd..a046492 100644
--- a/devapidocs/org/apache/hadoop/hbase/MetaTableAccessor.QueryType.html
+++ b/devapidocs/org/apache/hadoop/hbase/MetaTableAccessor.QueryType.html
@@ -123,7 +123,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Private
-public static enum MetaTableAccessor.QueryType
+public static enum MetaTableAccessor.QueryType
 extends http://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true;
 title="class or interface in java.lang">EnumMetaTableAccessor.QueryType
 
 
@@ -237,7 +237,7 @@ the order they are declared.
 
 
 ALL
-public static finalMetaTableAccessor.QueryType ALL
+public static finalMetaTableAccessor.QueryType ALL
 
 
 
@@ -246,7 +246,7 @@ the order they are declared.
 
 
 REGION
-public static finalMetaTableAccessor.QueryType REGION
+public static finalMetaTableAccessor.QueryType REGION
 
 
 
@@ -255,7 +255,7 @@ the order they are declared.
 
 
 TABLE
-public static finalMetaTableAccessor.QueryType TABLE
+public static finalMetaTableAccessor.QueryType TABLE
 
 
 
@@ -272,7 +272,7 @@ the order they are declared.
 
 
 families
-private finalbyte[][] families
+private finalbyte[][] families
 
 
 
@@ -331,7 +331,7 @@ not permitted.)
 
 
 getFamilies
-byte[][]getFamilies()
+byte[][]getFamilies()
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/67deb422/devapidocs/org/apache/hadoop/hbase/MetaTableAccessor.TableVisitorBase.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/MetaTableAccessor.TableVisitorBase.html 
b/devapidocs/org/apache/hadoop/hbase/MetaTableAccessor.TableVisitorBase.html
index 2469199..2e3b74c 100644
--- a/devapidocs/org/apache/hadoop/hbase/MetaTableAccessor.TableVisitorBase.html
+++ b/devapidocs/org/apache/hadoop/hbase/MetaTableAccessor.TableVisitorBase.html
@@ -122,7 +122,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-public abstract static class MetaTableAccessor.TableVisitorBase
+public abstract static class MetaTableAccessor.TableVisitorBase
 extends MetaTableAccessor.DefaultVisitorBase
 A Visitor for a table. Provides a consistent view of the 
table's
  hbase:meta entries during concurrent splits (see HBASE-5986 for details). 
This class
@@ -223,7 +223,7 @@ extends 
 
 tableName
-privateTableName tableName
+privateTableName tableName
 
 
 
@@ -240,7 +240,7 @@ extends 
 
 TableVisitorBase
-publicTableVisitorBase(TableNametableName)
+publicTableVisitorBase(TableNametableName)
 
 
 
@@ -257,7 +257,7 @@ extends 
 
 visit
-public finalbooleanvisit(ResultrowResult)
+public finalbooleanvisit(ResultrowResult)
 throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 Description copied from 
interface:MetaTableAccessor.Visitor
 Visit the catalog table row.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/67deb422/devapidocs/org/apache/hadoop/hbase/MetaTableAccessor.Visitor.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/MetaTableAccessor.Visitor.html 
b/devapidocs/org/apache/hadoop/hbase/MetaTableAccessor.Visitor.html
index d4196ed..2a1dade 100644
--- a/devapidocs/org/apache/hadoop/hbase/MetaTableAccessor.Visitor.html
+++ b/devapidocs/org/apache/hadoop/hbase/MetaTableAccessor.Visitor.html
@@ -113,7 +113,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-public static interface MetaTableAccessor.Visitor
+public static interface MetaTableAccessor.Visitor
 Implementations 'visit' a catalog table row.
 
 
@@ -160,7 +160,7 @@ var activeTableTab = "activeTableTab";
 
 
 visit
-booleanvisit(Resultr)
+booleanvisit(Resultr)
throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 Visit the catalog table row.
 



[12/51] [partial] hbase-site git commit: Published site at .

2017-09-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/67deb422/devapidocs/org/apache/hadoop/hbase/class-use/TableName.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/class-use/TableName.html 
b/devapidocs/org/apache/hadoop/hbase/class-use/TableName.html
index c0c201e..85a8662 100644
--- a/devapidocs/org/apache/hadoop/hbase/class-use/TableName.html
+++ b/devapidocs/org/apache/hadoop/hbase/class-use/TableName.html
@@ -475,7 +475,7 @@ service.
 
 
 
-static http://docs.oracle.com/javase/8/docs/api/java/util/NavigableMap.html?is-external=true;
 title="class or interface in java.util">NavigableMapHRegionInfo,ServerName
+static http://docs.oracle.com/javase/8/docs/api/java/util/NavigableMap.html?is-external=true;
 title="class or interface in java.util">NavigableMapRegionInfo,ServerName
 MetaTableAccessor.allTableRegions(Connectionconnection,
TableNametableName)
 Deprecated.
@@ -563,7 +563,7 @@ service.
 TableDescriptors.get(TableNametableName)
 
 
-static HRegionInfo
+static RegionInfo
 MetaTableAccessor.getClosestRegionInfo(Connectionconnection,
 TableNametableName,
 byte[]row)
@@ -603,14 +603,14 @@ service.
 http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ExecutorService.html?is-external=true;
 title="class or interface in 
java.util.concurrent">ExecutorServiceservice)
 
 
-static http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListHRegionInfo
+static http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListRegionInfo
 MetaTableAccessor.getTableRegions(Connectionconnection,
TableNametableName)
 Gets all of the regions of the specified table.
 
 
 
-static http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListHRegionInfo
+static http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListRegionInfo
 MetaTableAccessor.getTableRegions(Connectionconnection,
TableNametableName,
booleanexcludeOfflinedSplitParents)
@@ -618,14 +618,14 @@ service.
 
 
 
-static http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListPairHRegionInfo,ServerName
+static http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListPairRegionInfo,ServerName
 MetaTableAccessor.getTableRegionsAndLocations(Connectionconnection,
TableNametableName)
 Do not use this method to get meta table regions, use 
methods in MetaTableLocator instead.
 
 
 
-static http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListPairHRegionInfo,ServerName
+static http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListPairRegionInfo,ServerName
 MetaTableAccessor.getTableRegionsAndLocations(Connectionconnection,
TableNametableName,
booleanexcludeOfflinedSplitParents)
@@ -656,7 +656,7 @@ service.
 
 
 (package private) static boolean
-MetaTableAccessor.isInsideTable(HRegionInfocurrent,
+MetaTableAccessor.isInsideTable(RegionInfocurrent,
  TableNametableName)
 
 
@@ -729,7 +729,7 @@ service.
 
 
 
-private static http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListPairHRegionInfo,ServerName
+private static http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListPairRegionInfo,ServerName
 AsyncMetaTableAccessor.getTableRegionsAndLocations(RawAsyncTablemetaTable,
http://docs.oracle.com/javase/8/docs/api/java/util/Optional.html?is-external=true;
 title="class or interface in java.util">OptionalTableNametableName,
booleanexcludeOfflinedSplitParents)
@@ -2071,81 +2071,85 @@ service.
 
 
 private TableName
-RegionInfoBuilder.MutableRegionInfo.tableName
+RegionInfoBuilder.tableName
 
 
 private TableName
-RawAsyncTableImpl.tableName
+RegionInfoBuilder.MutableRegionInfo.tableName
 
 
 private TableName
-RegionCoprocessorRpcChannelImpl.tableName
+RawAsyncTableImpl.tableName
 
 
 private TableName

[24/51] [partial] hbase-site git commit: Published site at .

2017-09-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/67deb422/devapidocs/org/apache/hadoop/hbase/HRegionInfo.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/HRegionInfo.html 
b/devapidocs/org/apache/hadoop/hbase/HRegionInfo.html
index 4e0bce6..be84e9a 100644
--- a/devapidocs/org/apache/hadoop/hbase/HRegionInfo.html
+++ b/devapidocs/org/apache/hadoop/hbase/HRegionInfo.html
@@ -123,7 +123,7 @@ var activeTableTab = "activeTableTab";
 
 http://docs.oracle.com/javase/8/docs/api/java/lang/Deprecated.html?is-external=true;
 title="class or interface in java.lang">@Deprecated
  @InterfaceAudience.Public
-public class HRegionInfo
+public class HRegionInfo
 extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
 implements RegionInfo, http://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true;
 title="class or interface in java.lang">ComparableHRegionInfo
 Information about a region. A region is a range of keys in 
the whole keyspace of a table, an
@@ -312,19 +312,12 @@ implements 
 
 
-HRegionInfo(HRegionInfoother)
-Deprecated.
-Costruct a copy of another HRegionInfo
-
-
-
-
 HRegionInfo(HRegionInfoother,
intreplicaId)
 Deprecated.
 
 
-
+
 private 
 HRegionInfo(longregionId,
TableNametableName)
@@ -333,7 +326,7 @@ implements 
 
 
-
+
 
 HRegionInfo(longregionId,
TableNametableName,
@@ -341,6 +334,13 @@ implements Deprecated.
 
 
+
+
+HRegionInfo(RegionInfoother)
+Deprecated.
+Costruct a copy of another HRegionInfo
+
+
 
 
 HRegionInfo(TableNametableName)
@@ -458,7 +458,7 @@ implements convert(HRegionInfoinfo)
 Deprecated.
 As of release 2.0.0, this 
will be removed in HBase 3.0.0
- Use toProtoRegionInfo(org.apache.hadoop.hbase.client.RegionInfo)
+ Use toRegionInfo(org.apache.hadoop.hbase.client.RegionInfo)
  in 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil.
 
 
@@ -918,7 +918,7 @@ implements 
 
 LOG
-private static finalorg.apache.commons.logging.Log LOG
+private static finalorg.apache.commons.logging.Log LOG
 Deprecated.
 
 
@@ -928,7 +928,7 @@ implements 
 
 ENCODED_REGION_NAME_REGEX
-public static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String ENCODED_REGION_NAME_REGEX
+public static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String ENCODED_REGION_NAME_REGEX
 Deprecated.
 A non-capture group so that this can be embedded.
 
@@ -943,7 +943,7 @@ implements 
 
 MAX_REPLICA_ID
-private static finalint MAX_REPLICA_ID
+private static finalint MAX_REPLICA_ID
 Deprecated.
 
 See Also:
@@ -957,7 +957,7 @@ implements 
 
 endKey
-privatebyte[] endKey
+privatebyte[] endKey
 Deprecated.
 
 
@@ -967,7 +967,7 @@ implements 
 
 offLine
-privateboolean offLine
+privateboolean offLine
 Deprecated.
 
 
@@ -977,7 +977,7 @@ implements 
 
 regionId
-privatelong regionId
+privatelong regionId
 Deprecated.
 
 
@@ -987,7 +987,7 @@ implements 
 
 regionName
-private transientbyte[] regionName
+private transientbyte[] regionName
 Deprecated.
 
 
@@ -997,7 +997,7 @@ implements 
 
 split
-privateboolean split
+privateboolean split
 Deprecated.
 
 
@@ -1007,7 +1007,7 @@ implements 
 
 startKey
-privatebyte[] startKey
+privatebyte[] startKey
 Deprecated.
 
 
@@ -1017,7 +1017,7 @@ implements 
 
 hashCode
-privateint hashCode
+privateint hashCode
 Deprecated.
 
 
@@ -1027,7 +1027,7 @@ implements 
 
 NO_HASH
-public static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String NO_HASH
+public static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String NO_HASH
 Deprecated.
 
 
@@ -1037,7 +1037,7 @@ implements 
 
 encodedName
-privatehttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String encodedName
+privatehttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String encodedName
 Deprecated.
 
 
@@ -1047,7 +1047,7 @@ implements 
 
 encodedNameAsBytes
-privatebyte[] encodedNameAsBytes
+privatebyte[] encodedNameAsBytes
 Deprecated.
 
 
@@ -1057,7 +1057,7 @@ implements 
 
 replicaId
-privateint replicaId
+privateint replicaId
 Deprecated.
 
 
@@ -1067,7 +1067,7 @@ implements 
 
 tableName
-privateTableName tableName
+privateTableName tableName
 Deprecated.
 
 
@@ -1077,7 +1077,7 @@ implements 
 
 DISPLAY_KEYS_KEY
-static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String DISPLAY_KEYS_KEY
+static 

[34/51] [partial] hbase-site git commit: Published site at .

2017-09-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/67deb422/apidocs/src-html/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.html
--
diff --git 
a/apidocs/src-html/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.html 
b/apidocs/src-html/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.html
index 38d76b6..5478df1 100644
--- a/apidocs/src-html/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.html
+++ b/apidocs/src-html/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.html
@@ -90,115 +90,115 @@
 082import 
org.apache.hadoop.io.SequenceFile;
 083import org.apache.hadoop.io.Text;
 084import org.apache.hadoop.mapreduce.Job;
-085import 
org.apache.hadoop.mapreduce.OutputFormat;
-086import 
org.apache.hadoop.mapreduce.RecordWriter;
-087import 
org.apache.hadoop.mapreduce.TaskAttemptContext;
-088import 
org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter;
-089import 
org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
-090import 
org.apache.hadoop.mapreduce.lib.partition.TotalOrderPartitioner;
-091import 
org.apache.yetus.audience.InterfaceAudience;
-092
-093import 
org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
-094
-095/**
-096 * Writes HFiles. Passed Cells must 
arrive in order.
-097 * Writes current time as the sequence id 
for the file. Sets the major compacted
-098 * attribute on created @{link {@link 
HFile}s. Calling write(null,null) will forcibly roll
-099 * all HFiles being written.
-100 * p
-101 * Using this class as part of a 
MapReduce job is best done
-102 * using {@link 
#configureIncrementalLoad(Job, TableDescriptor, RegionLocator)}.
-103 */
-104@InterfaceAudience.Public
-105public class HFileOutputFormat2
-106extends 
FileOutputFormatImmutableBytesWritable, Cell {
-107  private static final Log LOG = 
LogFactory.getLog(HFileOutputFormat2.class);
-108  static class TableInfo {
-109private TableDescriptor 
tableDesctiptor;
-110private RegionLocator 
regionLocator;
-111
-112public TableInfo(TableDescriptor 
tableDesctiptor, RegionLocator regionLocator) {
-113  this.tableDesctiptor = 
tableDesctiptor;
-114  this.regionLocator = 
regionLocator;
-115}
-116
-117/**
-118 * The modification for the returned 
HTD doesn't affect the inner TD.
-119 * @return A clone of inner table 
descriptor
-120 * @deprecated use {@link 
#getTableDescriptor}
-121 */
-122@Deprecated
-123public HTableDescriptor 
getHTableDescriptor() {
-124  return new 
HTableDescriptor(tableDesctiptor);
-125}
-126
-127public TableDescriptor 
getTableDescriptor() {
-128  return tableDesctiptor;
-129}
-130
-131public RegionLocator 
getRegionLocator() {
-132  return regionLocator;
-133}
-134  }
-135
-136  protected static final byte[] 
tableSeparator = ";".getBytes(StandardCharsets.UTF_8);
-137
-138  protected static byte[] 
combineTableNameSuffix(byte[] tableName,
-139   
byte[] suffix ) {
-140return Bytes.add(tableName, 
tableSeparator, suffix);
-141  }
-142
-143  // The following constants are private 
since these are used by
-144  // HFileOutputFormat2 to internally 
transfer data between job setup and
-145  // reducer run using conf.
-146  // These should not be changed by the 
client.
-147  static final String 
COMPRESSION_FAMILIES_CONF_KEY =
-148  
"hbase.hfileoutputformat.families.compression";
-149  static final String 
BLOOM_TYPE_FAMILIES_CONF_KEY =
-150  
"hbase.hfileoutputformat.families.bloomtype";
-151  static final String 
BLOCK_SIZE_FAMILIES_CONF_KEY =
-152  
"hbase.mapreduce.hfileoutputformat.blocksize";
-153  static final String 
DATABLOCK_ENCODING_FAMILIES_CONF_KEY =
-154  
"hbase.mapreduce.hfileoutputformat.families.datablock.encoding";
-155
-156  // This constant is public since the 
client can modify this when setting
-157  // up their conf object and thus refer 
to this symbol.
-158  // It is present for backwards 
compatibility reasons. Use it only to
-159  // override the auto-detection of 
datablock encoding.
-160  public static final String 
DATABLOCK_ENCODING_OVERRIDE_CONF_KEY =
-161  
"hbase.mapreduce.hfileoutputformat.datablock.encoding";
-162
-163  /**
-164   * Keep locality while generating 
HFiles for bulkload. See HBASE-12596
-165   */
-166  public static final String 
LOCALITY_SENSITIVE_CONF_KEY =
-167  
"hbase.bulkload.locality.sensitive.enabled";
-168  private static final boolean 
DEFAULT_LOCALITY_SENSITIVE = true;
-169  static final String 
OUTPUT_TABLE_NAME_CONF_KEY =
-170  
"hbase.mapreduce.hfileoutputformat.table.name";
-171  static final String 
MULTI_TABLE_HFILEOUTPUTFORMAT_CONF_KEY =
-172  
"hbase.mapreduce.use.multi.table.hfileoutputformat";
-173
-174  public static final String 
STORAGE_POLICY_PROPERTY = "hbase.hstore.storagepolicy";
-175  public static final String 
STORAGE_POLICY_PROPERTY_CF_PREFIX 

[36/51] [partial] hbase-site git commit: Published site at .

2017-09-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/67deb422/apidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncTable.html
--
diff --git a/apidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncTable.html 
b/apidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncTable.html
index 523d98a..22eba3b 100644
--- a/apidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncTable.html
+++ b/apidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncTable.html
@@ -25,236 +25,235 @@
 017 */
 018package org.apache.hadoop.hbase.client;
 019
-020import com.google.protobuf.RpcCallback;
-021import com.google.protobuf.RpcChannel;
-022import 
com.google.protobuf.RpcController;
-023
-024import 
java.util.concurrent.CompletableFuture;
-025import java.util.function.Function;
-026
-027import 
org.apache.hadoop.hbase.HRegionInfo;
-028import 
org.apache.yetus.audience.InterfaceAudience;
-029
-030/**
-031 * A low level asynchronous table.
-032 * p
-033 * The implementation is required to be 
thread safe.
-034 * p
-035 * The returned {@code CompletableFuture} 
will be finished directly in the rpc framework's callback
-036 * thread, so typically you should not do 
any time consuming work inside these methods, otherwise
-037 * you will be likely to block at least 
one connection to RS(even more if the rpc framework uses
-038 * NIO).
-039 * p
-040 * So, only experts that want to build 
high performance service should use this interface directly,
-041 * especially for the {@link #scan(Scan, 
RawScanResultConsumer)} below.
-042 * p
-043 * TODO: For now the only difference 
between this interface and {@link AsyncTable} is the scan
-044 * method. The {@link 
RawScanResultConsumer} exposes the implementation details of a 
scan(heartbeat)
-045 * so it is not suitable for a normal 
user. If it is still the only difference after we implement
-046 * most features of AsyncTable, we can 
think about merge these two interfaces.
-047 * @since 2.0.0
-048 */
-049@InterfaceAudience.Public
-050public interface RawAsyncTable extends 
AsyncTableBase {
-051
-052  /**
-053   * The basic scan API uses the observer 
pattern. All results that match the given scan object will
-054   * be passed to the given {@code 
consumer} by calling {@code RawScanResultConsumer.onNext}.
-055   * {@code 
RawScanResultConsumer.onComplete} means the scan is finished, and
-056   * {@code 
RawScanResultConsumer.onError} means we hit an unrecoverable error and the scan 
is
-057   * terminated. {@code 
RawScanResultConsumer.onHeartbeat} means the RS is still working but we can
-058   * not get a valid result to call 
{@code RawScanResultConsumer.onNext}. This is usually because
-059   * the matched results are too sparse, 
for example, a filter which almost filters out everything
-060   * is specified.
-061   * p
-062   * Notice that, the methods of the 
given {@code consumer} will be called directly in the rpc
-063   * framework's callback thread, so 
typically you should not do any time consuming work inside
-064   * these methods, otherwise you will be 
likely to block at least one connection to RS(even more if
-065   * the rpc framework uses NIO).
-066   * @param scan A configured {@link 
Scan} object.
-067   * @param consumer the consumer used to 
receive results.
-068   */
-069  void scan(Scan scan, 
RawScanResultConsumer consumer);
-070
-071  /**
-072   * Delegate to a protobuf rpc call.
-073   * p
-074   * Usually, it is just a simple lambda 
expression, like:
-075   *
-076   * pre
-077   * code
-078   * (stub, controller, rpcCallback) 
- {
-079   *   XXXRequest request = ...; // 
prepare the request
-080   *   stub.xxx(controller, request, 
rpcCallback);
-081   * }
-082   * /code
-083   * /pre
-084   *
-085   * And if you can prepare the {@code 
request} before calling the coprocessorService method, the
-086   * lambda expression will be:
-087   *
-088   * pre
-089   * code
-090   * (stub, controller, rpcCallback) 
- stub.xxx(controller, request, rpcCallback)
-091   * /code
-092   * /pre
-093   */
-094  @InterfaceAudience.Public
-095  @FunctionalInterface
-096  interface CoprocessorCallableS, 
R {
-097
-098/**
-099 * Represent the actual protobuf rpc 
call.
-100 * @param stub the asynchronous 
stub
-101 * @param controller the rpc 
controller, has already been prepared for you
-102 * @param rpcCallback the rpc 
callback, has already been prepared for you
-103 */
-104void call(S stub, RpcController 
controller, RpcCallbackR rpcCallback);
-105  }
-106
-107  /**
-108   * Execute the given coprocessor call 
on the region which contains the given {@code row}.
-109   * p
-110   * The {@code stubMaker} is just a 
delegation to the {@code newStub} call. Usually it is only a
-111   * one line lambda expression, like:
-112   *
-113   * pre
-114   * code
-115   * channel - 
xxxService.newStub(channel)
-116   * /code
-117   * /pre
-118   *
-119   * @param stubMaker a delegation to 

[09/19] hbase git commit: HBASE-18839 Apply RegionInfo to code base

2017-09-28 Thread chia7712
http://git-wip-us.apache.org/repos/asf/hbase/blob/6693f45f/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java
index 1d31d5a..b21d55a 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java
@@ -23,21 +23,21 @@ import java.util.List;
 import java.util.Map;
 import java.util.concurrent.ConcurrentMap;
 
-import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.hbase.Abortable;
-import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.Server;
+import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.locking.EntityLock;
 import org.apache.hadoop.hbase.executor.ExecutorService;
 import org.apache.hadoop.hbase.ipc.RpcServerInterface;
 import org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager;
 import org.apache.hadoop.hbase.quotas.RegionServerSpaceQuotaManager;
 import org.apache.hadoop.hbase.regionserver.throttle.ThroughputController;
-import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode;
 import org.apache.hadoop.hbase.wal.WAL;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.zookeeper.KeeperException;
 
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode;
+
 import com.google.protobuf.Service;
 
 /**
@@ -49,7 +49,7 @@ public interface RegionServerServices
 
   /** @return the WAL for a particular region. Pass null for getting the
* default (common) WAL */
-  WAL getWAL(HRegionInfo regionInfo) throws IOException;
+  WAL getWAL(RegionInfo regionInfo) throws IOException;
 
   /** @return the List of WALs that are used by this server
*  Doesn't include the meta WAL
@@ -127,11 +127,11 @@ public interface RegionServerServices
 private final TransitionCode code;
 private final long openSeqNum;
 private final long masterSystemTime;
-private final HRegionInfo[] hris;
+private final RegionInfo[] hris;
 
 @InterfaceAudience.Private
 public RegionStateTransitionContext(TransitionCode code, long openSeqNum, 
long masterSystemTime,
-HRegionInfo... hris) {
+RegionInfo... hris) {
   this.code = code;
   this.openSeqNum = openSeqNum;
   this.masterSystemTime = masterSystemTime;
@@ -146,7 +146,7 @@ public interface RegionServerServices
 public long getMasterSystemTime() {
   return masterSystemTime;
 }
-public HRegionInfo[] getHris() {
+public RegionInfo[] getHris() {
   return hris;
 }
   }
@@ -161,14 +161,14 @@ public interface RegionServerServices
* @deprecated use {@link 
#reportRegionStateTransition(RegionStateTransitionContext)}
*/
   @Deprecated
-  boolean reportRegionStateTransition(TransitionCode code, long openSeqNum, 
HRegionInfo... hris);
+  boolean reportRegionStateTransition(TransitionCode code, long openSeqNum, 
RegionInfo... hris);
 
   /**
* Notify master that a handler requests to change a region state
* @deprecated use {@link 
#reportRegionStateTransition(RegionStateTransitionContext)}
*/
   @Deprecated
-  boolean reportRegionStateTransition(TransitionCode code, HRegionInfo... 
hris);
+  boolean reportRegionStateTransition(TransitionCode code, RegionInfo... hris);
 
   /**
* Returns a reference to the region server's RPC server
@@ -244,7 +244,7 @@ public interface RegionServerServices
   /**
* Master based locks on namespaces/tables/regions.
*/
-  EntityLock regionLock(List regionInfos, String description,
+  EntityLock regionLock(List regionInfos, String description,
   Abortable abort) throws IOException;
 
   /**

http://git-wip-us.apache.org/repos/asf/hbase/blob/6693f45f/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServicesForStores.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServicesForStores.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServicesForStores.java
index 9b4a32a..aea92f8 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServicesForStores.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServicesForStores.java
@@ -23,9 +23,9 @@ import java.util.concurrent.ThreadFactory;
 import java.util.concurrent.ThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
 
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.yetus.audience.InterfaceAudience;
+import 

[17/19] hbase git commit: HBASE-18839 Apply RegionInfo to code base

2017-09-28 Thread chia7712
http://git-wip-us.apache.org/repos/asf/hbase/blob/6693f45f/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
index e5f1848..2fbbc3f 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
@@ -30,11 +30,9 @@ import org.apache.hadoop.hbase.CellScannable;
 import org.apache.hadoop.hbase.ClusterStatus.Option;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.hbase.client.Action;
 import org.apache.hadoop.hbase.client.Append;
 import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
@@ -46,6 +44,7 @@ import org.apache.hadoop.hbase.client.MasterSwitchType;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.RegionCoprocessorServiceExec;
+import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.Row;
 import org.apache.hadoop.hbase.client.RowMutations;
 import org.apache.hadoop.hbase.client.Scan;
@@ -54,6 +53,12 @@ import 
org.apache.hadoop.hbase.client.replication.ReplicationSerDeHelper;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.filter.ByteArrayComparable;
 import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.hadoop.hbase.util.Pair;
+import org.apache.hadoop.security.token.Token;
+import org.apache.yetus.audience.InterfaceAudience;
+
 import org.apache.hadoop.hbase.shaded.com.google.protobuf.UnsafeByteOperations;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesRequest;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
@@ -70,7 +75,6 @@ import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateFavor
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest.RegionUpdateInfo;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.WarmupRegionRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.BulkLoadHFileRequest;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.Condition;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.GetRequest;
@@ -81,6 +85,7 @@ import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationPr
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto.MutationType;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.RegionAction;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ScanRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.CompareType;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
@@ -97,12 +102,9 @@ import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTabl
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableRequest;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersRequest;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest;
-import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest;
-import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyNamespaceRequest;
-import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RemoveDrainFromRegionServersRequest;
-import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetCleanerChoreRunningRequest;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableRequest;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterStatusRequest;
+import 

[15/19] hbase git commit: HBASE-18839 Apply RegionInfo to code base

2017-09-28 Thread chia7712
http://git-wip-us.apache.org/repos/asf/hbase/blob/6693f45f/hbase-server/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeLoadBalancer.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeLoadBalancer.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeLoadBalancer.java
index 9c5c180..68e5e89 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeLoadBalancer.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeLoadBalancer.java
@@ -29,23 +29,26 @@ import java.util.List;
 import java.util.Map;
 import java.util.Set;
 
-import org.apache.hadoop.hbase.shaded.com.google.common.collect.Maps;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.favored.FavoredNodesPlan.Position;
 import org.apache.hadoop.hbase.HBaseIOException;
 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
-import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.ServerLoad;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.master.*;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.favored.FavoredNodesPlan.Position;
+import org.apache.hadoop.hbase.master.RackManager;
+import org.apache.hadoop.hbase.master.RegionPlan;
+import org.apache.hadoop.hbase.master.ServerManager;
+import org.apache.hadoop.hbase.master.SnapshotOfRegionAssignmentFromMeta;
 import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer;
 import org.apache.hadoop.hbase.util.Pair;
+import org.apache.yetus.audience.InterfaceAudience;
 
 import org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
+import org.apache.hadoop.hbase.shaded.com.google.common.collect.Maps;
 import org.apache.hadoop.hbase.shaded.com.google.common.collect.Sets;
 
 /**
@@ -85,7 +88,7 @@ public class FavoredNodeLoadBalancer extends BaseLoadBalancer 
implements Favored
   }
 
   @Override
-  public List balanceCluster(Map 
clusterState)  {
+  public List balanceCluster(Map 
clusterState)  {
 //TODO. Look at is whether Stochastic loadbalancer can be integrated with 
this
 List plans = new ArrayList<>();
 //perform a scan of the meta to get the latest updates (if any)
@@ -105,13 +108,13 @@ public class FavoredNodeLoadBalancer extends 
BaseLoadBalancer implements Favored
   // FindBugs complains about useless store! 
serverNameToServerNameWithoutCode.put(sn, s);
   serverNameWithoutCodeToServerName.put(s, sn);
 }
-for (Map.Entry entry : 
clusterState.entrySet()) {
+for (Map.Entry entry : 
clusterState.entrySet()) {
   ServerName currentServer = entry.getKey();
   //get a server without the startcode for the currentServer
   ServerName currentServerWithoutStartCode = 
ServerName.valueOf(currentServer.getHostname(),
   currentServer.getPort(), ServerName.NON_STARTCODE);
-  List list = entry.getValue();
-  for (HRegionInfo region : list) {
+  List list = entry.getValue();
+  for (RegionInfo region : list) {
 if(!FavoredNodesManager.isFavoredNodeApplicable(region)) {
   continue;
 }
@@ -157,9 +160,9 @@ public class FavoredNodeLoadBalancer extends 
BaseLoadBalancer implements Favored
   }
 
   @Override
-  public Map 
roundRobinAssignment(List regions,
+  public Map 
roundRobinAssignment(List regions,
   List servers) throws HBaseIOException {
-Map assignmentMap;
+Map assignmentMap;
 try {
   FavoredNodeAssignmentHelper assignmentHelper =
   new FavoredNodeAssignmentHelper(servers, rackManager);
@@ -183,10 +186,10 @@ public class FavoredNodeLoadBalancer extends 
BaseLoadBalancer implements Favored
   //need to come up with favored nodes assignments for them. The 
corner case
   //in (1) above is that all the nodes are unavailable and in that 
case, we
   //will note that this region doesn't have favored nodes.
-  Pair, List> 
segregatedRegions =
+  Pair, List> 
segregatedRegions =
   segregateRegionsAndAssignRegionsWithFavoredNodes(regions, servers);
-  Map regionsWithFavoredNodesMap = 
segregatedRegions.getFirst();
-  List regionsWithNoFavoredNodes = 
segregatedRegions.getSecond();
+  Map regionsWithFavoredNodesMap = 
segregatedRegions.getFirst();
+  List regionsWithNoFavoredNodes = 
segregatedRegions.getSecond();
   assignmentMap = new HashMap<>();
   roundRobinAssignmentImpl(assignmentHelper, assignmentMap, 

[01/19] hbase git commit: HBASE-18839 Apply RegionInfo to code base

2017-09-28 Thread chia7712
Repository: hbase
Updated Branches:
  refs/heads/branch-2 d26b8f8dd -> 6693f45fa


http://git-wip-us.apache.org/repos/asf/hbase/blob/6693f45f/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckTwoRS.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckTwoRS.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckTwoRS.java
index beef02b..6fa455a 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckTwoRS.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckTwoRS.java
@@ -34,8 +34,24 @@ import java.util.concurrent.TimeUnit;
 
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.*;
-import org.apache.hadoop.hbase.client.*;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionLocation;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.MetaTableAccessor;
+import org.apache.hadoop.hbase.MiniHBaseCluster;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.ClusterConnection;
+import org.apache.hadoop.hbase.client.Durability;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.client.RegionInfoBuilder;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
 import org.apache.hadoop.hbase.io.HFileLink;
 import org.apache.hadoop.hbase.io.hfile.HFile;
@@ -43,7 +59,6 @@ import org.apache.hadoop.hbase.io.hfile.HFileContext;
 import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
 import org.apache.hadoop.hbase.master.RegionState;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
-import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.testclassification.MiscTests;
 import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
@@ -57,6 +72,7 @@ import org.junit.experimental.categories.Category;
 import org.junit.rules.TestName;
 
 import org.apache.hadoop.hbase.shaded.com.google.common.collect.Multimap;
+import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 
 @Ignore // Until after HBASE-14614 goes in.
 @Category({MiscTests.class, LargeTests.class})
@@ -111,10 +127,10 @@ public class TestHBaseFsckTwoRS extends BaseTestHBaseFsck 
{
   @Test(timeout=18)
   public void testFixAssignmentsWhenMETAinTransition() throws Exception {
 MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
-admin.unassign(HRegionInfo.FIRST_META_REGIONINFO.getRegionName(), true);
-assignmentManager.offlineRegion(HRegionInfo.FIRST_META_REGIONINFO);
+admin.unassign(RegionInfoBuilder.FIRST_META_REGIONINFO.getRegionName(), 
true);
+assignmentManager.offlineRegion(RegionInfoBuilder.FIRST_META_REGIONINFO);
 new 
MetaTableLocator().deleteMetaLocation(cluster.getMaster().getZooKeeper());
-
assertFalse(regionStates.isRegionOnline(HRegionInfo.FIRST_META_REGIONINFO));
+
assertFalse(regionStates.isRegionOnline(RegionInfoBuilder.FIRST_META_REGIONINFO));
 HBaseFsck hbck = doFsck(conf, true);
 assertErrors(hbck, new HBaseFsck.ErrorReporter.ERROR_CODE[] { 
HBaseFsck.ErrorReporter.ERROR_CODE.UNKNOWN, 
HBaseFsck.ErrorReporter.ERROR_CODE.NO_META_REGION,
 HBaseFsck.ErrorReporter.ERROR_CODE.NULL_META_REGION });
@@ -134,7 +150,7 @@ public class TestHBaseFsckTwoRS extends BaseTestHBaseFsck {
   assertEquals(ROWKEYS.length, countRows());
 
   // Now let's mess it up, by adding a region with a duplicate startkey
-  HRegionInfo hriDupe =
+  RegionInfo hriDupe =
   createRegion(tbl.getTableDescriptor(), Bytes.toBytes("A"), 
Bytes.toBytes("A2"));
   TEST_UTIL.assignRegion(hriDupe);
 
@@ -172,7 +188,7 @@ public class TestHBaseFsckTwoRS extends BaseTestHBaseFsck {
   assertEquals(ROWKEYS.length, countRows());
 
   // Now let's mess it up, by adding a region with a duplicate startkey
-  HRegionInfo hriDupe =
+  RegionInfo hriDupe =
   createRegion(tbl.getTableDescriptor(), Bytes.toBytes("A"), 
Bytes.toBytes("B"));
   TEST_UTIL.assignRegion(hriDupe);
 
@@ -221,7 +237,7 @@ public class TestHBaseFsckTwoRS extends BaseTestHBaseFsck {
   assertEquals(ROWKEYS.length, countRows());
 
   // Mess it up by creating an overlap in the metadata
-  HRegionInfo hriOverlap =
+  RegionInfo hriOverlap =
   createRegion(tbl.getTableDescriptor(), Bytes.toBytes("A2"), 
Bytes.toBytes("B"));

[19/19] hbase git commit: HBASE-18839 Apply RegionInfo to code base

2017-09-28 Thread chia7712
HBASE-18839 Apply RegionInfo to code base


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/6693f45f
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/6693f45f
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/6693f45f

Branch: refs/heads/branch-2
Commit: 6693f45fafb86017df0ec9398dbb6ef3ceac6ef4
Parents: d26b8f8
Author: Chia-Ping Tsai 
Authored: Thu Sep 28 16:16:21 2017 +0800
Committer: Chia-Ping Tsai 
Committed: Thu Sep 28 20:19:41 2017 +0800

--
 .../hadoop/hbase/backup/util/BackupUtils.java   |  14 +-
 .../hadoop/hbase/AsyncMetaTableAccessor.java|  47 +--
 .../org/apache/hadoop/hbase/HRegionInfo.java|  19 +-
 .../apache/hadoop/hbase/HRegionLocation.java|  26 +-
 .../apache/hadoop/hbase/MetaTableAccessor.java  | 245 ++---
 .../apache/hadoop/hbase/RegionLocations.java|   5 +-
 .../apache/hadoop/hbase/client/AsyncAdmin.java  |  15 +-
 .../hadoop/hbase/client/AsyncHBaseAdmin.java|   9 +-
 .../hbase/client/ConnectionImplementation.java  |  38 +-
 .../hbase/client/FlushRegionCallable.java   |  10 +-
 .../apache/hadoop/hbase/client/HBaseAdmin.java  | 134 ---
 .../hadoop/hbase/client/HRegionLocator.java |   9 +-
 .../hadoop/hbase/client/HTableMultiplexer.java  |  15 +-
 .../hbase/client/ImmutableHRegionInfo.java  |   2 +-
 .../hadoop/hbase/client/RawAsyncHBaseAdmin.java |  64 ++--
 .../hadoop/hbase/client/RawAsyncTable.java  |  25 +-
 .../hadoop/hbase/client/RawAsyncTableImpl.java  |  18 +-
 .../client/RegionCoprocessorRpcChannelImpl.java |  22 +-
 .../apache/hadoop/hbase/client/RegionInfo.java  |  27 +-
 .../hadoop/hbase/client/RegionInfoBuilder.java  | 360 ++-
 .../hadoop/hbase/client/RegionReplicaUtil.java  |  65 ++--
 .../hadoop/hbase/client/ZooKeeperRegistry.java  |   7 +-
 .../apache/hadoop/hbase/master/RegionState.java |  23 +-
 .../hbase/shaded/protobuf/ProtobufUtil.java |  38 +-
 .../hbase/shaded/protobuf/RequestConverter.java |  48 +--
 .../shaded/protobuf/ResponseConverter.java  |  20 +-
 .../hbase/zookeeper/MetaTableLocator.java   |  71 ++--
 .../hadoop/hbase/client/TestAsyncProcess.java   |  28 +-
 .../coprocessor/AsyncAggregationClient.java |  28 +-
 .../apache/hadoop/hbase/coprocessor/Export.java |  22 +-
 .../example/TestRefreshHFilesEndpoint.java  |  20 +-
 .../mapreduce/TableSnapshotInputFormat.java |  26 +-
 .../hbase/regionserver/CompactionTool.java  |  10 +-
 .../hadoop/hbase/snapshot/ExportSnapshot.java   |  23 +-
 .../hbase/mapreduce/TestImportExport.java   |  18 +-
 .../replication/TestReplicationSmallTests.java  |  25 +-
 .../hbase/snapshot/TestExportSnapshot.java  |  13 +-
 .../hbase/snapshot/TestMobExportSnapshot.java   |   4 +-
 .../hadoop/hbase/rest/RegionsResource.java  |  20 +-
 .../hbase/rsgroup/RSGroupAdminServer.java   |  43 +--
 .../hbase/rsgroup/RSGroupBasedLoadBalancer.java | 104 +++---
 .../hbase/rsgroup/RSGroupInfoManagerImpl.java   |  15 +-
 .../balancer/TestRSGroupBasedLoadBalancer.java  | 133 +++
 .../hadoop/hbase/rsgroup/TestRSGroupsBase.java  |  22 +-
 .../hbase/tmpl/master/MasterStatusTmpl.jamon|   6 +-
 .../hbase/tmpl/regionserver/RSStatusTmpl.jamon  |   4 +-
 .../tmpl/regionserver/RegionListTmpl.jamon  |  41 +--
 .../hadoop/hbase/RegionStateListener.java   |  11 +-
 .../hadoop/hbase/backup/HFileArchiver.java  |  18 +-
 .../hbase/client/ClientSideRegionScanner.java   |   5 +-
 .../hbase/client/TableSnapshotScanner.java  |  13 +-
 .../hbase/client/locking/LockServiceClient.java |  18 +-
 .../SplitLogManagerCoordination.java|   8 +-
 .../ZKSplitLogManagerCoordination.java  |  16 +-
 .../coprocessor/MultiRowMutationEndpoint.java   |  16 +-
 .../favored/FavoredNodeAssignmentHelper.java| 104 +++---
 .../hbase/favored/FavoredNodeLoadBalancer.java  |  85 ++---
 .../hbase/favored/FavoredNodesManager.java  |  44 +--
 .../hadoop/hbase/favored/FavoredNodesPlan.java  |  10 +-
 .../hbase/favored/FavoredNodesPromoter.java |   8 +-
 .../org/apache/hadoop/hbase/io/HFileLink.java   |  19 +-
 .../hadoop/hbase/master/AssignmentListener.java |   9 +-
 .../master/AssignmentVerificationReport.java|  38 +-
 .../hadoop/hbase/master/CatalogJanitor.java |  56 ++-
 .../org/apache/hadoop/hbase/master/HMaster.java |  58 +--
 .../hadoop/hbase/master/LoadBalancer.java   |  32 +-
 .../hadoop/hbase/master/MasterFileSystem.java   |  13 +-
 .../hbase/master/MasterMetaBootstrap.java   |  15 +-
 .../hadoop/hbase/master/MasterRpcServices.java  |  54 +--
 .../hadoop/hbase/master/MasterServices.java |  11 +-
 .../hadoop/hbase/master/MasterWalManager.java   |   6 +-
 .../hbase/master/RegionPlacementMaintainer.java |  67 ++--
 .../apache/hadoop/hbase/master/RegionPlan.java  |  10 +-
 .../hadoop/hbase/master/ServerManager.java  |  32 

[13/19] hbase git commit: HBASE-18839 Apply RegionInfo to code base

2017-09-28 Thread chia7712
http://git-wip-us.apache.org/repos/asf/hbase/blob/6693f45f/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java
index 9e37292..da6afc9 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java
@@ -30,15 +30,15 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.MetaMutationAnnotation;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.UnknownRegionException;
-import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
 import org.apache.hadoop.hbase.client.MasterSwitchType;
 import org.apache.hadoop.hbase.client.Mutation;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.client.RegionInfoBuilder;
 import org.apache.hadoop.hbase.client.RegionReplicaUtil;
 import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.exceptions.MergeRegionException;
@@ -59,13 +59,16 @@ import 
org.apache.hadoop.hbase.quotas.QuotaExceededException;
 import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
 import org.apache.hadoop.hbase.regionserver.HStoreFile;
 import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.yetus.audience.InterfaceAudience;
+
 import 
org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
+import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.MergeTableRegionsState;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-import org.apache.hadoop.hbase.util.FSUtils;
 
 /**
  * The procedure to Merge a region in a table.
@@ -79,8 +82,8 @@ public class MergeTableRegionsProcedure
   private Boolean traceEnabled;
   private volatile boolean lock = false;
   private ServerName regionLocation;
-  private HRegionInfo[] regionsToMerge;
-  private HRegionInfo mergedRegion;
+  private RegionInfo[] regionsToMerge;
+  private RegionInfo mergedRegion;
   private boolean forcible;
 
   public MergeTableRegionsProcedure() {
@@ -88,18 +91,18 @@ public class MergeTableRegionsProcedure
   }
 
   public MergeTableRegionsProcedure(final MasterProcedureEnv env,
-  final HRegionInfo regionToMergeA, final HRegionInfo regionToMergeB) 
throws IOException {
+  final RegionInfo regionToMergeA, final RegionInfo regionToMergeB) throws 
IOException {
 this(env, regionToMergeA, regionToMergeB, false);
   }
 
   public MergeTableRegionsProcedure(final MasterProcedureEnv env,
-  final HRegionInfo regionToMergeA, final HRegionInfo regionToMergeB,
+  final RegionInfo regionToMergeA, final RegionInfo regionToMergeB,
   final boolean forcible) throws MergeRegionException {
-this(env, new HRegionInfo[] {regionToMergeA, regionToMergeB}, forcible);
+this(env, new RegionInfo[] {regionToMergeA, regionToMergeB}, forcible);
   }
 
   public MergeTableRegionsProcedure(final MasterProcedureEnv env,
-  final HRegionInfo[] regionsToMerge, final boolean forcible)
+  final RegionInfo[] regionsToMerge, final boolean forcible)
   throws MergeRegionException {
 super(env);
 
@@ -117,7 +120,7 @@ public class MergeTableRegionsProcedure
 this.forcible = forcible;
   }
 
-  private static void checkRegionsToMerge(final HRegionInfo[] regionsToMerge,
+  private static void checkRegionsToMerge(final RegionInfo[] regionsToMerge,
   final boolean forcible) throws MergeRegionException {
 // For now, we only merge 2 regions.
 // It could be extended to more than 2 regions in the future.
@@ -129,19 +132,19 @@ public class MergeTableRegionsProcedure
 checkRegionsToMerge(regionsToMerge[0], regionsToMerge[1], forcible);
   }
 
-  private static void checkRegionsToMerge(final HRegionInfo regionToMergeA,
-  final HRegionInfo regionToMergeB, final boolean forcible) throws 
MergeRegionException {
+  private static void 

[11/19] hbase git commit: HBASE-18839 Apply RegionInfo to code base

2017-09-28 Thread chia7712
http://git-wip-us.apache.org/repos/asf/hbase/blob/6693f45f/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CloneSnapshotProcedure.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CloneSnapshotProcedure.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CloneSnapshotProcedure.java
index 9f2baf4..6155f16 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CloneSnapshotProcedure.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CloneSnapshotProcedure.java
@@ -30,11 +30,10 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.TableExistsException;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.yetus.audience.InterfaceAudience;
+import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.errorhandling.ForeignException;
@@ -46,11 +45,6 @@ import 
org.apache.hadoop.hbase.master.procedure.CreateTableProcedure.CreateHdfsR
 import org.apache.hadoop.hbase.monitoring.MonitoredTask;
 import org.apache.hadoop.hbase.monitoring.TaskMonitor;
 import org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer;
-import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos;
-import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.CloneSnapshotState;
-import 
org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;
 import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
 import org.apache.hadoop.hbase.snapshot.RestoreSnapshotException;
 import org.apache.hadoop.hbase.snapshot.RestoreSnapshotHelper;
@@ -59,8 +53,14 @@ import org.apache.hadoop.hbase.snapshot.SnapshotManifest;
 import org.apache.hadoop.hbase.util.FSTableDescriptors;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.Pair;
+import org.apache.yetus.audience.InterfaceAudience;
 
 import org.apache.hadoop.hbase.shaded.com.google.common.base.Preconditions;
+import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.CloneSnapshotState;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;
 
 @InterfaceAudience.Private
 public class CloneSnapshotProcedure
@@ -70,7 +70,7 @@ public class CloneSnapshotProcedure
   private TableDescriptor tableDescriptor;
   private SnapshotDescription snapshot;
   private boolean restoreAcl;
-  private List newRegions = null;
+  private List newRegions = null;
   private Map > parentsToChildrenPairMap = new 
HashMap<>();
 
   // Monitor
@@ -253,8 +253,8 @@ public class CloneSnapshotProcedure
 .setSnapshot(this.snapshot)
 .setTableSchema(ProtobufUtil.toTableSchema(tableDescriptor));
 if (newRegions != null) {
-  for (HRegionInfo hri: newRegions) {
-cloneSnapshotMsg.addRegionInfo(HRegionInfo.convert(hri));
+  for (RegionInfo hri: newRegions) {
+cloneSnapshotMsg.addRegionInfo(ProtobufUtil.toRegionInfo(hri));
   }
 }
 if (!parentsToChildrenPairMap.isEmpty()) {
@@ -289,7 +289,7 @@ public class CloneSnapshotProcedure
 } else {
   newRegions = new ArrayList<>(cloneSnapshotMsg.getRegionInfoCount());
   for (HBaseProtos.RegionInfo hri: cloneSnapshotMsg.getRegionInfoList()) {
-newRegions.add(HRegionInfo.convert(hri));
+newRegions.add(ProtobufUtil.toRegionInfo(hri));
   }
 }
 if (cloneSnapshotMsg.getParentToChildRegionsPairListCount() > 0) {
@@ -357,8 +357,8 @@ public class CloneSnapshotProcedure
   throws IOException, InterruptedException {
 final MasterCoprocessorHost cpHost = env.getMasterCoprocessorHost();
 if (cpHost != null) {
-  final HRegionInfo[] regions = (newRegions == null) ? null :
-newRegions.toArray(new HRegionInfo[newRegions.size()]);
+  final RegionInfo[] regions = (newRegions == null) ? null :
+newRegions.toArray(new RegionInfo[newRegions.size()]);
   cpHost.postCompletedCreateTableAction(tableDescriptor, regions, 
getUser());
 }
   }
@@ -368,16 +368,16 @@ public class CloneSnapshotProcedure
* @param env MasterProcedureEnv
* @throws 

[07/19] hbase git commit: HBASE-18839 Apply RegionInfo to code base

2017-09-28 Thread chia7712
http://git-wip-us.apache.org/repos/asf/hbase/blob/6693f45f/hbase-server/src/main/resources/hbase-webapps/master/table.jsp
--
diff --git a/hbase-server/src/main/resources/hbase-webapps/master/table.jsp 
b/hbase-server/src/main/resources/hbase-webapps/master/table.jsp
index 6c401a9..c7a1aff 100644
--- a/hbase-server/src/main/resources/hbase-webapps/master/table.jsp
+++ b/hbase-server/src/main/resources/hbase-webapps/master/table.jsp
@@ -17,54 +17,57 @@
  * limitations under the License.
  */
 --%>
-<%@page 
import="org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType"%>
+<%@page import="java.net.URLEncoder"%>
 <%@ page contentType="text/html;charset=UTF-8"
   import="static org.apache.commons.lang3.StringEscapeUtils.escapeXml"
-  import="org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString"
-  import="java.net.URLEncoder"
   import="java.util.ArrayList"
-  import="java.util.TreeMap"
-  import="java.util.List"
-  import="java.util.LinkedHashMap"
-  import="java.util.Map"
-  import="java.util.Set"
   import="java.util.Collection"
   import="java.util.Collections"
   import="java.util.Comparator"
+  import="java.util.LinkedHashMap"
+  import="java.util.List"
+  import="java.util.Map"
+  import="java.util.TreeMap"
   import="org.apache.commons.lang3.StringEscapeUtils"
   import="org.apache.hadoop.conf.Configuration"
-  import="org.apache.hadoop.util.StringUtils"
-  import="org.apache.hadoop.hbase.HRegionInfo"
+  import="org.apache.hadoop.hbase.HBaseConfiguration"
+  import="org.apache.hadoop.hbase.HColumnDescriptor"
+  import="org.apache.hadoop.hbase.HConstants"
   import="org.apache.hadoop.hbase.HRegionLocation"
-  import="org.apache.hadoop.hbase.ServerName"
-  import="org.apache.hadoop.hbase.ServerLoad"
   import="org.apache.hadoop.hbase.RegionLoad"
-  import="org.apache.hadoop.hbase.HConstants"
+  import="org.apache.hadoop.hbase.ServerLoad"
+  import="org.apache.hadoop.hbase.ServerName"
+  import="org.apache.hadoop.hbase.TableName"
+  import="org.apache.hadoop.hbase.TableNotFoundException"
+  import="org.apache.hadoop.hbase.client.Admin"
+  import="org.apache.hadoop.hbase.client.CompactionState"
+  import="org.apache.hadoop.hbase.client.RegionInfo"
+  import="org.apache.hadoop.hbase.client.RegionInfoBuilder"
+  import="org.apache.hadoop.hbase.client.RegionLocator"
+  import="org.apache.hadoop.hbase.client.RegionReplicaUtil"
+  import="org.apache.hadoop.hbase.client.Table"
   import="org.apache.hadoop.hbase.master.HMaster"
-  import="org.apache.hadoop.hbase.zookeeper.MetaTableLocator"
   import="org.apache.hadoop.hbase.quotas.QuotaTableUtil"
   import="org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshot"
   import="org.apache.hadoop.hbase.util.Bytes"
   import="org.apache.hadoop.hbase.util.FSUtils"
-  
import="org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos"
-  import="org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos"
-  import="org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Quotas"
-  
import="org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota"
-  import="org.apache.hadoop.hbase.TableName"
-  import="org.apache.hadoop.hbase.HColumnDescriptor"
-  import="org.apache.hadoop.hbase.HBaseConfiguration"
-  import="org.apache.hadoop.hbase.TableNotFoundException"%>
-<%@ page import="org.apache.hadoop.hbase.client.*" %>
+  import="org.apache.hadoop.hbase.zookeeper.MetaTableLocator"
+  import="org.apache.hadoop.util.StringUtils"
+  import="org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString"%>
+<%@ page 
import="org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos" 
%>
+<%@ page 
import="org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos" %>
+<%@ page 
import="org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Quotas" %>
+<%@ page 
import="org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota"
 %>
 <%!
   /**
-   * @return An empty region load stamped with the passed in hri
+   * @return An empty region load stamped with the passed in 
regionInfo
* region name.
*/
-  private RegionLoad getEmptyRegionLoad(final HRegionInfo hri) {
+  private RegionLoad getEmptyRegionLoad(final RegionInfo regionInfo) {
 return new RegionLoad(ClusterStatusProtos.RegionLoad.newBuilder().
   setRegionSpecifier(HBaseProtos.RegionSpecifier.newBuilder().
   setType(HBaseProtos.RegionSpecifier.RegionSpecifierType.REGION_NAME).
-  setValue(ByteString.copyFrom(hri.getRegionName())).build()).build());
+  
setValue(ByteString.copyFrom(regionInfo.getRegionName())).build()).build());
   }
 %>
 <%
@@ -205,10 +208,10 @@ if ( fqtn != null ) {
 %> Split request accepted. <%
 } else if (action.equals("compact")) {
   if (key != null && key.length() > 0) {
-List regions = 
admin.getTableRegions(TableName.valueOf(fqtn));
+List regions = 

[10/19] hbase git commit: HBASE-18839 Apply RegionInfo to code base

2017-09-28 Thread chia7712
http://git-wip-us.apache.org/repos/asf/hbase/blob/6693f45f/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/MasterFlushTableProcedureManager.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/MasterFlushTableProcedureManager.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/MasterFlushTableProcedureManager.java
index 03141a3..66f9240 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/MasterFlushTableProcedureManager.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/MasterFlushTableProcedureManager.java
@@ -24,15 +24,15 @@ import java.util.List;
 import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.ThreadPoolExecutor;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
-import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.MetaTableAccessor;
+import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.errorhandling.ForeignException;
 import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
 import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
@@ -43,12 +43,13 @@ import org.apache.hadoop.hbase.procedure.Procedure;
 import org.apache.hadoop.hbase.procedure.ProcedureCoordinator;
 import org.apache.hadoop.hbase.procedure.ProcedureCoordinatorRpcs;
 import org.apache.hadoop.hbase.procedure.ZKProcedureCoordinator;
-import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
+import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.zookeeper.KeeperException;
 
 import org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription;
 
 @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
 public class MasterFlushTableProcedureManager extends MasterProcedureManager {
@@ -125,7 +126,7 @@ public class MasterFlushTableProcedureManager extends 
MasterProcedureManager {
 // It is possible that regions may move after we get the region server 
list.
 // Each region server will get its own online regions for the table.
 // We may still miss regions that need to be flushed.
-List> regionsAndLocations;
+List> regionsAndLocations;
 
 if (TableName.META_TABLE_NAME.equals(tableName)) {
   regionsAndLocations = new MetaTableLocator().getMetaRegionsAndLocations(
@@ -136,9 +137,9 @@ public class MasterFlushTableProcedureManager extends 
MasterProcedureManager {
 }
 
 Set regionServers = new HashSet<>(regionsAndLocations.size());
-for (Pair region : regionsAndLocations) {
+for (Pair region : regionsAndLocations) {
   if (region != null && region.getFirst() != null && region.getSecond() != 
null) {
-HRegionInfo hri = region.getFirst();
+RegionInfo hri = region.getFirst();
 if (hri.isOffline() && (hri.isSplit() || hri.isSplitParent())) 
continue;
 regionServers.add(region.getSecond().toString());
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/6693f45f/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/FileSystemUtilizationChore.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/FileSystemUtilizationChore.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/FileSystemUtilizationChore.java
index 89d7e14..a76e9c1 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/FileSystemUtilizationChore.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/FileSystemUtilizationChore.java
@@ -26,16 +26,14 @@ import java.util.concurrent.TimeUnit;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.ScheduledChore;
-import org.apache.yetus.audience.InterfaceAudience;
+import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.regionserver.Store;
-import org.apache.hadoop.hbase.regionserver.StoreFile;

[06/19] hbase git commit: HBASE-18839 Apply RegionInfo to code base

2017-09-28 Thread chia7712
http://git-wip-us.apache.org/repos/asf/hbase/blob/6693f45f/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncClusterAdminApi.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncClusterAdminApi.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncClusterAdminApi.java
index 1acfcde..6307210 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncClusterAdminApi.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncClusterAdminApi.java
@@ -34,14 +34,13 @@ import java.util.Optional;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.ClusterStatus;
+import org.apache.hadoop.hbase.ClusterStatus.Option;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.RegionLoad;
 import org.apache.hadoop.hbase.ServerLoad;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.ClusterStatus.Option;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.testclassification.ClientTests;
@@ -227,13 +226,13 @@ public class TestAsyncClusterAdminApi extends 
TestAsyncAdminBase {
 // Check if regions match with the regionLoad from the server
 Collection servers = admin.getRegionServers().get();
 for (ServerName serverName : servers) {
-  List regions = admin.getOnlineRegions(serverName).get();
+  List regions = admin.getOnlineRegions(serverName).get();
   checkRegionsAndRegionLoads(regions, 
admin.getRegionLoads(serverName).get());
 }
 
 // Check if regionLoad matches the table's regions and nothing is missed
 for (TableName table : tables) {
-  List tableRegions = admin.getTableRegions(table).get();
+  List tableRegions = admin.getTableRegions(table).get();
   List regionLoads = Lists.newArrayList();
   for (ServerName serverName : servers) {
 regionLoads.addAll(admin.getRegionLoads(serverName, 
Optional.of(table)).get());
@@ -268,7 +267,7 @@ public class TestAsyncClusterAdminApi extends 
TestAsyncAdminBase {
 }
   }
 
-  private void checkRegionsAndRegionLoads(Collection regions,
+  private void checkRegionsAndRegionLoads(Collection regions,
   Collection regionLoads) {
 
 assertEquals("No of regions and regionloads doesn't match", 
regions.size(), regionLoads.size());
@@ -277,7 +276,7 @@ public class TestAsyncClusterAdminApi extends 
TestAsyncAdminBase {
 for (RegionLoad regionLoad : regionLoads) {
   regionLoadMap.put(regionLoad.getName(), regionLoad);
 }
-for (HRegionInfo info : regions) {
+for (RegionInfo info : regions) {
   assertTrue("Region not in regionLoadMap region:" + 
info.getRegionNameAsString()
   + " regionMap: " + regionLoadMap, 
regionLoadMap.containsKey(info.getRegionName()));
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/6693f45f/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi.java
index 6292b10..9775b86 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi.java
@@ -33,7 +33,6 @@ import java.util.stream.Collectors;
 
 import org.apache.hadoop.hbase.AsyncMetaTableAccessor;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
@@ -45,7 +44,6 @@ import 
org.apache.hadoop.hbase.master.assignment.AssignmentManager;
 import org.apache.hadoop.hbase.master.assignment.RegionStates;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.regionserver.Region;
-import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.testclassification.ClientTests;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -58,6 +56,8 @@ import org.junit.experimental.categories.Category;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
 
+import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
+
 /**
  * Class to test asynchronous region admin operations.
  */
@@ -69,10 +69,10 @@ public class TestAsyncRegionAdminApi extends 
TestAsyncAdminBase {
   public void testCloseRegion() throws 

[14/19] hbase git commit: HBASE-18839 Apply RegionInfo to code base

2017-09-28 Thread chia7712
http://git-wip-us.apache.org/repos/asf/hbase/blob/6693f45f/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionPlacementMaintainer.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionPlacementMaintainer.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionPlacementMaintainer.java
index d23cf7d..afd402b 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionPlacementMaintainer.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionPlacementMaintainer.java
@@ -40,31 +40,32 @@ import org.apache.commons.cli.ParseException;
 import org.apache.commons.lang3.StringUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.hbase.ClusterStatus.Option;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.ClusterStatus.Option;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.favored.FavoredNodeAssignmentHelper;
 import org.apache.hadoop.hbase.favored.FavoredNodesPlan;
-import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface;
-import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
-import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest;
-import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.MunkresAssignment;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.log4j.Level;
 import org.apache.log4j.Logger;
+import org.apache.yetus.audience.InterfaceAudience;
+
+import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse;
 
 /**
  * A tool that is used for manipulating and viewing favored nodes information
@@ -197,12 +198,12 @@ public class RegionPlacementMaintainer {
   Map> regionLocalityMap, FavoredNodesPlan plan,
   boolean munkresForSecondaryAndTertiary) throws IOException {
   // Get the all the regions for the current table
-  List regions =
+  List regions =
 assignmentSnapshot.getTableToRegionMap().get(tableName);
   int numRegions = regions.size();
 
   // Get the current assignment map
-  Map currentAssignmentMap =
+  Map currentAssignmentMap =
 assignmentSnapshot.getRegionToRegionServerMap();
 
   // Get the all the region servers
@@ -257,12 +258,12 @@ public class RegionPlacementMaintainer {
 // Compute the total rack locality for each region in each rack. The 
total
 // rack locality is the sum of the localities of a region on all 
servers in
 // a rack.
-Map> rackRegionLocality = new 
HashMap<>();
+Map> rackRegionLocality = new 
HashMap<>();
 for (int i = 0; i < numRegions; i++) {
-  HRegionInfo region = regions.get(i);
+  RegionInfo region = regions.get(i);
   for (int j = 0; j < regionSlots; j += slotsPerServer) {
 String rack = rackManager.getRack(servers.get(j / slotsPerServer));
-Map rackLocality = 
rackRegionLocality.get(rack);
+Map rackLocality = rackRegionLocality.get(rack);
 if (rackLocality == null) {
   rackLocality = new HashMap<>();
   rackRegionLocality.put(rack, rackLocality);
@@ -417,18 +418,18 @@ public class RegionPlacementMaintainer {
 LOG.info("Assignment plan for secondary and tertiary generated " +
 "using MunkresAssignment");
   } else {
-Map primaryRSMap = new HashMap<>();
+Map primaryRSMap = new HashMap<>();
 for (int i = 0; i < numRegions; i++) {
  

[16/19] hbase git commit: HBASE-18839 Apply RegionInfo to code base

2017-09-28 Thread chia7712
http://git-wip-us.apache.org/repos/asf/hbase/blob/6693f45f/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java
--
diff --git 
a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java
 
b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java
index 169e42f..3f1373f 100644
--- 
a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java
+++ 
b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java
@@ -18,13 +18,6 @@
 
 package org.apache.hadoop.hbase.rsgroup;
 
-import 
org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
-import 
org.apache.hadoop.hbase.shaded.com.google.common.collect.ArrayListMultimap;
-import 
org.apache.hadoop.hbase.shaded.com.google.common.collect.LinkedListMultimap;
-import org.apache.hadoop.hbase.shaded.com.google.common.collect.ListMultimap;
-import org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
-import org.apache.hadoop.hbase.shaded.com.google.common.collect.Maps;
-
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collection;
@@ -43,10 +36,9 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.ClusterStatus;
 import org.apache.hadoop.hbase.HBaseIOException;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.yetus.audience.InterfaceAudience;
+import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.constraint.ConstraintException;
 import org.apache.hadoop.hbase.master.LoadBalancer;
 import org.apache.hadoop.hbase.master.MasterServices;
@@ -54,6 +46,14 @@ import org.apache.hadoop.hbase.master.RegionPlan;
 import org.apache.hadoop.hbase.master.balancer.StochasticLoadBalancer;
 import org.apache.hadoop.hbase.net.Address;
 import org.apache.hadoop.util.ReflectionUtils;
+import org.apache.yetus.audience.InterfaceAudience;
+
+import 
org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
+import 
org.apache.hadoop.hbase.shaded.com.google.common.collect.ArrayListMultimap;
+import 
org.apache.hadoop.hbase.shaded.com.google.common.collect.LinkedListMultimap;
+import org.apache.hadoop.hbase.shaded.com.google.common.collect.ListMultimap;
+import org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
+import org.apache.hadoop.hbase.shaded.com.google.common.collect.Maps;
 
 /**
  * GroupBasedLoadBalancer, used when Region Server Grouping is configured 
(HBase-6721)
@@ -106,31 +106,31 @@ public class RSGroupBasedLoadBalancer implements 
RSGroupableBalancer {
   }
 
   @Override
-  public List balanceCluster(TableName tableName, Map
+  public List balanceCluster(TableName tableName, Map
   clusterState) throws HBaseIOException {
 return balanceCluster(clusterState);
   }
 
   @Override
-  public List balanceCluster(Map 
clusterState)
+  public List balanceCluster(Map 
clusterState)
   throws HBaseIOException {
 if (!isOnline()) {
   throw new ConstraintException(RSGroupInfoManager.RSGROUP_TABLE_NAME +
   " is not online, unable to perform balance");
 }
 
-Map correctedState = 
correctAssignments(clusterState);
+Map correctedState = 
correctAssignments(clusterState);
 List regionPlans = new ArrayList<>();
 
-List misplacedRegions = 
correctedState.get(LoadBalancer.BOGUS_SERVER_NAME);
-for (HRegionInfo regionInfo : misplacedRegions) {
+List misplacedRegions = 
correctedState.get(LoadBalancer.BOGUS_SERVER_NAME);
+for (RegionInfo regionInfo : misplacedRegions) {
   regionPlans.add(new RegionPlan(regionInfo, null, null));
 }
 try {
   List rsgi = rsGroupInfoManager.listRSGroups();
   for (RSGroupInfo info: rsgi) {
-Map groupClusterState = new HashMap<>();
-Map> groupClusterLoad = 
new HashMap<>();
+Map groupClusterState = new HashMap<>();
+Map> groupClusterLoad = 
new HashMap<>();
 for (Address sName : info.getServers()) {
   for(ServerName curr: clusterState.keySet()) {
 if(curr.getAddress().equals(sName)) {
@@ -154,15 +154,15 @@ public class RSGroupBasedLoadBalancer implements 
RSGroupableBalancer {
   }
 
   @Override
-  public Map roundRobinAssignment(
-  List regions, List servers) throws 
HBaseIOException {
-Map assignments = Maps.newHashMap();
-ListMultimap regionMap = ArrayListMultimap.create();
+  public Map roundRobinAssignment(
+ 

[12/19] hbase git commit: HBASE-18839 Apply RegionInfo to code base

2017-09-28 Thread chia7712
http://git-wip-us.apache.org/repos/asf/hbase/blob/6693f45f/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
index ca73ff7..a05ad67 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
@@ -43,10 +43,10 @@ import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HBaseIOException;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HDFSBlocksDistribution;
-import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.ServerLoad;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.RegionReplicaUtil;
 import org.apache.hadoop.hbase.master.LoadBalancer;
 import org.apache.hadoop.hbase.master.MasterServices;
@@ -71,7 +71,7 @@ public abstract class BaseLoadBalancer implements 
LoadBalancer {
   protected static final int MIN_SERVER_BALANCE = 2;
   private volatile boolean stopped = false;
 
-  private static final List EMPTY_REGION_LIST = new 
ArrayList<>(0);
+  private static final List EMPTY_REGION_LIST = new ArrayList<>(0);
 
   static final Predicate IDLE_SERVER_PREDICATOR
 = load -> load.getNumberOfRegions() == 0;
@@ -127,7 +127,7 @@ public abstract class BaseLoadBalancer implements 
LoadBalancer {
 boolean multiServersPerHost = false; // whether or not any host has more 
than one server
 
 ArrayList tables;
-HRegionInfo[] regions;
+RegionInfo[] regions;
 Deque[] regionLoads;
 private RegionLocationFinder regionFinder;
 
@@ -160,7 +160,7 @@ public abstract class BaseLoadBalancer implements 
LoadBalancer {
 Map hostsToIndex;
 Map racksToIndex;
 Map tablesToIndex;
-Map regionsToIndex;
+Map regionsToIndex;
 float[] localityPerServer;
 
 int numServers;
@@ -170,7 +170,7 @@ public abstract class BaseLoadBalancer implements 
LoadBalancer {
 int numRegions;
 
 int numMovedRegions = 0; //num moved regions from the initial configuration
-Map clusterState;
+Map clusterState;
 
 protected final RackManager rackManager;
 // Maps region -> rackIndex -> locality of region on rack
@@ -179,7 +179,7 @@ public abstract class BaseLoadBalancer implements 
LoadBalancer {
 private int[][] regionsToMostLocalEntities;
 
 protected Cluster(
-Map clusterState,
+Map clusterState,
 Map loads,
 RegionLocationFinder regionFinder,
 RackManager rackManager) {
@@ -188,8 +188,8 @@ public abstract class BaseLoadBalancer implements 
LoadBalancer {
 
 @SuppressWarnings("unchecked")
 protected Cluster(
-Collection unassignedRegions,
-Map clusterState,
+Collection unassignedRegions,
+Map clusterState,
 Map loads,
 RegionLocationFinder regionFinder,
 RackManager rackManager) {
@@ -247,7 +247,7 @@ public abstract class BaseLoadBalancer implements 
LoadBalancer {
   }
 
   // Count how many regions there are.
-  for (Entry entry : 
clusterState.entrySet()) {
+  for (Entry entry : 
clusterState.entrySet()) {
 numRegions += entry.getValue().size();
   }
   numRegions += unassignedRegions.size();
@@ -256,7 +256,7 @@ public abstract class BaseLoadBalancer implements 
LoadBalancer {
   servers = new ServerName[numServers];
   serversPerHost = new int[numHosts][];
   serversPerRack = new int[numRacks][];
-  regions = new HRegionInfo[numRegions];
+  regions = new RegionInfo[numRegions];
   regionIndexToServerIndex = new int[numRegions];
   initialRegionIndexToServerIndex = new int[numRegions];
   regionIndexToTableIndex = new int[numRegions];
@@ -279,7 +279,7 @@ public abstract class BaseLoadBalancer implements 
LoadBalancer {
 
   int tableIndex = 0, regionIndex = 0, regionPerServerIndex = 0;
 
-  for (Entry entry : 
clusterState.entrySet()) {
+  for (Entry entry : 
clusterState.entrySet()) {
 if (entry.getKey() == null) {
   LOG.warn("SERVERNAME IS NULL, skipping " + entry.getValue());
   continue;
@@ -314,7 +314,7 @@ public abstract class BaseLoadBalancer implements 
LoadBalancer {
 racks[entry.getValue()] = entry.getKey();
   

[08/19] hbase git commit: HBASE-18839 Apply RegionInfo to code base

2017-09-28 Thread chia7712
http://git-wip-us.apache.org/repos/asf/hbase/blob/6693f45f/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
index b4e5007..e942a02 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
@@ -22,8 +22,6 @@ package org.apache.hadoop.hbase.tool;
 import static 
org.apache.hadoop.hbase.HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT;
 import static org.apache.hadoop.hbase.HConstants.ZOOKEEPER_ZNODE_PARENT;
 
-import org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
-
 import java.io.Closeable;
 import java.io.IOException;
 import java.net.InetSocketAddress;
@@ -62,7 +60,6 @@ import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.MetaTableAccessor;
@@ -72,13 +69,13 @@ import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.TableNotEnabledException;
 import org.apache.hadoop.hbase.TableNotFoundException;
-import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.RegionLocator;
 import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Scan;
@@ -96,11 +93,14 @@ import org.apache.hadoop.hbase.zookeeper.ZKConfig;
 import org.apache.hadoop.util.GenericOptionsParser;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
+import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.zookeeper.KeeperException;
 import org.apache.zookeeper.ZooKeeper;
 import org.apache.zookeeper.client.ConnectStringParser;
 import org.apache.zookeeper.data.Stat;
 
+import org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
+
 /**
  * HBase Canary Tool, that that can be used to do
  * "canary monitoring" of a running HBase cluster.
@@ -210,34 +210,34 @@ public final class Canary implements Tool {
 private Map perTableReadLatency = new HashMap<>();
 private LongAdder writeLatency = new LongAdder();
 
-public void publishReadFailure(ServerName serverName, HRegionInfo region, 
Exception e) {
+public void publishReadFailure(ServerName serverName, RegionInfo region, 
Exception e) {
   incReadFailureCount();
   LOG.error(String.format("read from region %s on regionserver %s failed", 
region.getRegionNameAsString(), serverName), e);
 }
 
-public void publishReadFailure(ServerName serverName, HRegionInfo region, 
ColumnFamilyDescriptor column, Exception e) {
+public void publishReadFailure(ServerName serverName, RegionInfo region, 
ColumnFamilyDescriptor column, Exception e) {
   incReadFailureCount();
   LOG.error(String.format("read from region %s on regionserver %s column 
family %s failed",
 region.getRegionNameAsString(), serverName, column.getNameAsString()), 
e);
 }
 
-public void publishReadTiming(ServerName serverName, HRegionInfo region, 
ColumnFamilyDescriptor column, long msTime) {
+public void publishReadTiming(ServerName serverName, RegionInfo region, 
ColumnFamilyDescriptor column, long msTime) {
   LOG.info(String.format("read from region %s on regionserver %s column 
family %s in %dms",
 region.getRegionNameAsString(), serverName, column.getNameAsString(), 
msTime));
 }
 
-public void publishWriteFailure(ServerName serverName, HRegionInfo region, 
Exception e) {
+public void publishWriteFailure(ServerName serverName, RegionInfo region, 
Exception e) {
   incWriteFailureCount();
   LOG.error(String.format("write to region %s on regionserver %s failed", 
region.getRegionNameAsString(), serverName), e);
 }
 
-public void publishWriteFailure(ServerName serverName, HRegionInfo region, 
ColumnFamilyDescriptor column, Exception e) {
+public void publishWriteFailure(ServerName serverName, RegionInfo region, 
ColumnFamilyDescriptor column, Exception e) {
   incWriteFailureCount();
   LOG.error(String.format("write to region %s on regionserver %s column 
family %s failed",
 region.getRegionNameAsString(), 

[18/19] hbase git commit: HBASE-18839 Apply RegionInfo to code base

2017-09-28 Thread chia7712
http://git-wip-us.apache.org/repos/asf/hbase/blob/6693f45f/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
index fac3ef1..7b42d3d 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
@@ -40,6 +40,7 @@ import java.util.concurrent.TimeoutException;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.atomic.AtomicReference;
 import java.util.regex.Pattern;
+import java.util.stream.Collectors;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -69,8 +70,6 @@ import org.apache.hadoop.hbase.TableNotDisabledException;
 import org.apache.hadoop.hbase.TableNotFoundException;
 import org.apache.hadoop.hbase.UnknownRegionException;
 import org.apache.hadoop.hbase.ZooKeeperConnectionException;
-import org.apache.yetus.audience.InterfaceAudience;
-import org.apache.yetus.audience.InterfaceStability;
 import org.apache.hadoop.hbase.client.replication.ReplicationSerDeHelper;
 import org.apache.hadoop.hbase.client.replication.TableCFs;
 import org.apache.hadoop.hbase.client.security.SecurityCapability;
@@ -86,6 +85,26 @@ import 
org.apache.hadoop.hbase.regionserver.wal.FailedLogCloseException;
 import org.apache.hadoop.hbase.replication.ReplicationException;
 import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
 import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
+import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
+import org.apache.hadoop.hbase.snapshot.HBaseSnapshotException;
+import org.apache.hadoop.hbase.snapshot.RestoreSnapshotException;
+import org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
+import org.apache.hadoop.hbase.snapshot.UnknownSnapshotException;
+import org.apache.hadoop.hbase.util.Addressing;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.hadoop.hbase.util.ForeignExceptionUtil;
+import org.apache.hadoop.hbase.util.Pair;
+import org.apache.hadoop.hbase.zookeeper.MasterAddressTracker;
+import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
+import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
+import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.util.StringUtils;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.apache.yetus.audience.InterfaceStability;
+import org.apache.zookeeper.KeeperException;
+
+import 
org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException;
 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
@@ -184,28 +203,10 @@ import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRe
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos;
-import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
-import org.apache.hadoop.hbase.snapshot.HBaseSnapshotException;
-import org.apache.hadoop.hbase.snapshot.RestoreSnapshotException;
-import org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
-import org.apache.hadoop.hbase.snapshot.UnknownSnapshotException;
-import org.apache.hadoop.hbase.util.Addressing;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-import org.apache.hadoop.hbase.util.ForeignExceptionUtil;
-import org.apache.hadoop.hbase.util.Pair;
-import org.apache.hadoop.hbase.zookeeper.MasterAddressTracker;
-import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
-import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
-import org.apache.hadoop.ipc.RemoteException;
-import org.apache.hadoop.util.StringUtils;
-import org.apache.zookeeper.KeeperException;
 
-import 
org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
 import com.google.protobuf.Descriptors;
 import com.google.protobuf.Message;
 import com.google.protobuf.RpcController;
-import java.util.stream.Collectors;
 
 /**
  * HBaseAdmin is no longer a client API. It is marked 
InterfaceAudience.Private indicating that
@@ -399,12 +400,26 @@ public class HBaseAdmin implements Admin {
 
   @Override
   public List getRegions(final ServerName sn) throws IOException {
-return getOnlineRegions(sn).stream().collect(Collectors.toList());
+AdminService.BlockingInterface admin = this.connection.getAdmin(sn);
+  

[02/19] hbase git commit: HBASE-18839 Apply RegionInfo to code base

2017-09-28 Thread chia7712
http://git-wip-us.apache.org/repos/asf/hbase/blob/6693f45f/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionInfoBuilder.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionInfoBuilder.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionInfoBuilder.java
index 497ecc4..5e553dc 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionInfoBuilder.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionInfoBuilder.java
@@ -86,7 +86,7 @@ public class TestRegionInfoBuilder {
 RegionInfo ri = RegionInfoBuilder.FIRST_META_REGIONINFO;
 byte [] bytes = RegionInfo.toByteArray(ri);
 RegionInfo pbri = RegionInfo.parseFrom(bytes);
-assertTrue(ri.equals(pbri));
+assertTrue(RegionInfo.COMPARATOR.compare(ri, pbri) == 0);
   }
 
   @Test
@@ -286,7 +286,7 @@ public class TestRegionInfoBuilder {
 .setReplicaId(replicaId).build();
 
 // convert two times, compare
-RegionInfo convertedRi = 
ProtobufUtil.toRegionInfo(ProtobufUtil.toProtoRegionInfo(ri));
+RegionInfo convertedRi = 
ProtobufUtil.toRegionInfo(ProtobufUtil.toRegionInfo(ri));
 
 assertEquals(ri, convertedRi);
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/6693f45f/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java
index b594a2f..15c6b76 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java
@@ -40,7 +40,6 @@ import org.apache.hadoop.hbase.CategoryBasedTimeout;
 import org.apache.hadoop.hbase.CoordinatedStateManager;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.MiniHBaseCluster;
 import org.apache.hadoop.hbase.ServerName;
@@ -49,6 +48,7 @@ import org.apache.hadoop.hbase.UnknownRegionException;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
 import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.RegionReplicaUtil;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
@@ -62,11 +62,6 @@ import org.apache.hadoop.hbase.master.RegionState;
 import org.apache.hadoop.hbase.master.assignment.AssignmentManager;
 import org.apache.hadoop.hbase.master.assignment.RegionStates;
 import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility;
-import org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController;
-import org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException;
-import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode;
-import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionRequest;
-import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionResponse;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.testclassification.RegionServerTests;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -87,6 +82,11 @@ import org.junit.rules.TestName;
 import org.junit.rules.TestRule;
 
 import org.apache.hadoop.hbase.shaded.com.google.common.base.Joiner;
+import org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController;
+import org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionRequest;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionResponse;
 
 @Category({RegionServerTests.class, MediumTests.class})
 public class TestRegionMergeTransactionOnCluster {
@@ -142,14 +142,14 @@ public class TestRegionMergeTransactionOnCluster {
 INITIAL_REGION_NUM - 1);
 
 // Merge 2nd and 3th region
-PairOfSameType mergedRegions =
+PairOfSameType mergedRegions =
   mergeRegionsAndVerifyRegionNum(MASTER, tableName, 1, 2,
 INITIAL_REGION_NUM - 2);
 
 verifyRowCount(table, 

[04/19] hbase git commit: HBASE-18839 Apply RegionInfo to code base

2017-09-28 Thread chia7712
http://git-wip-us.apache.org/repos/asf/hbase/blob/6693f45f/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestBaseLoadBalancer.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestBaseLoadBalancer.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestBaseLoadBalancer.java
index b360145..c33cd56 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestBaseLoadBalancer.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestBaseLoadBalancer.java
@@ -17,7 +17,12 @@
  */
 package org.apache.hadoop.hbase.master.balancer;
 
-import org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.HashMap;
@@ -28,15 +33,17 @@ import java.util.Set;
 import java.util.TreeMap;
 import java.util.TreeSet;
 import java.util.stream.Collectors;
+
 import org.apache.commons.lang3.ArrayUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HBaseIOException;
-import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.client.RegionInfoBuilder;
 import org.apache.hadoop.hbase.client.RegionReplicaUtil;
 import org.apache.hadoop.hbase.master.LoadBalancer;
 import org.apache.hadoop.hbase.master.MasterServices;
@@ -48,17 +55,14 @@ import 
org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.MoveRegi
 import org.apache.hadoop.hbase.testclassification.MasterTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.net.DNSToSwitchMapping;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
 import org.junit.BeforeClass;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 import org.junit.rules.TestName;
 import org.mockito.Mockito;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.when;
+
+import org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
 
 @Category({MasterTests.class, MediumTests.class})
 public class TestBaseLoadBalancer extends BalancerTestBase {
@@ -108,13 +112,13 @@ public class TestBaseLoadBalancer extends 
BalancerTestBase {
 
   public static class MockBalancer extends BaseLoadBalancer {
 @Override
-public List balanceCluster(Map 
clusterState) {
+public List balanceCluster(Map 
clusterState) {
   return null;
 }
 
 @Override
 public List balanceCluster(TableName tableName,
-Map clusterState) throws 
HBaseIOException {
+Map clusterState) throws 
HBaseIOException {
   return null;
 }
   }
@@ -125,9 +129,9 @@ public class TestBaseLoadBalancer extends BalancerTestBase {
* @param servers
* @param assignments
*/
-  private void assertImmediateAssignment(List regions, 
List servers,
-  Map assignments) {
-for (HRegionInfo region : regions) {
+  private void assertImmediateAssignment(List regions, 
List servers,
+  Map assignments) {
+for (RegionInfo region : regions) {
   assertTrue(assignments.containsKey(region));
 }
   }
@@ -143,31 +147,31 @@ public class TestBaseLoadBalancer extends 
BalancerTestBase {
   @Test (timeout=18)
   public void testBulkAssignment() throws Exception {
 List tmp = getListOfServerNames(randomServers(5, 0));
-List hris = randomRegions(20);
-hris.add(HRegionInfo.FIRST_META_REGIONINFO);
+List hris = randomRegions(20);
+hris.add(RegionInfoBuilder.FIRST_META_REGIONINFO);
 tmp.add(master);
-Map plans = 
loadBalancer.roundRobinAssignment(hris, tmp);
+Map plans = 
loadBalancer.roundRobinAssignment(hris, tmp);
 if (LoadBalancer.isTablesOnMaster(loadBalancer.getConf())) {
-  
assertTrue(plans.get(master).contains(HRegionInfo.FIRST_META_REGIONINFO));
+  
assertTrue(plans.get(master).contains(RegionInfoBuilder.FIRST_META_REGIONINFO));
   assertEquals(1, plans.get(master).size());
 }
 int totalRegion = 0;
-for (List regions: plans.values()) {
+for (List regions: plans.values()) {
   totalRegion += regions.size();
 }
 assertEquals(hris.size(), totalRegion);
  

[05/19] hbase git commit: HBASE-18839 Apply RegionInfo to code base

2017-09-28 Thread chia7712
http://git-wip-us.apache.org/repos/asf/hbase/blob/6693f45f/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterOperationsForRegionReplicas.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterOperationsForRegionReplicas.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterOperationsForRegionReplicas.java
index a8b9998..b73c873 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterOperationsForRegionReplicas.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterOperationsForRegionReplicas.java
@@ -19,12 +19,9 @@
 package org.apache.hadoop.hbase.master;
 
 import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
 
 import java.io.IOException;
-import java.util.Collection;
 import java.util.EnumSet;
-import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
@@ -38,18 +35,18 @@ import org.apache.hadoop.hbase.ClusterStatus.Option;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.MetaTableAccessor;
+import org.apache.hadoop.hbase.MetaTableAccessor.Visitor;
 import org.apache.hadoop.hbase.RegionLocations;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.MetaTableAccessor;
-import org.apache.hadoop.hbase.MetaTableAccessor.Visitor;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.Delete;
+import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.RegionReplicaUtil;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Table;
@@ -107,7 +104,7 @@ public class TestMasterOperationsForRegionReplicas {
   ADMIN.createTable(desc, Bytes.toBytes("A"), Bytes.toBytes("Z"), 
numRegions);
 
   validateNumberOfRowsInMeta(tableName, numRegions, ADMIN.getConnection());
-  List hris = MetaTableAccessor.getTableRegions(
+  List hris = MetaTableAccessor.getTableRegions(
 ADMIN.getConnection(), tableName);
   assert(hris.size() == numRegions * numReplica);
 } finally {
@@ -129,12 +126,12 @@ public class TestMasterOperationsForRegionReplicas {
   TEST_UTIL.waitTableEnabled(tableName);
   validateNumberOfRowsInMeta(tableName, numRegions, ADMIN.getConnection());
 
-  List hris = 
MetaTableAccessor.getTableRegions(ADMIN.getConnection(), tableName);
+  List hris = 
MetaTableAccessor.getTableRegions(ADMIN.getConnection(), tableName);
   assert(hris.size() == numRegions * numReplica);
   // check that the master created expected number of RegionState objects
   for (int i = 0; i < numRegions; i++) {
 for (int j = 0; j < numReplica; j++) {
-  HRegionInfo replica = 
RegionReplicaUtil.getRegionInfoForReplica(hris.get(i), j);
+  RegionInfo replica = 
RegionReplicaUtil.getRegionInfoForReplica(hris.get(i), j);
   RegionState state = 
TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager()
   .getRegionStates().getRegionState(replica);
   assert (state != null);
@@ -145,7 +142,7 @@ public class TestMasterOperationsForRegionReplicas {
   int numRows = 0;
   for (Result result : metaRows) {
 RegionLocations locations = 
MetaTableAccessor.getRegionLocations(result);
-HRegionInfo hri = locations.getRegionLocation().getRegionInfo();
+RegionInfo hri = locations.getRegionLocation().getRegionInfo();
 if (!hri.getTable().equals(tableName)) continue;
 numRows += 1;
 HRegionLocation[] servers = locations.getRegionLocations();
@@ -168,7 +165,7 @@ public class TestMasterOperationsForRegionReplicas {
   TEST_UTIL.getHBaseClusterInterface().waitForActiveAndReadyMaster();
   for (int i = 0; i < numRegions; i++) {
 for (int j = 0; j < numReplica; j++) {
-  HRegionInfo replica = 
RegionReplicaUtil.getRegionInfoForReplica(hris.get(i), j);
+  RegionInfo replica = 
RegionReplicaUtil.getRegionInfoForReplica(hris.get(i), j);
   RegionState state = 
TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager()
   .getRegionStates().getRegionState(replica);
   assert (state != null);
@@ -205,7 +202,7 @@ public class TestMasterOperationsForRegionReplicas {
   ADMIN.enableTable(tableName);
   LOG.info(ADMIN.getTableDescriptor(tableName).toString());
   assert(ADMIN.isTableEnabled(tableName));
-  List regions = 

[03/19] hbase git commit: HBASE-18839 Apply RegionInfo to code base

2017-09-28 Thread chia7712
http://git-wip-us.apache.org/repos/asf/hbase/blob/6693f45f/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDeleteTableProcedure.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDeleteTableProcedure.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDeleteTableProcedure.java
index 3ad8ec8..6f109e5 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDeleteTableProcedure.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDeleteTableProcedure.java
@@ -23,10 +23,10 @@ import static org.junit.Assert.assertTrue;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.CategoryBasedTimeout;
-import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.TableNotDisabledException;
 import org.apache.hadoop.hbase.TableNotFoundException;
+import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.procedure2.Procedure;
 import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
 import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility;
@@ -75,7 +75,7 @@ public class TestDeleteTableProcedure extends 
TestTableDDLProcedureBase {
 final TableName tableName = TableName.valueOf(name.getMethodName());
 final ProcedureExecutor procExec = 
getMasterProcedureExecutor();
 
-HRegionInfo[] regions = MasterProcedureTestingUtility.createTable(
+RegionInfo[] regions = MasterProcedureTestingUtility.createTable(
   procExec, tableName, null, "f");
 UTIL.getAdmin().disableTable(tableName);
 
@@ -118,7 +118,7 @@ public class TestDeleteTableProcedure extends 
TestTableDDLProcedureBase {
   }
 
   private void testSimpleDelete(final TableName tableName, byte[][] splitKeys) 
throws Exception {
-HRegionInfo[] regions = MasterProcedureTestingUtility.createTable(
+RegionInfo[] regions = MasterProcedureTestingUtility.createTable(
   getMasterProcedureExecutor(), tableName, splitKeys, "f1", "f2");
 UTIL.getAdmin().disableTable(tableName);
 
@@ -136,7 +136,7 @@ public class TestDeleteTableProcedure extends 
TestTableDDLProcedureBase {
 
 // create the table
 byte[][] splitKeys = null;
-HRegionInfo[] regions = MasterProcedureTestingUtility.createTable(
+RegionInfo[] regions = MasterProcedureTestingUtility.createTable(
   getMasterProcedureExecutor(), tableName, splitKeys, "f1", "f2");
 UTIL.getAdmin().disableTable(tableName);
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/6693f45f/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterFailoverWithProcedures.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterFailoverWithProcedures.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterFailoverWithProcedures.java
index 4e96cea..3eeb382 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterFailoverWithProcedures.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterFailoverWithProcedures.java
@@ -26,18 +26,13 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.CategoryBasedTimeout;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.procedure2.Procedure;
 import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
 import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility;
 import org.apache.hadoop.hbase.procedure2.store.wal.WALProcedureStore;
-import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.CreateTableState;
-import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.DeleteTableState;
-import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.DisableTableState;
-import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.EnableTableState;
-import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.TruncateTableState;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.testclassification.MasterTests;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -50,6 +45,12 @@ import org.junit.Test;
 import org.junit.experimental.categories.Category;
 import org.junit.rules.TestRule;
 
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.CreateTableState;
+import 

[17/19] hbase git commit: HBASE-18839 Apply RegionInfo to code base

2017-09-28 Thread chia7712
http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
index e5f1848..2fbbc3f 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
@@ -30,11 +30,9 @@ import org.apache.hadoop.hbase.CellScannable;
 import org.apache.hadoop.hbase.ClusterStatus.Option;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.hbase.client.Action;
 import org.apache.hadoop.hbase.client.Append;
 import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
@@ -46,6 +44,7 @@ import org.apache.hadoop.hbase.client.MasterSwitchType;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.RegionCoprocessorServiceExec;
+import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.Row;
 import org.apache.hadoop.hbase.client.RowMutations;
 import org.apache.hadoop.hbase.client.Scan;
@@ -54,6 +53,12 @@ import 
org.apache.hadoop.hbase.client.replication.ReplicationSerDeHelper;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.filter.ByteArrayComparable;
 import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.hadoop.hbase.util.Pair;
+import org.apache.hadoop.security.token.Token;
+import org.apache.yetus.audience.InterfaceAudience;
+
 import org.apache.hadoop.hbase.shaded.com.google.protobuf.UnsafeByteOperations;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesRequest;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
@@ -70,7 +75,6 @@ import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateFavor
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest.RegionUpdateInfo;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.WarmupRegionRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.BulkLoadHFileRequest;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.Condition;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.GetRequest;
@@ -81,6 +85,7 @@ import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationPr
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto.MutationType;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.RegionAction;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ScanRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.CompareType;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
@@ -97,12 +102,9 @@ import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTabl
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableRequest;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersRequest;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest;
-import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest;
-import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyNamespaceRequest;
-import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RemoveDrainFromRegionServersRequest;
-import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetCleanerChoreRunningRequest;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableRequest;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterStatusRequest;
+import 

[13/19] hbase git commit: HBASE-18839 Apply RegionInfo to code base

2017-09-28 Thread chia7712
http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java
index 9e37292..da6afc9 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java
@@ -30,15 +30,15 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.MetaMutationAnnotation;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.UnknownRegionException;
-import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
 import org.apache.hadoop.hbase.client.MasterSwitchType;
 import org.apache.hadoop.hbase.client.Mutation;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.client.RegionInfoBuilder;
 import org.apache.hadoop.hbase.client.RegionReplicaUtil;
 import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.exceptions.MergeRegionException;
@@ -59,13 +59,16 @@ import 
org.apache.hadoop.hbase.quotas.QuotaExceededException;
 import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
 import org.apache.hadoop.hbase.regionserver.HStoreFile;
 import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.yetus.audience.InterfaceAudience;
+
 import 
org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
+import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.MergeTableRegionsState;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-import org.apache.hadoop.hbase.util.FSUtils;
 
 /**
  * The procedure to Merge a region in a table.
@@ -79,8 +82,8 @@ public class MergeTableRegionsProcedure
   private Boolean traceEnabled;
   private volatile boolean lock = false;
   private ServerName regionLocation;
-  private HRegionInfo[] regionsToMerge;
-  private HRegionInfo mergedRegion;
+  private RegionInfo[] regionsToMerge;
+  private RegionInfo mergedRegion;
   private boolean forcible;
 
   public MergeTableRegionsProcedure() {
@@ -88,18 +91,18 @@ public class MergeTableRegionsProcedure
   }
 
   public MergeTableRegionsProcedure(final MasterProcedureEnv env,
-  final HRegionInfo regionToMergeA, final HRegionInfo regionToMergeB) 
throws IOException {
+  final RegionInfo regionToMergeA, final RegionInfo regionToMergeB) throws 
IOException {
 this(env, regionToMergeA, regionToMergeB, false);
   }
 
   public MergeTableRegionsProcedure(final MasterProcedureEnv env,
-  final HRegionInfo regionToMergeA, final HRegionInfo regionToMergeB,
+  final RegionInfo regionToMergeA, final RegionInfo regionToMergeB,
   final boolean forcible) throws MergeRegionException {
-this(env, new HRegionInfo[] {regionToMergeA, regionToMergeB}, forcible);
+this(env, new RegionInfo[] {regionToMergeA, regionToMergeB}, forcible);
   }
 
   public MergeTableRegionsProcedure(final MasterProcedureEnv env,
-  final HRegionInfo[] regionsToMerge, final boolean forcible)
+  final RegionInfo[] regionsToMerge, final boolean forcible)
   throws MergeRegionException {
 super(env);
 
@@ -117,7 +120,7 @@ public class MergeTableRegionsProcedure
 this.forcible = forcible;
   }
 
-  private static void checkRegionsToMerge(final HRegionInfo[] regionsToMerge,
+  private static void checkRegionsToMerge(final RegionInfo[] regionsToMerge,
   final boolean forcible) throws MergeRegionException {
 // For now, we only merge 2 regions.
 // It could be extended to more than 2 regions in the future.
@@ -129,19 +132,19 @@ public class MergeTableRegionsProcedure
 checkRegionsToMerge(regionsToMerge[0], regionsToMerge[1], forcible);
   }
 
-  private static void checkRegionsToMerge(final HRegionInfo regionToMergeA,
-  final HRegionInfo regionToMergeB, final boolean forcible) throws 
MergeRegionException {
+  private static void 

[07/19] hbase git commit: HBASE-18839 Apply RegionInfo to code base

2017-09-28 Thread chia7712
http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-server/src/main/resources/hbase-webapps/master/table.jsp
--
diff --git a/hbase-server/src/main/resources/hbase-webapps/master/table.jsp 
b/hbase-server/src/main/resources/hbase-webapps/master/table.jsp
index 6c401a9..c7a1aff 100644
--- a/hbase-server/src/main/resources/hbase-webapps/master/table.jsp
+++ b/hbase-server/src/main/resources/hbase-webapps/master/table.jsp
@@ -17,54 +17,57 @@
  * limitations under the License.
  */
 --%>
-<%@page 
import="org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType"%>
+<%@page import="java.net.URLEncoder"%>
 <%@ page contentType="text/html;charset=UTF-8"
   import="static org.apache.commons.lang3.StringEscapeUtils.escapeXml"
-  import="org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString"
-  import="java.net.URLEncoder"
   import="java.util.ArrayList"
-  import="java.util.TreeMap"
-  import="java.util.List"
-  import="java.util.LinkedHashMap"
-  import="java.util.Map"
-  import="java.util.Set"
   import="java.util.Collection"
   import="java.util.Collections"
   import="java.util.Comparator"
+  import="java.util.LinkedHashMap"
+  import="java.util.List"
+  import="java.util.Map"
+  import="java.util.TreeMap"
   import="org.apache.commons.lang3.StringEscapeUtils"
   import="org.apache.hadoop.conf.Configuration"
-  import="org.apache.hadoop.util.StringUtils"
-  import="org.apache.hadoop.hbase.HRegionInfo"
+  import="org.apache.hadoop.hbase.HBaseConfiguration"
+  import="org.apache.hadoop.hbase.HColumnDescriptor"
+  import="org.apache.hadoop.hbase.HConstants"
   import="org.apache.hadoop.hbase.HRegionLocation"
-  import="org.apache.hadoop.hbase.ServerName"
-  import="org.apache.hadoop.hbase.ServerLoad"
   import="org.apache.hadoop.hbase.RegionLoad"
-  import="org.apache.hadoop.hbase.HConstants"
+  import="org.apache.hadoop.hbase.ServerLoad"
+  import="org.apache.hadoop.hbase.ServerName"
+  import="org.apache.hadoop.hbase.TableName"
+  import="org.apache.hadoop.hbase.TableNotFoundException"
+  import="org.apache.hadoop.hbase.client.Admin"
+  import="org.apache.hadoop.hbase.client.CompactionState"
+  import="org.apache.hadoop.hbase.client.RegionInfo"
+  import="org.apache.hadoop.hbase.client.RegionInfoBuilder"
+  import="org.apache.hadoop.hbase.client.RegionLocator"
+  import="org.apache.hadoop.hbase.client.RegionReplicaUtil"
+  import="org.apache.hadoop.hbase.client.Table"
   import="org.apache.hadoop.hbase.master.HMaster"
-  import="org.apache.hadoop.hbase.zookeeper.MetaTableLocator"
   import="org.apache.hadoop.hbase.quotas.QuotaTableUtil"
   import="org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshot"
   import="org.apache.hadoop.hbase.util.Bytes"
   import="org.apache.hadoop.hbase.util.FSUtils"
-  
import="org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos"
-  import="org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos"
-  import="org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Quotas"
-  
import="org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota"
-  import="org.apache.hadoop.hbase.TableName"
-  import="org.apache.hadoop.hbase.HColumnDescriptor"
-  import="org.apache.hadoop.hbase.HBaseConfiguration"
-  import="org.apache.hadoop.hbase.TableNotFoundException"%>
-<%@ page import="org.apache.hadoop.hbase.client.*" %>
+  import="org.apache.hadoop.hbase.zookeeper.MetaTableLocator"
+  import="org.apache.hadoop.util.StringUtils"
+  import="org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString"%>
+<%@ page 
import="org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos" 
%>
+<%@ page 
import="org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos" %>
+<%@ page 
import="org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Quotas" %>
+<%@ page 
import="org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota"
 %>
 <%!
   /**
-   * @return An empty region load stamped with the passed in hri
+   * @return An empty region load stamped with the passed in 
regionInfo
* region name.
*/
-  private RegionLoad getEmptyRegionLoad(final HRegionInfo hri) {
+  private RegionLoad getEmptyRegionLoad(final RegionInfo regionInfo) {
 return new RegionLoad(ClusterStatusProtos.RegionLoad.newBuilder().
   setRegionSpecifier(HBaseProtos.RegionSpecifier.newBuilder().
   setType(HBaseProtos.RegionSpecifier.RegionSpecifierType.REGION_NAME).
-  setValue(ByteString.copyFrom(hri.getRegionName())).build()).build());
+  
setValue(ByteString.copyFrom(regionInfo.getRegionName())).build()).build());
   }
 %>
 <%
@@ -205,10 +208,10 @@ if ( fqtn != null ) {
 %> Split request accepted. <%
 } else if (action.equals("compact")) {
   if (key != null && key.length() > 0) {
-List regions = 
admin.getTableRegions(TableName.valueOf(fqtn));
+List regions = 

[03/19] hbase git commit: HBASE-18839 Apply RegionInfo to code base

2017-09-28 Thread chia7712
http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDeleteTableProcedure.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDeleteTableProcedure.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDeleteTableProcedure.java
index 3ad8ec8..6f109e5 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDeleteTableProcedure.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDeleteTableProcedure.java
@@ -23,10 +23,10 @@ import static org.junit.Assert.assertTrue;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.CategoryBasedTimeout;
-import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.TableNotDisabledException;
 import org.apache.hadoop.hbase.TableNotFoundException;
+import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.procedure2.Procedure;
 import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
 import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility;
@@ -75,7 +75,7 @@ public class TestDeleteTableProcedure extends 
TestTableDDLProcedureBase {
 final TableName tableName = TableName.valueOf(name.getMethodName());
 final ProcedureExecutor procExec = 
getMasterProcedureExecutor();
 
-HRegionInfo[] regions = MasterProcedureTestingUtility.createTable(
+RegionInfo[] regions = MasterProcedureTestingUtility.createTable(
   procExec, tableName, null, "f");
 UTIL.getAdmin().disableTable(tableName);
 
@@ -118,7 +118,7 @@ public class TestDeleteTableProcedure extends 
TestTableDDLProcedureBase {
   }
 
   private void testSimpleDelete(final TableName tableName, byte[][] splitKeys) 
throws Exception {
-HRegionInfo[] regions = MasterProcedureTestingUtility.createTable(
+RegionInfo[] regions = MasterProcedureTestingUtility.createTable(
   getMasterProcedureExecutor(), tableName, splitKeys, "f1", "f2");
 UTIL.getAdmin().disableTable(tableName);
 
@@ -136,7 +136,7 @@ public class TestDeleteTableProcedure extends 
TestTableDDLProcedureBase {
 
 // create the table
 byte[][] splitKeys = null;
-HRegionInfo[] regions = MasterProcedureTestingUtility.createTable(
+RegionInfo[] regions = MasterProcedureTestingUtility.createTable(
   getMasterProcedureExecutor(), tableName, splitKeys, "f1", "f2");
 UTIL.getAdmin().disableTable(tableName);
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterFailoverWithProcedures.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterFailoverWithProcedures.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterFailoverWithProcedures.java
index 4e96cea..3eeb382 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterFailoverWithProcedures.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterFailoverWithProcedures.java
@@ -26,18 +26,13 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.CategoryBasedTimeout;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.procedure2.Procedure;
 import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
 import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility;
 import org.apache.hadoop.hbase.procedure2.store.wal.WALProcedureStore;
-import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.CreateTableState;
-import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.DeleteTableState;
-import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.DisableTableState;
-import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.EnableTableState;
-import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.TruncateTableState;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.testclassification.MasterTests;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -50,6 +45,12 @@ import org.junit.Test;
 import org.junit.experimental.categories.Category;
 import org.junit.rules.TestRule;
 
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.CreateTableState;
+import 

[09/19] hbase git commit: HBASE-18839 Apply RegionInfo to code base

2017-09-28 Thread chia7712
http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java
index 1d31d5a..b21d55a 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java
@@ -23,21 +23,21 @@ import java.util.List;
 import java.util.Map;
 import java.util.concurrent.ConcurrentMap;
 
-import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.hbase.Abortable;
-import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.Server;
+import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.locking.EntityLock;
 import org.apache.hadoop.hbase.executor.ExecutorService;
 import org.apache.hadoop.hbase.ipc.RpcServerInterface;
 import org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager;
 import org.apache.hadoop.hbase.quotas.RegionServerSpaceQuotaManager;
 import org.apache.hadoop.hbase.regionserver.throttle.ThroughputController;
-import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode;
 import org.apache.hadoop.hbase.wal.WAL;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.zookeeper.KeeperException;
 
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode;
+
 import com.google.protobuf.Service;
 
 /**
@@ -49,7 +49,7 @@ public interface RegionServerServices
 
   /** @return the WAL for a particular region. Pass null for getting the
* default (common) WAL */
-  WAL getWAL(HRegionInfo regionInfo) throws IOException;
+  WAL getWAL(RegionInfo regionInfo) throws IOException;
 
   /** @return the List of WALs that are used by this server
*  Doesn't include the meta WAL
@@ -127,11 +127,11 @@ public interface RegionServerServices
 private final TransitionCode code;
 private final long openSeqNum;
 private final long masterSystemTime;
-private final HRegionInfo[] hris;
+private final RegionInfo[] hris;
 
 @InterfaceAudience.Private
 public RegionStateTransitionContext(TransitionCode code, long openSeqNum, 
long masterSystemTime,
-HRegionInfo... hris) {
+RegionInfo... hris) {
   this.code = code;
   this.openSeqNum = openSeqNum;
   this.masterSystemTime = masterSystemTime;
@@ -146,7 +146,7 @@ public interface RegionServerServices
 public long getMasterSystemTime() {
   return masterSystemTime;
 }
-public HRegionInfo[] getHris() {
+public RegionInfo[] getHris() {
   return hris;
 }
   }
@@ -161,14 +161,14 @@ public interface RegionServerServices
* @deprecated use {@link 
#reportRegionStateTransition(RegionStateTransitionContext)}
*/
   @Deprecated
-  boolean reportRegionStateTransition(TransitionCode code, long openSeqNum, 
HRegionInfo... hris);
+  boolean reportRegionStateTransition(TransitionCode code, long openSeqNum, 
RegionInfo... hris);
 
   /**
* Notify master that a handler requests to change a region state
* @deprecated use {@link 
#reportRegionStateTransition(RegionStateTransitionContext)}
*/
   @Deprecated
-  boolean reportRegionStateTransition(TransitionCode code, HRegionInfo... 
hris);
+  boolean reportRegionStateTransition(TransitionCode code, RegionInfo... hris);
 
   /**
* Returns a reference to the region server's RPC server
@@ -244,7 +244,7 @@ public interface RegionServerServices
   /**
* Master based locks on namespaces/tables/regions.
*/
-  EntityLock regionLock(List regionInfos, String description,
+  EntityLock regionLock(List regionInfos, String description,
   Abortable abort) throws IOException;
 
   /**

http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServicesForStores.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServicesForStores.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServicesForStores.java
index 9b4a32a..aea92f8 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServicesForStores.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServicesForStores.java
@@ -23,9 +23,9 @@ import java.util.concurrent.ThreadFactory;
 import java.util.concurrent.ThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
 
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.yetus.audience.InterfaceAudience;
+import 

[06/19] hbase git commit: HBASE-18839 Apply RegionInfo to code base

2017-09-28 Thread chia7712
http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncClusterAdminApi.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncClusterAdminApi.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncClusterAdminApi.java
index 1acfcde..6307210 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncClusterAdminApi.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncClusterAdminApi.java
@@ -34,14 +34,13 @@ import java.util.Optional;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.ClusterStatus;
+import org.apache.hadoop.hbase.ClusterStatus.Option;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.RegionLoad;
 import org.apache.hadoop.hbase.ServerLoad;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.ClusterStatus.Option;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.testclassification.ClientTests;
@@ -227,13 +226,13 @@ public class TestAsyncClusterAdminApi extends 
TestAsyncAdminBase {
 // Check if regions match with the regionLoad from the server
 Collection servers = admin.getRegionServers().get();
 for (ServerName serverName : servers) {
-  List regions = admin.getOnlineRegions(serverName).get();
+  List regions = admin.getOnlineRegions(serverName).get();
   checkRegionsAndRegionLoads(regions, 
admin.getRegionLoads(serverName).get());
 }
 
 // Check if regionLoad matches the table's regions and nothing is missed
 for (TableName table : tables) {
-  List tableRegions = admin.getTableRegions(table).get();
+  List tableRegions = admin.getTableRegions(table).get();
   List regionLoads = Lists.newArrayList();
   for (ServerName serverName : servers) {
 regionLoads.addAll(admin.getRegionLoads(serverName, 
Optional.of(table)).get());
@@ -268,7 +267,7 @@ public class TestAsyncClusterAdminApi extends 
TestAsyncAdminBase {
 }
   }
 
-  private void checkRegionsAndRegionLoads(Collection regions,
+  private void checkRegionsAndRegionLoads(Collection regions,
   Collection regionLoads) {
 
 assertEquals("No of regions and regionloads doesn't match", 
regions.size(), regionLoads.size());
@@ -277,7 +276,7 @@ public class TestAsyncClusterAdminApi extends 
TestAsyncAdminBase {
 for (RegionLoad regionLoad : regionLoads) {
   regionLoadMap.put(regionLoad.getName(), regionLoad);
 }
-for (HRegionInfo info : regions) {
+for (RegionInfo info : regions) {
   assertTrue("Region not in regionLoadMap region:" + 
info.getRegionNameAsString()
   + " regionMap: " + regionLoadMap, 
regionLoadMap.containsKey(info.getRegionName()));
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi.java
index 6292b10..9775b86 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi.java
@@ -33,7 +33,6 @@ import java.util.stream.Collectors;
 
 import org.apache.hadoop.hbase.AsyncMetaTableAccessor;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
@@ -45,7 +44,6 @@ import 
org.apache.hadoop.hbase.master.assignment.AssignmentManager;
 import org.apache.hadoop.hbase.master.assignment.RegionStates;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.regionserver.Region;
-import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.testclassification.ClientTests;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -58,6 +56,8 @@ import org.junit.experimental.categories.Category;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
 
+import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
+
 /**
  * Class to test asynchronous region admin operations.
  */
@@ -69,10 +69,10 @@ public class TestAsyncRegionAdminApi extends 
TestAsyncAdminBase {
   public void testCloseRegion() throws 

[12/19] hbase git commit: HBASE-18839 Apply RegionInfo to code base

2017-09-28 Thread chia7712
http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
index ca73ff7..a05ad67 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
@@ -43,10 +43,10 @@ import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HBaseIOException;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HDFSBlocksDistribution;
-import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.ServerLoad;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.RegionReplicaUtil;
 import org.apache.hadoop.hbase.master.LoadBalancer;
 import org.apache.hadoop.hbase.master.MasterServices;
@@ -71,7 +71,7 @@ public abstract class BaseLoadBalancer implements 
LoadBalancer {
   protected static final int MIN_SERVER_BALANCE = 2;
   private volatile boolean stopped = false;
 
-  private static final List<HRegionInfo> EMPTY_REGION_LIST = new ArrayList<>(0);
+  private static final List<RegionInfo> EMPTY_REGION_LIST = new ArrayList<>(0);
 
   static final Predicate IDLE_SERVER_PREDICATOR
 = load -> load.getNumberOfRegions() == 0;
@@ -127,7 +127,7 @@ public abstract class BaseLoadBalancer implements 
LoadBalancer {
 boolean multiServersPerHost = false; // whether or not any host has more 
than one server
 
 ArrayList tables;
-HRegionInfo[] regions;
+RegionInfo[] regions;
 Deque[] regionLoads;
 private RegionLocationFinder regionFinder;
 
@@ -160,7 +160,7 @@ public abstract class BaseLoadBalancer implements 
LoadBalancer {
 Map hostsToIndex;
 Map racksToIndex;
 Map tablesToIndex;
-Map<HRegionInfo, Integer> regionsToIndex;
+Map<RegionInfo, Integer> regionsToIndex;
 float[] localityPerServer;
 
 int numServers;
@@ -170,7 +170,7 @@ public abstract class BaseLoadBalancer implements 
LoadBalancer {
 int numRegions;
 
 int numMovedRegions = 0; //num moved regions from the initial configuration
-Map<ServerName, List<HRegionInfo>> clusterState;
+Map<ServerName, List<RegionInfo>> clusterState;
 
 protected final RackManager rackManager;
 // Maps region -> rackIndex -> locality of region on rack
@@ -179,7 +179,7 @@ public abstract class BaseLoadBalancer implements 
LoadBalancer {
 private int[][] regionsToMostLocalEntities;
 
 protected Cluster(
-Map clusterState,
+Map clusterState,
 Map loads,
 RegionLocationFinder regionFinder,
 RackManager rackManager) {
@@ -188,8 +188,8 @@ public abstract class BaseLoadBalancer implements 
LoadBalancer {
 
 @SuppressWarnings("unchecked")
 protected Cluster(
-Collection unassignedRegions,
-Map clusterState,
+Collection unassignedRegions,
+Map clusterState,
 Map loads,
 RegionLocationFinder regionFinder,
 RackManager rackManager) {
@@ -247,7 +247,7 @@ public abstract class BaseLoadBalancer implements 
LoadBalancer {
   }
 
   // Count how many regions there are.
-  for (Entry entry : 
clusterState.entrySet()) {
+  for (Entry entry : 
clusterState.entrySet()) {
 numRegions += entry.getValue().size();
   }
   numRegions += unassignedRegions.size();
@@ -256,7 +256,7 @@ public abstract class BaseLoadBalancer implements 
LoadBalancer {
   servers = new ServerName[numServers];
   serversPerHost = new int[numHosts][];
   serversPerRack = new int[numRacks][];
-  regions = new HRegionInfo[numRegions];
+  regions = new RegionInfo[numRegions];
   regionIndexToServerIndex = new int[numRegions];
   initialRegionIndexToServerIndex = new int[numRegions];
   regionIndexToTableIndex = new int[numRegions];
@@ -279,7 +279,7 @@ public abstract class BaseLoadBalancer implements 
LoadBalancer {
 
   int tableIndex = 0, regionIndex = 0, regionPerServerIndex = 0;
 
-  for (Entry entry : 
clusterState.entrySet()) {
+  for (Entry entry : 
clusterState.entrySet()) {
 if (entry.getKey() == null) {
   LOG.warn("SERVERNAME IS NULL, skipping " + entry.getValue());
   continue;
@@ -314,7 +314,7 @@ public abstract class BaseLoadBalancer implements 
LoadBalancer {
 racks[entry.getValue()] = entry.getKey();
   

[16/19] hbase git commit: HBASE-18839 Apply RegionInfo to code base

2017-09-28 Thread chia7712
http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java
--
diff --git 
a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java
 
b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java
index 169e42f..3f1373f 100644
--- 
a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java
+++ 
b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java
@@ -18,13 +18,6 @@
 
 package org.apache.hadoop.hbase.rsgroup;
 
-import 
org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
-import 
org.apache.hadoop.hbase.shaded.com.google.common.collect.ArrayListMultimap;
-import 
org.apache.hadoop.hbase.shaded.com.google.common.collect.LinkedListMultimap;
-import org.apache.hadoop.hbase.shaded.com.google.common.collect.ListMultimap;
-import org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
-import org.apache.hadoop.hbase.shaded.com.google.common.collect.Maps;
-
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collection;
@@ -43,10 +36,9 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.ClusterStatus;
 import org.apache.hadoop.hbase.HBaseIOException;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.yetus.audience.InterfaceAudience;
+import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.constraint.ConstraintException;
 import org.apache.hadoop.hbase.master.LoadBalancer;
 import org.apache.hadoop.hbase.master.MasterServices;
@@ -54,6 +46,14 @@ import org.apache.hadoop.hbase.master.RegionPlan;
 import org.apache.hadoop.hbase.master.balancer.StochasticLoadBalancer;
 import org.apache.hadoop.hbase.net.Address;
 import org.apache.hadoop.util.ReflectionUtils;
+import org.apache.yetus.audience.InterfaceAudience;
+
+import 
org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
+import 
org.apache.hadoop.hbase.shaded.com.google.common.collect.ArrayListMultimap;
+import 
org.apache.hadoop.hbase.shaded.com.google.common.collect.LinkedListMultimap;
+import org.apache.hadoop.hbase.shaded.com.google.common.collect.ListMultimap;
+import org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
+import org.apache.hadoop.hbase.shaded.com.google.common.collect.Maps;
 
 /**
  * GroupBasedLoadBalancer, used when Region Server Grouping is configured 
(HBase-6721)
@@ -106,31 +106,31 @@ public class RSGroupBasedLoadBalancer implements 
RSGroupableBalancer {
   }
 
   @Override
-  public List<RegionPlan> balanceCluster(TableName tableName, Map<ServerName, List<HRegionInfo>>
+  public List<RegionPlan> balanceCluster(TableName tableName, Map<ServerName, List<RegionInfo>>
   clusterState) throws HBaseIOException {
 return balanceCluster(clusterState);
   }
 
   @Override
-  public List<RegionPlan> balanceCluster(Map<ServerName, List<HRegionInfo>> clusterState)
+  public List<RegionPlan> balanceCluster(Map<ServerName, List<RegionInfo>> clusterState)
   throws HBaseIOException {
 if (!isOnline()) {
   throw new ConstraintException(RSGroupInfoManager.RSGROUP_TABLE_NAME +
   " is not online, unable to perform balance");
 }
 
-Map correctedState = 
correctAssignments(clusterState);
+Map correctedState = 
correctAssignments(clusterState);
 List regionPlans = new ArrayList<>();
 
-List misplacedRegions = 
correctedState.get(LoadBalancer.BOGUS_SERVER_NAME);
-for (HRegionInfo regionInfo : misplacedRegions) {
+List misplacedRegions = 
correctedState.get(LoadBalancer.BOGUS_SERVER_NAME);
+for (RegionInfo regionInfo : misplacedRegions) {
   regionPlans.add(new RegionPlan(regionInfo, null, null));
 }
 try {
   List rsgi = rsGroupInfoManager.listRSGroups();
   for (RSGroupInfo info: rsgi) {
-Map groupClusterState = new HashMap<>();
-Map> groupClusterLoad = 
new HashMap<>();
+Map groupClusterState = new HashMap<>();
+Map> groupClusterLoad = 
new HashMap<>();
 for (Address sName : info.getServers()) {
   for(ServerName curr: clusterState.keySet()) {
 if(curr.getAddress().equals(sName)) {
@@ -154,15 +154,15 @@ public class RSGroupBasedLoadBalancer implements 
RSGroupableBalancer {
   }
 
   @Override
-  public Map roundRobinAssignment(
-  List regions, List servers) throws 
HBaseIOException {
-Map assignments = Maps.newHashMap();
-ListMultimap regionMap = ArrayListMultimap.create();
+  public Map roundRobinAssignment(
+ 

[02/19] hbase git commit: HBASE-18839 Apply RegionInfo to code base

2017-09-28 Thread chia7712
http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionInfoBuilder.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionInfoBuilder.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionInfoBuilder.java
index 497ecc4..5e553dc 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionInfoBuilder.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionInfoBuilder.java
@@ -86,7 +86,7 @@ public class TestRegionInfoBuilder {
 RegionInfo ri = RegionInfoBuilder.FIRST_META_REGIONINFO;
 byte [] bytes = RegionInfo.toByteArray(ri);
 RegionInfo pbri = RegionInfo.parseFrom(bytes);
-assertTrue(ri.equals(pbri));
+assertTrue(RegionInfo.COMPARATOR.compare(ri, pbri) == 0);
   }
 
   @Test
@@ -286,7 +286,7 @@ public class TestRegionInfoBuilder {
 .setReplicaId(replicaId).build();
 
 // convert two times, compare
-RegionInfo convertedRi = 
ProtobufUtil.toRegionInfo(ProtobufUtil.toProtoRegionInfo(ri));
+RegionInfo convertedRi = 
ProtobufUtil.toRegionInfo(ProtobufUtil.toRegionInfo(ri));
 
 assertEquals(ri, convertedRi);
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java
index b594a2f..15c6b76 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java
@@ -40,7 +40,6 @@ import org.apache.hadoop.hbase.CategoryBasedTimeout;
 import org.apache.hadoop.hbase.CoordinatedStateManager;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.MiniHBaseCluster;
 import org.apache.hadoop.hbase.ServerName;
@@ -49,6 +48,7 @@ import org.apache.hadoop.hbase.UnknownRegionException;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
 import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.RegionReplicaUtil;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
@@ -62,11 +62,6 @@ import org.apache.hadoop.hbase.master.RegionState;
 import org.apache.hadoop.hbase.master.assignment.AssignmentManager;
 import org.apache.hadoop.hbase.master.assignment.RegionStates;
 import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility;
-import org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController;
-import org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException;
-import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode;
-import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionRequest;
-import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionResponse;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.testclassification.RegionServerTests;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -87,6 +82,11 @@ import org.junit.rules.TestName;
 import org.junit.rules.TestRule;
 
 import org.apache.hadoop.hbase.shaded.com.google.common.base.Joiner;
+import org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController;
+import org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionRequest;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionResponse;
 
 @Category({RegionServerTests.class, MediumTests.class})
 public class TestRegionMergeTransactionOnCluster {
@@ -142,14 +142,14 @@ public class TestRegionMergeTransactionOnCluster {
 INITIAL_REGION_NUM - 1);
 
 // Merge 2nd and 3th region
-PairOfSameType<HRegionInfo> mergedRegions =
+PairOfSameType<RegionInfo> mergedRegions =
   mergeRegionsAndVerifyRegionNum(MASTER, tableName, 1, 2,
 INITIAL_REGION_NUM - 2);
 
 verifyRowCount(table, 

[14/19] hbase git commit: HBASE-18839 Apply RegionInfo to code base

2017-09-28 Thread chia7712
http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionPlacementMaintainer.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionPlacementMaintainer.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionPlacementMaintainer.java
index d23cf7d..afd402b 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionPlacementMaintainer.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionPlacementMaintainer.java
@@ -40,31 +40,32 @@ import org.apache.commons.cli.ParseException;
 import org.apache.commons.lang3.StringUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.hbase.ClusterStatus.Option;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.ClusterStatus.Option;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.favored.FavoredNodeAssignmentHelper;
 import org.apache.hadoop.hbase.favored.FavoredNodesPlan;
-import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface;
-import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
-import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest;
-import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.MunkresAssignment;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.log4j.Level;
 import org.apache.log4j.Logger;
+import org.apache.yetus.audience.InterfaceAudience;
+
+import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse;
 
 /**
  * A tool that is used for manipulating and viewing favored nodes information
@@ -197,12 +198,12 @@ public class RegionPlacementMaintainer {
   Map> regionLocalityMap, FavoredNodesPlan plan,
   boolean munkresForSecondaryAndTertiary) throws IOException {
   // Get the all the regions for the current table
-  List regions =
+  List regions =
 assignmentSnapshot.getTableToRegionMap().get(tableName);
   int numRegions = regions.size();
 
   // Get the current assignment map
-  Map currentAssignmentMap =
+  Map currentAssignmentMap =
 assignmentSnapshot.getRegionToRegionServerMap();
 
   // Get the all the region servers
@@ -257,12 +258,12 @@ public class RegionPlacementMaintainer {
 // Compute the total rack locality for each region in each rack. The 
total
 // rack locality is the sum of the localities of a region on all 
servers in
 // a rack.
-Map> rackRegionLocality = new 
HashMap<>();
+Map> rackRegionLocality = new 
HashMap<>();
 for (int i = 0; i < numRegions; i++) {
-  HRegionInfo region = regions.get(i);
+  RegionInfo region = regions.get(i);
   for (int j = 0; j < regionSlots; j += slotsPerServer) {
 String rack = rackManager.getRack(servers.get(j / slotsPerServer));
-Map rackLocality = 
rackRegionLocality.get(rack);
+Map rackLocality = rackRegionLocality.get(rack);
 if (rackLocality == null) {
   rackLocality = new HashMap<>();
   rackRegionLocality.put(rack, rackLocality);
@@ -417,18 +418,18 @@ public class RegionPlacementMaintainer {
 LOG.info("Assignment plan for secondary and tertiary generated " +
 "using MunkresAssignment");
   } else {
-Map primaryRSMap = new HashMap<>();
+Map primaryRSMap = new HashMap<>();
 for (int i = 0; i < numRegions; i++) {
  

[18/19] hbase git commit: HBASE-18839 Apply RegionInfo to code base

2017-09-28 Thread chia7712
http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
index fac3ef1..7b42d3d 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
@@ -40,6 +40,7 @@ import java.util.concurrent.TimeoutException;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.atomic.AtomicReference;
 import java.util.regex.Pattern;
+import java.util.stream.Collectors;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -69,8 +70,6 @@ import org.apache.hadoop.hbase.TableNotDisabledException;
 import org.apache.hadoop.hbase.TableNotFoundException;
 import org.apache.hadoop.hbase.UnknownRegionException;
 import org.apache.hadoop.hbase.ZooKeeperConnectionException;
-import org.apache.yetus.audience.InterfaceAudience;
-import org.apache.yetus.audience.InterfaceStability;
 import org.apache.hadoop.hbase.client.replication.ReplicationSerDeHelper;
 import org.apache.hadoop.hbase.client.replication.TableCFs;
 import org.apache.hadoop.hbase.client.security.SecurityCapability;
@@ -86,6 +85,26 @@ import 
org.apache.hadoop.hbase.regionserver.wal.FailedLogCloseException;
 import org.apache.hadoop.hbase.replication.ReplicationException;
 import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
 import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
+import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
+import org.apache.hadoop.hbase.snapshot.HBaseSnapshotException;
+import org.apache.hadoop.hbase.snapshot.RestoreSnapshotException;
+import org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
+import org.apache.hadoop.hbase.snapshot.UnknownSnapshotException;
+import org.apache.hadoop.hbase.util.Addressing;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.hadoop.hbase.util.ForeignExceptionUtil;
+import org.apache.hadoop.hbase.util.Pair;
+import org.apache.hadoop.hbase.zookeeper.MasterAddressTracker;
+import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
+import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
+import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.util.StringUtils;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.apache.yetus.audience.InterfaceStability;
+import org.apache.zookeeper.KeeperException;
+
+import 
org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException;
 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
@@ -184,28 +203,10 @@ import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRe
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos;
-import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
-import org.apache.hadoop.hbase.snapshot.HBaseSnapshotException;
-import org.apache.hadoop.hbase.snapshot.RestoreSnapshotException;
-import org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
-import org.apache.hadoop.hbase.snapshot.UnknownSnapshotException;
-import org.apache.hadoop.hbase.util.Addressing;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-import org.apache.hadoop.hbase.util.ForeignExceptionUtil;
-import org.apache.hadoop.hbase.util.Pair;
-import org.apache.hadoop.hbase.zookeeper.MasterAddressTracker;
-import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
-import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
-import org.apache.hadoop.ipc.RemoteException;
-import org.apache.hadoop.util.StringUtils;
-import org.apache.zookeeper.KeeperException;
 
-import 
org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
 import com.google.protobuf.Descriptors;
 import com.google.protobuf.Message;
 import com.google.protobuf.RpcController;
-import java.util.stream.Collectors;
 
 /**
  * HBaseAdmin is no longer a client API. It is marked 
InterfaceAudience.Private indicating that
@@ -399,12 +400,26 @@ public class HBaseAdmin implements Admin {
 
   @Override
   public List getRegions(final ServerName sn) throws IOException {
-return getOnlineRegions(sn).stream().collect(Collectors.toList());
+AdminService.BlockingInterface admin = this.connection.getAdmin(sn);
+  

[05/19] hbase git commit: HBASE-18839 Apply RegionInfo to code base

2017-09-28 Thread chia7712
http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterOperationsForRegionReplicas.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterOperationsForRegionReplicas.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterOperationsForRegionReplicas.java
index a8b9998..b73c873 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterOperationsForRegionReplicas.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterOperationsForRegionReplicas.java
@@ -19,12 +19,9 @@
 package org.apache.hadoop.hbase.master;
 
 import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
 
 import java.io.IOException;
-import java.util.Collection;
 import java.util.EnumSet;
-import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
@@ -38,18 +35,18 @@ import org.apache.hadoop.hbase.ClusterStatus.Option;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.MetaTableAccessor;
+import org.apache.hadoop.hbase.MetaTableAccessor.Visitor;
 import org.apache.hadoop.hbase.RegionLocations;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.MetaTableAccessor;
-import org.apache.hadoop.hbase.MetaTableAccessor.Visitor;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.Delete;
+import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.RegionReplicaUtil;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Table;
@@ -107,7 +104,7 @@ public class TestMasterOperationsForRegionReplicas {
   ADMIN.createTable(desc, Bytes.toBytes("A"), Bytes.toBytes("Z"), 
numRegions);
 
   validateNumberOfRowsInMeta(tableName, numRegions, ADMIN.getConnection());
-  List hris = MetaTableAccessor.getTableRegions(
+  List hris = MetaTableAccessor.getTableRegions(
 ADMIN.getConnection(), tableName);
   assert(hris.size() == numRegions * numReplica);
 } finally {
@@ -129,12 +126,12 @@ public class TestMasterOperationsForRegionReplicas {
   TEST_UTIL.waitTableEnabled(tableName);
   validateNumberOfRowsInMeta(tableName, numRegions, ADMIN.getConnection());
 
-  List hris = 
MetaTableAccessor.getTableRegions(ADMIN.getConnection(), tableName);
+  List hris = 
MetaTableAccessor.getTableRegions(ADMIN.getConnection(), tableName);
   assert(hris.size() == numRegions * numReplica);
   // check that the master created expected number of RegionState objects
   for (int i = 0; i < numRegions; i++) {
 for (int j = 0; j < numReplica; j++) {
-  HRegionInfo replica = 
RegionReplicaUtil.getRegionInfoForReplica(hris.get(i), j);
+  RegionInfo replica = 
RegionReplicaUtil.getRegionInfoForReplica(hris.get(i), j);
   RegionState state = 
TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager()
   .getRegionStates().getRegionState(replica);
   assert (state != null);
@@ -145,7 +142,7 @@ public class TestMasterOperationsForRegionReplicas {
   int numRows = 0;
   for (Result result : metaRows) {
 RegionLocations locations = 
MetaTableAccessor.getRegionLocations(result);
-HRegionInfo hri = locations.getRegionLocation().getRegionInfo();
+RegionInfo hri = locations.getRegionLocation().getRegionInfo();
 if (!hri.getTable().equals(tableName)) continue;
 numRows += 1;
 HRegionLocation[] servers = locations.getRegionLocations();
@@ -168,7 +165,7 @@ public class TestMasterOperationsForRegionReplicas {
   TEST_UTIL.getHBaseClusterInterface().waitForActiveAndReadyMaster();
   for (int i = 0; i < numRegions; i++) {
 for (int j = 0; j < numReplica; j++) {
-  HRegionInfo replica = 
RegionReplicaUtil.getRegionInfoForReplica(hris.get(i), j);
+  RegionInfo replica = 
RegionReplicaUtil.getRegionInfoForReplica(hris.get(i), j);
   RegionState state = 
TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager()
   .getRegionStates().getRegionState(replica);
   assert (state != null);
@@ -205,7 +202,7 @@ public class TestMasterOperationsForRegionReplicas {
   ADMIN.enableTable(tableName);
   LOG.info(ADMIN.getTableDescriptor(tableName).toString());
   assert(ADMIN.isTableEnabled(tableName));
-  List regions = 

[10/19] hbase git commit: HBASE-18839 Apply RegionInfo to code base

2017-09-28 Thread chia7712
http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/MasterFlushTableProcedureManager.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/MasterFlushTableProcedureManager.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/MasterFlushTableProcedureManager.java
index 03141a3..66f9240 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/MasterFlushTableProcedureManager.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/MasterFlushTableProcedureManager.java
@@ -24,15 +24,15 @@ import java.util.List;
 import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.ThreadPoolExecutor;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
-import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.MetaTableAccessor;
+import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.errorhandling.ForeignException;
 import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
 import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
@@ -43,12 +43,13 @@ import org.apache.hadoop.hbase.procedure.Procedure;
 import org.apache.hadoop.hbase.procedure.ProcedureCoordinator;
 import org.apache.hadoop.hbase.procedure.ProcedureCoordinatorRpcs;
 import org.apache.hadoop.hbase.procedure.ZKProcedureCoordinator;
-import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
+import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.zookeeper.KeeperException;
 
 import org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription;
 
 @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
 public class MasterFlushTableProcedureManager extends MasterProcedureManager {
@@ -125,7 +126,7 @@ public class MasterFlushTableProcedureManager extends 
MasterProcedureManager {
 // It is possible that regions may move after we get the region server 
list.
 // Each region server will get its own online regions for the table.
 // We may still miss regions that need to be flushed.
-List> regionsAndLocations;
+List> regionsAndLocations;
 
 if (TableName.META_TABLE_NAME.equals(tableName)) {
   regionsAndLocations = new MetaTableLocator().getMetaRegionsAndLocations(
@@ -136,9 +137,9 @@ public class MasterFlushTableProcedureManager extends 
MasterProcedureManager {
 }
 
 Set regionServers = new HashSet<>(regionsAndLocations.size());
-for (Pair region : regionsAndLocations) {
+for (Pair region : regionsAndLocations) {
   if (region != null && region.getFirst() != null && region.getSecond() != 
null) {
-HRegionInfo hri = region.getFirst();
+RegionInfo hri = region.getFirst();
 if (hri.isOffline() && (hri.isSplit() || hri.isSplitParent())) 
continue;
 regionServers.add(region.getSecond().toString());
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/FileSystemUtilizationChore.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/FileSystemUtilizationChore.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/FileSystemUtilizationChore.java
index 89d7e14..a76e9c1 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/FileSystemUtilizationChore.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/FileSystemUtilizationChore.java
@@ -26,16 +26,14 @@ import java.util.concurrent.TimeUnit;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.ScheduledChore;
-import org.apache.yetus.audience.InterfaceAudience;
+import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.regionserver.Store;
-import org.apache.hadoop.hbase.regionserver.StoreFile;

[19/19] hbase git commit: HBASE-18839 Apply RegionInfo to code base

2017-09-28 Thread chia7712
HBASE-18839 Apply RegionInfo to code base


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/a11a35a1
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/a11a35a1
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/a11a35a1

Branch: refs/heads/master
Commit: a11a35a1135c431ee12534451c925727165eded5
Parents: 7f4c3b3
Author: Chia-Ping Tsai 
Authored: Thu Sep 28 16:16:21 2017 +0800
Committer: Chia-Ping Tsai 
Committed: Thu Sep 28 20:19:29 2017 +0800

--
 .../hadoop/hbase/backup/util/BackupUtils.java   |  14 +-
 .../hadoop/hbase/AsyncMetaTableAccessor.java|  47 +--
 .../org/apache/hadoop/hbase/HRegionInfo.java|  19 +-
 .../apache/hadoop/hbase/HRegionLocation.java|  26 +-
 .../apache/hadoop/hbase/MetaTableAccessor.java  | 245 ++---
 .../apache/hadoop/hbase/RegionLocations.java|   5 +-
 .../apache/hadoop/hbase/client/AsyncAdmin.java  |  15 +-
 .../hadoop/hbase/client/AsyncHBaseAdmin.java|   9 +-
 .../hbase/client/ConnectionImplementation.java  |  38 +-
 .../hbase/client/FlushRegionCallable.java   |  10 +-
 .../apache/hadoop/hbase/client/HBaseAdmin.java  | 134 ---
 .../hadoop/hbase/client/HRegionLocator.java |   9 +-
 .../hadoop/hbase/client/HTableMultiplexer.java  |  15 +-
 .../hbase/client/ImmutableHRegionInfo.java  |   2 +-
 .../hadoop/hbase/client/RawAsyncHBaseAdmin.java |  64 ++--
 .../hadoop/hbase/client/RawAsyncTable.java  |  25 +-
 .../hadoop/hbase/client/RawAsyncTableImpl.java  |  18 +-
 .../client/RegionCoprocessorRpcChannelImpl.java |  22 +-
 .../apache/hadoop/hbase/client/RegionInfo.java  |  27 +-
 .../hadoop/hbase/client/RegionInfoBuilder.java  | 360 ++-
 .../hadoop/hbase/client/RegionReplicaUtil.java  |  65 ++--
 .../hadoop/hbase/client/ZooKeeperRegistry.java  |   7 +-
 .../apache/hadoop/hbase/master/RegionState.java |  23 +-
 .../hbase/shaded/protobuf/ProtobufUtil.java |  38 +-
 .../hbase/shaded/protobuf/RequestConverter.java |  48 +--
 .../shaded/protobuf/ResponseConverter.java  |  20 +-
 .../hbase/zookeeper/MetaTableLocator.java   |  71 ++--
 .../hadoop/hbase/client/TestAsyncProcess.java   |  28 +-
 .../coprocessor/AsyncAggregationClient.java |  28 +-
 .../apache/hadoop/hbase/coprocessor/Export.java |  22 +-
 .../example/TestRefreshHFilesEndpoint.java  |  20 +-
 .../mapreduce/TableSnapshotInputFormat.java |  26 +-
 .../hbase/regionserver/CompactionTool.java  |  10 +-
 .../hadoop/hbase/snapshot/ExportSnapshot.java   |  23 +-
 .../hbase/mapreduce/TestImportExport.java   |  18 +-
 .../replication/TestReplicationSmallTests.java  |  25 +-
 .../hbase/snapshot/TestExportSnapshot.java  |  13 +-
 .../hbase/snapshot/TestMobExportSnapshot.java   |   4 +-
 .../hadoop/hbase/rest/RegionsResource.java  |  20 +-
 .../hbase/rsgroup/RSGroupAdminServer.java   |  43 +--
 .../hbase/rsgroup/RSGroupBasedLoadBalancer.java | 104 +++---
 .../hbase/rsgroup/RSGroupInfoManagerImpl.java   |  15 +-
 .../balancer/TestRSGroupBasedLoadBalancer.java  | 133 +++
 .../hadoop/hbase/rsgroup/TestRSGroupsBase.java  |  22 +-
 .../hbase/tmpl/master/MasterStatusTmpl.jamon|   6 +-
 .../hbase/tmpl/regionserver/RSStatusTmpl.jamon  |   4 +-
 .../tmpl/regionserver/RegionListTmpl.jamon  |  41 +--
 .../hadoop/hbase/RegionStateListener.java   |  11 +-
 .../hadoop/hbase/backup/HFileArchiver.java  |  18 +-
 .../hbase/client/ClientSideRegionScanner.java   |   5 +-
 .../hbase/client/TableSnapshotScanner.java  |  13 +-
 .../hbase/client/locking/LockServiceClient.java |  18 +-
 .../SplitLogManagerCoordination.java|   8 +-
 .../ZKSplitLogManagerCoordination.java  |  16 +-
 .../coprocessor/MultiRowMutationEndpoint.java   |  16 +-
 .../favored/FavoredNodeAssignmentHelper.java| 104 +++---
 .../hbase/favored/FavoredNodeLoadBalancer.java  |  85 ++---
 .../hbase/favored/FavoredNodesManager.java  |  44 +--
 .../hadoop/hbase/favored/FavoredNodesPlan.java  |  10 +-
 .../hbase/favored/FavoredNodesPromoter.java |   8 +-
 .../org/apache/hadoop/hbase/io/HFileLink.java   |  19 +-
 .../hadoop/hbase/master/AssignmentListener.java |   9 +-
 .../master/AssignmentVerificationReport.java|  38 +-
 .../hadoop/hbase/master/CatalogJanitor.java |  56 ++-
 .../org/apache/hadoop/hbase/master/HMaster.java |  58 +--
 .../hadoop/hbase/master/LoadBalancer.java   |  32 +-
 .../hadoop/hbase/master/MasterFileSystem.java   |  13 +-
 .../hbase/master/MasterMetaBootstrap.java   |  15 +-
 .../hadoop/hbase/master/MasterRpcServices.java  |  54 +--
 .../hadoop/hbase/master/MasterServices.java |  11 +-
 .../hadoop/hbase/master/MasterWalManager.java   |   6 +-
 .../hbase/master/RegionPlacementMaintainer.java |  67 ++--
 .../apache/hadoop/hbase/master/RegionPlan.java  |  10 +-
 .../hadoop/hbase/master/ServerManager.java  |  32 +-
 

[04/19] hbase git commit: HBASE-18839 Apply RegionInfo to code base

2017-09-28 Thread chia7712
http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestBaseLoadBalancer.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestBaseLoadBalancer.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestBaseLoadBalancer.java
index b360145..c33cd56 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestBaseLoadBalancer.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestBaseLoadBalancer.java
@@ -17,7 +17,12 @@
  */
 package org.apache.hadoop.hbase.master.balancer;
 
-import org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.HashMap;
@@ -28,15 +33,17 @@ import java.util.Set;
 import java.util.TreeMap;
 import java.util.TreeSet;
 import java.util.stream.Collectors;
+
 import org.apache.commons.lang3.ArrayUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HBaseIOException;
-import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.client.RegionInfoBuilder;
 import org.apache.hadoop.hbase.client.RegionReplicaUtil;
 import org.apache.hadoop.hbase.master.LoadBalancer;
 import org.apache.hadoop.hbase.master.MasterServices;
@@ -48,17 +55,14 @@ import 
org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.MoveRegi
 import org.apache.hadoop.hbase.testclassification.MasterTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.net.DNSToSwitchMapping;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
 import org.junit.BeforeClass;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 import org.junit.rules.TestName;
 import org.mockito.Mockito;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.when;
+
+import org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
 
 @Category({MasterTests.class, MediumTests.class})
 public class TestBaseLoadBalancer extends BalancerTestBase {
@@ -108,13 +112,13 @@ public class TestBaseLoadBalancer extends 
BalancerTestBase {
 
   public static class MockBalancer extends BaseLoadBalancer {
 @Override
-public List<RegionPlan> balanceCluster(Map<ServerName, List<HRegionInfo>> clusterState) {
+public List<RegionPlan> balanceCluster(Map<ServerName, List<RegionInfo>> clusterState) {
   return null;
 }
 
 @Override
 public List<RegionPlan> balanceCluster(TableName tableName,
-Map<ServerName, List<HRegionInfo>> clusterState) throws HBaseIOException {
+Map<ServerName, List<RegionInfo>> clusterState) throws HBaseIOException {
   return null;
 }
   }
@@ -125,9 +129,9 @@ public class TestBaseLoadBalancer extends BalancerTestBase {
* @param servers
* @param assignments
*/
-  private void assertImmediateAssignment(List<HRegionInfo> regions, List<ServerName> servers,
-  Map<HRegionInfo, ServerName> assignments) {
-for (HRegionInfo region : regions) {
+  private void assertImmediateAssignment(List<RegionInfo> regions, List<ServerName> servers,
+  Map<RegionInfo, ServerName> assignments) {
+for (RegionInfo region : regions) {
   assertTrue(assignments.containsKey(region));
 }
   }
@@ -143,31 +147,31 @@ public class TestBaseLoadBalancer extends 
BalancerTestBase {
   @Test (timeout=18)
   public void testBulkAssignment() throws Exception {
 List tmp = getListOfServerNames(randomServers(5, 0));
-List<HRegionInfo> hris = randomRegions(20);
-hris.add(HRegionInfo.FIRST_META_REGIONINFO);
+List<RegionInfo> hris = randomRegions(20);
+hris.add(RegionInfoBuilder.FIRST_META_REGIONINFO);
 tmp.add(master);
-Map<ServerName, List<HRegionInfo>> plans = loadBalancer.roundRobinAssignment(hris, tmp);
+Map<ServerName, List<RegionInfo>> plans = loadBalancer.roundRobinAssignment(hris, tmp);
 if (LoadBalancer.isTablesOnMaster(loadBalancer.getConf())) {
-  
assertTrue(plans.get(master).contains(HRegionInfo.FIRST_META_REGIONINFO));
+  
assertTrue(plans.get(master).contains(RegionInfoBuilder.FIRST_META_REGIONINFO));
   assertEquals(1, plans.get(master).size());
 }
 int totalRegion = 0;
-for (List<HRegionInfo> regions: plans.values()) {
+for (List<RegionInfo> regions: plans.values()) {
   totalRegion += regions.size();
 }
 assertEquals(hris.size(), totalRegion);
  

[08/19] hbase git commit: HBASE-18839 Apply RegionInfo to code base

2017-09-28 Thread chia7712
http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
index b4e5007..e942a02 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
@@ -22,8 +22,6 @@ package org.apache.hadoop.hbase.tool;
 import static 
org.apache.hadoop.hbase.HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT;
 import static org.apache.hadoop.hbase.HConstants.ZOOKEEPER_ZNODE_PARENT;
 
-import org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
-
 import java.io.Closeable;
 import java.io.IOException;
 import java.net.InetSocketAddress;
@@ -62,7 +60,6 @@ import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.MetaTableAccessor;
@@ -72,13 +69,13 @@ import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.TableNotEnabledException;
 import org.apache.hadoop.hbase.TableNotFoundException;
-import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.RegionLocator;
 import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Scan;
@@ -96,11 +93,14 @@ import org.apache.hadoop.hbase.zookeeper.ZKConfig;
 import org.apache.hadoop.util.GenericOptionsParser;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
+import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.zookeeper.KeeperException;
 import org.apache.zookeeper.ZooKeeper;
 import org.apache.zookeeper.client.ConnectStringParser;
 import org.apache.zookeeper.data.Stat;
 
+import org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
+
 /**
  * HBase Canary Tool, that that can be used to do
  * "canary monitoring" of a running HBase cluster.
@@ -210,34 +210,34 @@ public final class Canary implements Tool {
private Map<String, LongAdder> perTableReadLatency = new HashMap<>();
 private LongAdder writeLatency = new LongAdder();
 
-public void publishReadFailure(ServerName serverName, HRegionInfo region, 
Exception e) {
+public void publishReadFailure(ServerName serverName, RegionInfo region, 
Exception e) {
   incReadFailureCount();
   LOG.error(String.format("read from region %s on regionserver %s failed", 
region.getRegionNameAsString(), serverName), e);
 }
 
-public void publishReadFailure(ServerName serverName, HRegionInfo region, 
ColumnFamilyDescriptor column, Exception e) {
+public void publishReadFailure(ServerName serverName, RegionInfo region, 
ColumnFamilyDescriptor column, Exception e) {
   incReadFailureCount();
   LOG.error(String.format("read from region %s on regionserver %s column 
family %s failed",
 region.getRegionNameAsString(), serverName, column.getNameAsString()), 
e);
 }
 
-public void publishReadTiming(ServerName serverName, HRegionInfo region, 
ColumnFamilyDescriptor column, long msTime) {
+public void publishReadTiming(ServerName serverName, RegionInfo region, 
ColumnFamilyDescriptor column, long msTime) {
   LOG.info(String.format("read from region %s on regionserver %s column 
family %s in %dms",
 region.getRegionNameAsString(), serverName, column.getNameAsString(), 
msTime));
 }
 
-public void publishWriteFailure(ServerName serverName, HRegionInfo region, 
Exception e) {
+public void publishWriteFailure(ServerName serverName, RegionInfo region, 
Exception e) {
   incWriteFailureCount();
   LOG.error(String.format("write to region %s on regionserver %s failed", 
region.getRegionNameAsString(), serverName), e);
 }
 
-public void publishWriteFailure(ServerName serverName, HRegionInfo region, 
ColumnFamilyDescriptor column, Exception e) {
+public void publishWriteFailure(ServerName serverName, RegionInfo region, 
ColumnFamilyDescriptor column, Exception e) {
   incWriteFailureCount();
   LOG.error(String.format("write to region %s on regionserver %s column 
family %s failed",
 region.getRegionNameAsString(), 

[01/19] hbase git commit: HBASE-18839 Apply RegionInfo to code base

2017-09-28 Thread chia7712
Repository: hbase
Updated Branches:
  refs/heads/master 7f4c3b356 -> a11a35a11


http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckTwoRS.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckTwoRS.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckTwoRS.java
index beef02b..6fa455a 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckTwoRS.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckTwoRS.java
@@ -34,8 +34,24 @@ import java.util.concurrent.TimeUnit;
 
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.*;
-import org.apache.hadoop.hbase.client.*;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionLocation;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.MetaTableAccessor;
+import org.apache.hadoop.hbase.MiniHBaseCluster;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.ClusterConnection;
+import org.apache.hadoop.hbase.client.Durability;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.client.RegionInfoBuilder;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
 import org.apache.hadoop.hbase.io.HFileLink;
 import org.apache.hadoop.hbase.io.hfile.HFile;
@@ -43,7 +59,6 @@ import org.apache.hadoop.hbase.io.hfile.HFileContext;
 import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
 import org.apache.hadoop.hbase.master.RegionState;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
-import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.testclassification.MiscTests;
 import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
@@ -57,6 +72,7 @@ import org.junit.experimental.categories.Category;
 import org.junit.rules.TestName;
 
 import org.apache.hadoop.hbase.shaded.com.google.common.collect.Multimap;
+import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 
 @Ignore // Until after HBASE-14614 goes in.
 @Category({MiscTests.class, LargeTests.class})
@@ -111,10 +127,10 @@ public class TestHBaseFsckTwoRS extends BaseTestHBaseFsck 
{
   @Test(timeout=18)
   public void testFixAssignmentsWhenMETAinTransition() throws Exception {
 MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
-admin.unassign(HRegionInfo.FIRST_META_REGIONINFO.getRegionName(), true);
-assignmentManager.offlineRegion(HRegionInfo.FIRST_META_REGIONINFO);
+admin.unassign(RegionInfoBuilder.FIRST_META_REGIONINFO.getRegionName(), 
true);
+assignmentManager.offlineRegion(RegionInfoBuilder.FIRST_META_REGIONINFO);
 new 
MetaTableLocator().deleteMetaLocation(cluster.getMaster().getZooKeeper());
-
assertFalse(regionStates.isRegionOnline(HRegionInfo.FIRST_META_REGIONINFO));
+
assertFalse(regionStates.isRegionOnline(RegionInfoBuilder.FIRST_META_REGIONINFO));
 HBaseFsck hbck = doFsck(conf, true);
 assertErrors(hbck, new HBaseFsck.ErrorReporter.ERROR_CODE[] { 
HBaseFsck.ErrorReporter.ERROR_CODE.UNKNOWN, 
HBaseFsck.ErrorReporter.ERROR_CODE.NO_META_REGION,
 HBaseFsck.ErrorReporter.ERROR_CODE.NULL_META_REGION });
@@ -134,7 +150,7 @@ public class TestHBaseFsckTwoRS extends BaseTestHBaseFsck {
   assertEquals(ROWKEYS.length, countRows());
 
   // Now let's mess it up, by adding a region with a duplicate startkey
-  HRegionInfo hriDupe =
+  RegionInfo hriDupe =
   createRegion(tbl.getTableDescriptor(), Bytes.toBytes("A"), 
Bytes.toBytes("A2"));
   TEST_UTIL.assignRegion(hriDupe);
 
@@ -172,7 +188,7 @@ public class TestHBaseFsckTwoRS extends BaseTestHBaseFsck {
   assertEquals(ROWKEYS.length, countRows());
 
   // Now let's mess it up, by adding a region with a duplicate startkey
-  HRegionInfo hriDupe =
+  RegionInfo hriDupe =
   createRegion(tbl.getTableDescriptor(), Bytes.toBytes("A"), 
Bytes.toBytes("B"));
   TEST_UTIL.assignRegion(hriDupe);
 
@@ -221,7 +237,7 @@ public class TestHBaseFsckTwoRS extends BaseTestHBaseFsck {
   assertEquals(ROWKEYS.length, countRows());
 
   // Mess it up by creating an overlap in the metadata
-  HRegionInfo hriOverlap =
+  RegionInfo hriOverlap =
   createRegion(tbl.getTableDescriptor(), Bytes.toBytes("A2"), 
Bytes.toBytes("B"));
  

[11/19] hbase git commit: HBASE-18839 Apply RegionInfo to code base

2017-09-28 Thread chia7712
http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CloneSnapshotProcedure.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CloneSnapshotProcedure.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CloneSnapshotProcedure.java
index 9f2baf4..6155f16 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CloneSnapshotProcedure.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CloneSnapshotProcedure.java
@@ -30,11 +30,10 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.TableExistsException;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.yetus.audience.InterfaceAudience;
+import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.errorhandling.ForeignException;
@@ -46,11 +45,6 @@ import 
org.apache.hadoop.hbase.master.procedure.CreateTableProcedure.CreateHdfsR
 import org.apache.hadoop.hbase.monitoring.MonitoredTask;
 import org.apache.hadoop.hbase.monitoring.TaskMonitor;
 import org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer;
-import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos;
-import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.CloneSnapshotState;
-import 
org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;
 import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
 import org.apache.hadoop.hbase.snapshot.RestoreSnapshotException;
 import org.apache.hadoop.hbase.snapshot.RestoreSnapshotHelper;
@@ -59,8 +53,14 @@ import org.apache.hadoop.hbase.snapshot.SnapshotManifest;
 import org.apache.hadoop.hbase.util.FSTableDescriptors;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.Pair;
+import org.apache.yetus.audience.InterfaceAudience;
 
 import org.apache.hadoop.hbase.shaded.com.google.common.base.Preconditions;
+import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.CloneSnapshotState;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;
 
 @InterfaceAudience.Private
 public class CloneSnapshotProcedure
@@ -70,7 +70,7 @@ public class CloneSnapshotProcedure
   private TableDescriptor tableDescriptor;
   private SnapshotDescription snapshot;
   private boolean restoreAcl;
-  private List<HRegionInfo> newRegions = null;
+  private List<RegionInfo> newRegions = null;
   private Map<String, Pair<String, String>> parentsToChildrenPairMap = new HashMap<>();
 
   // Monitor
@@ -253,8 +253,8 @@ public class CloneSnapshotProcedure
 .setSnapshot(this.snapshot)
 .setTableSchema(ProtobufUtil.toTableSchema(tableDescriptor));
 if (newRegions != null) {
-  for (HRegionInfo hri: newRegions) {
-cloneSnapshotMsg.addRegionInfo(HRegionInfo.convert(hri));
+  for (RegionInfo hri: newRegions) {
+cloneSnapshotMsg.addRegionInfo(ProtobufUtil.toRegionInfo(hri));
   }
 }
 if (!parentsToChildrenPairMap.isEmpty()) {
@@ -289,7 +289,7 @@ public class CloneSnapshotProcedure
 } else {
   newRegions = new ArrayList<>(cloneSnapshotMsg.getRegionInfoCount());
   for (HBaseProtos.RegionInfo hri: cloneSnapshotMsg.getRegionInfoList()) {
-newRegions.add(HRegionInfo.convert(hri));
+newRegions.add(ProtobufUtil.toRegionInfo(hri));
   }
 }
 if (cloneSnapshotMsg.getParentToChildRegionsPairListCount() > 0) {
@@ -357,8 +357,8 @@ public class CloneSnapshotProcedure
   throws IOException, InterruptedException {
 final MasterCoprocessorHost cpHost = env.getMasterCoprocessorHost();
 if (cpHost != null) {
-  final HRegionInfo[] regions = (newRegions == null) ? null :
-newRegions.toArray(new HRegionInfo[newRegions.size()]);
+  final RegionInfo[] regions = (newRegions == null) ? null :
+newRegions.toArray(new RegionInfo[newRegions.size()]);
   cpHost.postCompletedCreateTableAction(tableDescriptor, regions, 
getUser());
 }
   }
@@ -368,16 +368,16 @@ public class CloneSnapshotProcedure
* @param env MasterProcedureEnv
* @throws 

[15/19] hbase git commit: HBASE-18839 Apply RegionInfo to code base

2017-09-28 Thread chia7712
http://git-wip-us.apache.org/repos/asf/hbase/blob/a11a35a1/hbase-server/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeLoadBalancer.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeLoadBalancer.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeLoadBalancer.java
index 9c5c180..68e5e89 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeLoadBalancer.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeLoadBalancer.java
@@ -29,23 +29,26 @@ import java.util.List;
 import java.util.Map;
 import java.util.Set;
 
-import org.apache.hadoop.hbase.shaded.com.google.common.collect.Maps;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.favored.FavoredNodesPlan.Position;
 import org.apache.hadoop.hbase.HBaseIOException;
 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
-import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.ServerLoad;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.master.*;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.favored.FavoredNodesPlan.Position;
+import org.apache.hadoop.hbase.master.RackManager;
+import org.apache.hadoop.hbase.master.RegionPlan;
+import org.apache.hadoop.hbase.master.ServerManager;
+import org.apache.hadoop.hbase.master.SnapshotOfRegionAssignmentFromMeta;
 import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer;
 import org.apache.hadoop.hbase.util.Pair;
+import org.apache.yetus.audience.InterfaceAudience;
 
 import org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
+import org.apache.hadoop.hbase.shaded.com.google.common.collect.Maps;
 import org.apache.hadoop.hbase.shaded.com.google.common.collect.Sets;
 
 /**
@@ -85,7 +88,7 @@ public class FavoredNodeLoadBalancer extends BaseLoadBalancer 
implements Favored
   }
 
   @Override
-  public List<RegionPlan> balanceCluster(Map<ServerName, List<HRegionInfo>> clusterState)  {
+  public List<RegionPlan> balanceCluster(Map<ServerName, List<RegionInfo>> clusterState)  {
 //TODO. Look at is whether Stochastic loadbalancer can be integrated with 
this
 List plans = new ArrayList<>();
 //perform a scan of the meta to get the latest updates (if any)
@@ -105,13 +108,13 @@ public class FavoredNodeLoadBalancer extends 
BaseLoadBalancer implements Favored
   // FindBugs complains about useless store! 
serverNameToServerNameWithoutCode.put(sn, s);
   serverNameWithoutCodeToServerName.put(s, sn);
 }
-for (Map.Entry<ServerName, List<HRegionInfo>> entry : clusterState.entrySet()) {
+for (Map.Entry<ServerName, List<RegionInfo>> entry : clusterState.entrySet()) {
   ServerName currentServer = entry.getKey();
   //get a server without the startcode for the currentServer
   ServerName currentServerWithoutStartCode = 
ServerName.valueOf(currentServer.getHostname(),
   currentServer.getPort(), ServerName.NON_STARTCODE);
-  List<HRegionInfo> list = entry.getValue();
-  for (HRegionInfo region : list) {
+  List<RegionInfo> list = entry.getValue();
+  for (RegionInfo region : list) {
 if(!FavoredNodesManager.isFavoredNodeApplicable(region)) {
   continue;
 }
@@ -157,9 +160,9 @@ public class FavoredNodeLoadBalancer extends 
BaseLoadBalancer implements Favored
   }
 
   @Override
-  public Map<ServerName, List<HRegionInfo>> roundRobinAssignment(List<HRegionInfo> regions,
+  public Map<ServerName, List<RegionInfo>> roundRobinAssignment(List<RegionInfo> regions,
    List<ServerName> servers) throws HBaseIOException {
-Map<ServerName, List<HRegionInfo>> assignmentMap;
+Map<ServerName, List<RegionInfo>> assignmentMap;
 try {
   FavoredNodeAssignmentHelper assignmentHelper =
   new FavoredNodeAssignmentHelper(servers, rackManager);
@@ -183,10 +186,10 @@ public class FavoredNodeLoadBalancer extends 
BaseLoadBalancer implements Favored
   //need to come up with favored nodes assignments for them. The 
corner case
   //in (1) above is that all the nodes are unavailable and in that 
case, we
   //will note that this region doesn't have favored nodes.
-  Pair<Map<ServerName, List<HRegionInfo>>, List<HRegionInfo>> segregatedRegions =
+  Pair<Map<ServerName, List<RegionInfo>>, List<RegionInfo>> segregatedRegions =
    segregateRegionsAndAssignRegionsWithFavoredNodes(regions, servers);
-  Map<ServerName, List<HRegionInfo>> regionsWithFavoredNodesMap = segregatedRegions.getFirst();
-  List<HRegionInfo> regionsWithNoFavoredNodes = segregatedRegions.getSecond();
+  Map<ServerName, List<RegionInfo>> regionsWithFavoredNodesMap = segregatedRegions.getFirst();
+  List<RegionInfo> regionsWithNoFavoredNodes = segregatedRegions.getSecond();
   assignmentMap = new HashMap<>();
   roundRobinAssignmentImpl(assignmentHelper, assignmentMap, 

[2/3] hbase git commit: HBASE-18826 Use HStore instead of Store in our own code base and remove unnecessary methods in Store interface

2017-09-28 Thread zhangduo
http://git-wip-us.apache.org/repos/asf/hbase/blob/d26b8f8d/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestBlockEvictionFromClient.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestBlockEvictionFromClient.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestBlockEvictionFromClient.java
index d558307..dec28f3 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestBlockEvictionFromClient.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestBlockEvictionFromClient.java
@@ -53,11 +53,11 @@ import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.io.hfile.CachedBlock;
 import org.apache.hadoop.hbase.io.hfile.CombinedBlockCache;
 import org.apache.hadoop.hbase.io.hfile.bucket.BucketCache;
+import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.regionserver.HStore;
 import org.apache.hadoop.hbase.regionserver.InternalScanner;
-import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.regionserver.RegionScanner;
 import org.apache.hadoop.hbase.regionserver.ScannerContext;
-import org.apache.hadoop.hbase.regionserver.Store;
 import org.apache.hadoop.hbase.testclassification.ClientTests;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -65,9 +65,9 @@ import org.junit.After;
 import org.junit.AfterClass;
 import org.junit.Before;
 import org.junit.BeforeClass;
+import org.junit.Ignore;
 import org.junit.Rule;
 import org.junit.Test;
-import org.junit.Ignore;
 import org.junit.experimental.categories.Category;
 import org.junit.rules.TestName;
 
@@ -184,8 +184,9 @@ public class TestBlockEvictionFromClient {
   // get the block cache and region
   RegionLocator locator = 
TEST_UTIL.getConnection().getRegionLocator(tableName);
   String regionName = 
locator.getAllRegionLocations().get(0).getRegionInfo().getEncodedName();
-  Region region = 
TEST_UTIL.getRSForFirstRegionInTable(tableName).getRegion(regionName);
-  Store store = region.getStores().iterator().next();
+  HRegion region = (HRegion) 
TEST_UTIL.getRSForFirstRegionInTable(tableName)
+  .getRegion(regionName);
+  HStore store = region.getStores().iterator().next();
   CacheConfig cacheConf = store.getCacheConfig();
   cacheConf.setCacheDataOnWrite(true);
   cacheConf.setEvictOnClose(true);
@@ -274,8 +275,9 @@ public class TestBlockEvictionFromClient {
   // get the block cache and region
   RegionLocator locator = 
TEST_UTIL.getConnection().getRegionLocator(tableName);
   String regionName = 
locator.getAllRegionLocations().get(0).getRegionInfo().getEncodedName();
-  Region region = 
TEST_UTIL.getRSForFirstRegionInTable(tableName).getRegion(regionName);
-  Store store = region.getStores().iterator().next();
+  HRegion region =
+  (HRegion) 
TEST_UTIL.getRSForFirstRegionInTable(tableName).getRegion(regionName);
+  HStore store = region.getStores().iterator().next();
   CacheConfig cacheConf = store.getCacheConfig();
   cacheConf.setCacheDataOnWrite(true);
   cacheConf.setEvictOnClose(true);
@@ -332,8 +334,9 @@ public class TestBlockEvictionFromClient {
   // get the block cache and region
   RegionLocator locator = 
TEST_UTIL.getConnection().getRegionLocator(tableName);
   String regionName = 
locator.getAllRegionLocations().get(0).getRegionInfo().getEncodedName();
-  Region region = 
TEST_UTIL.getRSForFirstRegionInTable(tableName).getRegion(regionName);
-  Store store = region.getStores().iterator().next();
+  HRegion region =
+  (HRegion) 
TEST_UTIL.getRSForFirstRegionInTable(tableName).getRegion(regionName);
+  HStore store = region.getStores().iterator().next();
   CacheConfig cacheConf = store.getCacheConfig();
   cacheConf.setCacheDataOnWrite(true);
   cacheConf.setEvictOnClose(true);
@@ -393,7 +396,8 @@ public class TestBlockEvictionFromClient {
   // get the block cache and region
   RegionLocator locator = 
TEST_UTIL.getConnection().getRegionLocator(tableName);
   String regionName = 
locator.getAllRegionLocations().get(0).getRegionInfo().getEncodedName();
-  Region region = 
TEST_UTIL.getRSForFirstRegionInTable(tableName).getRegion(regionName);
+  HRegion region =
+  (HRegion) 
TEST_UTIL.getRSForFirstRegionInTable(tableName).getRegion(regionName);
   BlockCache cache = setCacheProperties(region);
   Put put = new Put(ROW);
   put.addColumn(FAMILY, QUALIFIER, data);
@@ -485,7 +489,8 @@ public class TestBlockEvictionFromClient {
   // get the block cache and region
   RegionLocator locator = 
TEST_UTIL.getConnection().getRegionLocator(tableName);
   String regionName = 
locator.getAllRegionLocations().get(0).getRegionInfo().getEncodedName();
-

[1/3] hbase git commit: HBASE-18826 Use HStore instead of Store in our own code base and remove unnecessary methods in Store interface

2017-09-28 Thread zhangduo
Repository: hbase
Updated Branches:
  refs/heads/branch-2 f73a3a6fb -> d26b8f8dd


http://git-wip-us.apache.org/repos/asf/hbase/blob/d26b8f8d/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestKeepDeletes.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestKeepDeletes.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestKeepDeletes.java
index d93152a..7edcf54 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestKeepDeletes.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestKeepDeletes.java
@@ -99,7 +99,7 @@ public class TestKeepDeletes {
 // keep 3 versions, rows do not expire
 HTableDescriptor htd = hbu.createTableDescriptor(name.getMethodName(), 0, 
3,
 HConstants.FOREVER, KeepDeletedCells.TRUE);
-Region region = hbu.createLocalHRegion(htd, null, null);
+HRegion region = hbu.createLocalHRegion(htd, null, null);
 
 long ts = EnvironmentEdgeManager.currentTime();
 Put p = new Put(T1, ts);
@@ -241,7 +241,7 @@ public class TestKeepDeletes {
 // KEEP_DELETED_CELLS is NOT enabled
 HTableDescriptor htd = hbu.createTableDescriptor(name.getMethodName(), 0, 
3,
 HConstants.FOREVER, KeepDeletedCells.FALSE);
-Region region = hbu.createLocalHRegion(htd, null, null);
+HRegion region = hbu.createLocalHRegion(htd, null, null);
 
 long ts = EnvironmentEdgeManager.currentTime();
 Put p = new Put(T1, ts);
@@ -408,7 +408,7 @@ public class TestKeepDeletes {
   public void testDeleteMarkerExpirationEmptyStore() throws Exception {
 HTableDescriptor htd = hbu.createTableDescriptor(name.getMethodName(), 0, 
1,
 HConstants.FOREVER, KeepDeletedCells.TRUE);
-Region region = hbu.createLocalHRegion(htd, null, null);
+HRegion region = hbu.createLocalHRegion(htd, null, null);
 
 long ts = EnvironmentEdgeManager.currentTime();
 
@@ -451,7 +451,7 @@ public class TestKeepDeletes {
   public void testDeleteMarkerExpiration() throws Exception {
 HTableDescriptor htd = hbu.createTableDescriptor(name.getMethodName(), 0, 
1,
 HConstants.FOREVER, KeepDeletedCells.TRUE);
-Region region = hbu.createLocalHRegion(htd, null, null);
+HRegion region = hbu.createLocalHRegion(htd, null, null);
 
 long ts = EnvironmentEdgeManager.currentTime();
 
@@ -514,7 +514,7 @@ public class TestKeepDeletes {
   public void testWithOldRow() throws Exception {
 HTableDescriptor htd = hbu.createTableDescriptor(name.getMethodName(), 0, 
1,
 HConstants.FOREVER, KeepDeletedCells.TRUE);
-Region region = hbu.createLocalHRegion(htd, null, null);
+HRegion region = hbu.createLocalHRegion(htd, null, null);
 
 long ts = EnvironmentEdgeManager.currentTime();
 
@@ -674,7 +674,7 @@ public class TestKeepDeletes {
   public void testDeleteMarkerVersioning() throws Exception {
 HTableDescriptor htd = hbu.createTableDescriptor(name.getMethodName(), 0, 
1,
 HConstants.FOREVER, KeepDeletedCells.TRUE);
-Region region = hbu.createLocalHRegion(htd, null, null);
+HRegion region = hbu.createLocalHRegion(htd, null, null);
 
 long ts = EnvironmentEdgeManager.currentTime();
 Put p = new Put(T1, ts);
@@ -818,7 +818,7 @@ public class TestKeepDeletes {
   public void testWithMinVersions() throws Exception {
 HTableDescriptor htd =
 hbu.createTableDescriptor(name.getMethodName(), 3, 1000, 1, 
KeepDeletedCells.TRUE);
-Region region = hbu.createLocalHRegion(htd, null, null);
+HRegion region = hbu.createLocalHRegion(htd, null, null);
 
 long ts = EnvironmentEdgeManager.currentTime() - 2000; // 2s in the past
 
@@ -897,7 +897,7 @@ public class TestKeepDeletes {
   public void testWithTTL() throws Exception {
 HTableDescriptor htd =
 hbu.createTableDescriptor(name.getMethodName(), 1, 1000, 1, 
KeepDeletedCells.TTL);
-Region region = hbu.createLocalHRegion(htd, null, null);
+HRegion region = hbu.createLocalHRegion(htd, null, null);
 
 long ts = EnvironmentEdgeManager.currentTime() - 2000; // 2s in the past
 
@@ -945,7 +945,7 @@ public class TestKeepDeletes {
 
   }
 
-  private int countDeleteMarkers(Region region) throws IOException {
+  private int countDeleteMarkers(HRegion region) throws IOException {
 Scan s = new Scan();
 s.setRaw(true);
 // use max versions from the store(s)

http://git-wip-us.apache.org/repos/asf/hbase/blob/d26b8f8d/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMajorCompaction.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMajorCompaction.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMajorCompaction.java
index 0c33bdb..71f18c0 100644
--- 

  1   2   >