hbase git commit: HBASE-18879 HBase FilterList cause KeyOnlyFilter not work
Repository: hbase Updated Branches: refs/heads/HBASE-18410 f67fe4da4 -> 37c63e693 HBASE-18879 HBase FilterList cause KeyOnlyFilter not work Project: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/37c63e69 Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/37c63e69 Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/37c63e69 Branch: refs/heads/HBASE-18410 Commit: 37c63e693bcd6d0b0191792daf19d4a3dd6626d2 Parents: f67fe4d Author: huzhengAuthored: Wed Oct 11 21:17:03 2017 +0800 Committer: zhangduo Committed: Mon Oct 16 11:26:30 2017 +0800 -- .../apache/hadoop/hbase/filter/FilterList.java | 6 +++ .../hadoop/hbase/filter/FilterListBase.java | 3 ++ .../hadoop/hbase/filter/FilterListWithAND.java | 22 + .../hadoop/hbase/filter/FilterListWithOR.java | 22 + .../hadoop/hbase/filter/TestFilterList.java | 48 5 files changed, 85 insertions(+), 16 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hbase/blob/37c63e69/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterList.java -- diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterList.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterList.java index a4fa74b..79f3e78 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterList.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterList.java @@ -66,6 +66,8 @@ final public class FilterList extends FilterBase { filterListBase = new FilterListWithAND(filters); } else if (operator == Operator.MUST_PASS_ONE) { filterListBase = new FilterListWithOR(filters); +} else { + throw new IllegalArgumentException("Invalid operator: " + operator); } this.operator = operator; } @@ -162,6 +164,10 @@ final public class FilterList extends FilterBase { return filterListBase.transformCell(c); } + ReturnCode internalFilterKeyValue(Cell c, Cell currentTransformedCell) throws IOException { +return 
this.filterListBase.internalFilterKeyValue(c, currentTransformedCell); + } + @Override public ReturnCode filterKeyValue(Cell c) throws IOException { return filterListBase.filterKeyValue(c); http://git-wip-us.apache.org/repos/asf/hbase/blob/37c63e69/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterListBase.java -- diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterListBase.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterListBase.java index 2ac5fcd..06da6be 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterListBase.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterListBase.java @@ -107,6 +107,9 @@ public abstract class FilterListBase extends FilterBase { return cell; } + abstract ReturnCode internalFilterKeyValue(Cell c, Cell currentTransformedCell) + throws IOException; + /** * Filters that never filter by modifying the returned List of Cells can inherit this * implementation that does nothing. {@inheritDoc} http://git-wip-us.apache.org/repos/asf/hbase/blob/37c63e69/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterListWithAND.java -- diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterListWithAND.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterListWithAND.java index fa979c0..4909dfd 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterListWithAND.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterListWithAND.java @@ -147,16 +147,26 @@ public class FilterListWithAND extends FilterListBase { "Received code is not valid. 
rc: " + rc + ", localRC: " + localRC); } - private ReturnCode filterKeyValueWithMustPassAll(Cell c) throws IOException { + @Override + ReturnCode internalFilterKeyValue(Cell c, Cell currentTransformedCell) throws IOException { +if (isEmpty()) { + return ReturnCode.INCLUDE; +} ReturnCode rc = ReturnCode.INCLUDE; -Cell transformed = c; +Cell transformed = currentTransformedCell; +this.referenceCell = c; this.seekHintFilter.clear(); for (int i = 0, n = filters.size(); i < n; i++) { Filter filter = filters.get(i); if (filter.filterAllRemaining()) { return ReturnCode.NEXT_ROW; } - ReturnCode localRC = filter.filterKeyValue(c); + ReturnCode localRC; + if (filter instanceof FilterList) { +
hbase git commit: HBASE-18986 Remove unnecessary null check after CellUtil.cloneQualifier()
Repository: hbase Updated Branches: refs/heads/master 202e414eb -> 83af5f2c6 HBASE-18986 Remove unnecessary null check after CellUtil.cloneQualifier() Signed-off-by: Jerry HeProject: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/83af5f2c Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/83af5f2c Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/83af5f2c Branch: refs/heads/master Commit: 83af5f2c623cb3180ab21f17f5681d4328acdc76 Parents: 202e414 Author: Xiang Li Authored: Wed Oct 11 20:55:27 2017 +0800 Committer: Jerry He Committed: Sun Oct 15 13:20:09 2017 -0700 -- .../src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java | 1 - 1 file changed, 1 deletion(-) -- http://git-wip-us.apache.org/repos/asf/hbase/blob/83af5f2c/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java -- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java index 6901e3f..e3d88f6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java @@ -2956,7 +2956,6 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi // This is expensive. if (cell.getTimestamp() == HConstants.LATEST_TIMESTAMP && CellUtil.isDeleteType(cell)) { byte[] qual = CellUtil.cloneQualifier(cell); - if (qual == null) qual = HConstants.EMPTY_BYTE_ARRAY; Integer count = kvCount.get(qual); if (count == null) {
hbase git commit: HBASE-18986 Remove unnecessary null check after CellUtil.cloneQualifier()
Repository: hbase Updated Branches: refs/heads/branch-2 e04b15c68 -> aeaf222e3 HBASE-18986 Remove unnecessary null check after CellUtil.cloneQualifier() Signed-off-by: Jerry HeProject: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/aeaf222e Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/aeaf222e Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/aeaf222e Branch: refs/heads/branch-2 Commit: aeaf222e35e8a7d5c751477e7774417654062e54 Parents: e04b15c Author: Xiang Li Authored: Wed Oct 11 20:55:27 2017 +0800 Committer: Jerry He Committed: Sun Oct 15 13:11:31 2017 -0700 -- .../src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java | 1 - 1 file changed, 1 deletion(-) -- http://git-wip-us.apache.org/repos/asf/hbase/blob/aeaf222e/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java -- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java index 6901e3f..e3d88f6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java @@ -2956,7 +2956,6 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi // This is expensive. if (cell.getTimestamp() == HConstants.LATEST_TIMESTAMP && CellUtil.isDeleteType(cell)) { byte[] qual = CellUtil.cloneQualifier(cell); - if (qual == null) qual = HConstants.EMPTY_BYTE_ARRAY; Integer count = kvCount.get(qual); if (count == null) {
[47/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/5a2158f2/checkstyle.rss -- diff --git a/checkstyle.rss b/checkstyle.rss index acd2adb..ced9295 100644 --- a/checkstyle.rss +++ b/checkstyle.rss @@ -26,7 +26,7 @@ under the License. 2007 - 2017 The Apache Software Foundation File: 2055, - Errors: 13596, + Errors: 13608, Warnings: 0, Infos: 0 @@ -3149,7 +3149,7 @@ under the License. 0 - 4 + 5 @@ -5193,7 +5193,7 @@ under the License. 0 - 48 + 50 @@ -10975,7 +10975,7 @@ under the License. 0 - 5 + 7 @@ -12389,7 +12389,7 @@ under the License. 0 - 4 + 6 @@ -13495,7 +13495,7 @@ under the License. 0 - 1 + 2 @@ -25227,7 +25227,7 @@ under the License. 0 - 15 + 17 @@ -25269,7 +25269,7 @@ under the License. 0 - 2 + 3 @@ -26389,7 +26389,7 @@ under the License. 0 - 4 + 5 http://git-wip-us.apache.org/repos/asf/hbase-site/blob/5a2158f2/coc.html -- diff --git a/coc.html b/coc.html index 557274f..ec1fc8c 100644 --- a/coc.html +++ b/coc.html @@ -7,7 +7,7 @@ - + Apache HBase Code of Conduct Policy @@ -380,7 +380,7 @@ email to mailto:priv...@hbase.apache.org;>the priv https://www.apache.org/;>The Apache Software Foundation. All rights reserved. - Last Published: 2017-10-14 + Last Published: 2017-10-15 http://git-wip-us.apache.org/repos/asf/hbase-site/blob/5a2158f2/cygwin.html -- diff --git a/cygwin.html b/cygwin.html index 82661b2..f23f27e 100644 --- a/cygwin.html +++ b/cygwin.html @@ -7,7 +7,7 @@ - + Apache HBase Installing Apache HBase (TM) on Windows using Cygwin @@ -679,7 +679,7 @@ Now your HBase server is running, start coding and build that next https://www.apache.org/;>The Apache Software Foundation. All rights reserved. 
- Last Published: 2017-10-14 + Last Published: 2017-10-15 http://git-wip-us.apache.org/repos/asf/hbase-site/blob/5a2158f2/dependencies.html -- diff --git a/dependencies.html b/dependencies.html index 0c1d34d..c282b56 100644 --- a/dependencies.html +++ b/dependencies.html @@ -7,7 +7,7 @@ - + Apache HBase Project Dependencies @@ -445,7 +445,7 @@ https://www.apache.org/;>The Apache Software Foundation. All rights reserved. - Last Published: 2017-10-14 + Last Published: 2017-10-15 http://git-wip-us.apache.org/repos/asf/hbase-site/blob/5a2158f2/dependency-convergence.html -- diff --git a/dependency-convergence.html b/dependency-convergence.html index 660e8a7..dbef118 100644 --- a/dependency-convergence.html +++ b/dependency-convergence.html @@ -7,7 +7,7 @@ - + Apache HBase Reactor Dependency Convergence @@ -820,7 +820,7 @@ https://www.apache.org/;>The Apache Software Foundation. All rights reserved. - Last Published: 2017-10-14 + Last Published: 2017-10-15
[26/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/5a2158f2/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/Export.RegionOp.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/Export.RegionOp.html b/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/Export.RegionOp.html index 95f6ef8..45f0981 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/Export.RegionOp.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/Export.RegionOp.html @@ -61,487 +61,488 @@ 053import org.apache.hadoop.hbase.protobuf.ProtobufUtil; 054import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.DelegationToken; 055import org.apache.hadoop.hbase.protobuf.generated.ExportProtos; -056import org.apache.hadoop.hbase.regionserver.InternalScanner; -057import org.apache.hadoop.hbase.regionserver.Region; -058import org.apache.hadoop.hbase.regionserver.RegionScanner; -059import org.apache.hadoop.hbase.security.User; -060import org.apache.hadoop.hbase.security.UserProvider; -061import org.apache.hadoop.hbase.security.token.FsDelegationToken; -062import org.apache.hadoop.hbase.util.ArrayUtils; -063import org.apache.hadoop.hbase.util.ByteStringer; -064import org.apache.hadoop.hbase.util.Bytes; -065import org.apache.hadoop.hbase.util.Triple; -066import org.apache.hadoop.io.SequenceFile; -067import org.apache.hadoop.io.Text; -068import org.apache.hadoop.io.compress.CompressionCodec; -069import org.apache.hadoop.io.compress.DefaultCodec; -070import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat; -071import org.apache.hadoop.security.token.Token; -072import org.apache.hadoop.util.GenericOptionsParser; -073import org.apache.hadoop.util.ReflectionUtils; -074import org.apache.yetus.audience.InterfaceAudience; -075import org.apache.yetus.audience.InterfaceStability; -076 -077import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting; -078 -079import com.google.protobuf.RpcCallback; -080import 
com.google.protobuf.RpcController; -081import com.google.protobuf.Service; -082 -083/** -084 * Export an HBase table. Writes content to sequence files up in HDFS. Use -085 * {@link Import} to read it back in again. It is implemented by the endpoint -086 * technique. -087 * -088 * @see org.apache.hadoop.hbase.mapreduce.Export -089 */ -090@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC) -091@InterfaceStability.Evolving -092public class Export extends ExportProtos.ExportService implements RegionCoprocessor { -093 -094 private static final Log LOG = LogFactory.getLog(Export.class); -095 private static final Class? extends CompressionCodec DEFAULT_CODEC = DefaultCodec.class; -096 private static final SequenceFile.CompressionType DEFAULT_TYPE = SequenceFile.CompressionType.RECORD; -097 private RegionCoprocessorEnvironment env = null; -098 private UserProvider userProvider; -099 -100 public static void main(String[] args) throws Throwable { -101Mapbyte[], Response response = run(HBaseConfiguration.create(), args); -102System.exit(response == null ? 
-1 : 0); -103 } -104 -105 @VisibleForTesting -106 static Mapbyte[], Response run(final Configuration conf, final String[] args) throws Throwable { -107String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs(); -108if (!ExportUtils.isValidArguements(args)) { -109 ExportUtils.usage("Wrong number of arguments: " + ArrayUtils.length(otherArgs)); -110 return null; -111} -112TripleTableName, Scan, Path arguments = ExportUtils.getArgumentsFromCommandLine(conf, otherArgs); -113return run(conf, arguments.getFirst(), arguments.getSecond(), arguments.getThird()); -114 } -115 -116 public static Mapbyte[], Response run(final Configuration conf, TableName tableName, Scan scan, Path dir) throws Throwable { -117FileSystem fs = dir.getFileSystem(conf); -118UserProvider userProvider = UserProvider.instantiate(conf); -119checkDir(fs, dir); -120FsDelegationToken fsDelegationToken = new FsDelegationToken(userProvider, "renewer"); -121 fsDelegationToken.acquireDelegationToken(fs); -122try { -123 final ExportProtos.ExportRequest request = getConfiguredRequest(conf, dir, -124scan, fsDelegationToken.getUserToken()); -125 try (Connection con = ConnectionFactory.createConnection(conf); -126 Table table = con.getTable(tableName)) { -127Mapbyte[], Response result = new TreeMap(Bytes.BYTES_COMPARATOR); -128 table.coprocessorService(ExportProtos.ExportService.class, -129 scan.getStartRow(), -130 scan.getStopRow(), -131 (ExportProtos.ExportService service) - { -132ServerRpcController controller = new ServerRpcController(); -133Mapbyte[], ExportProtos.ExportResponse rval = new TreeMap(Bytes.BYTES_COMPARATOR); -134
[37/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/5a2158f2/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/HRegion.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/HRegion.html b/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/HRegion.html index f54dfe2..afa74ff 100644 --- a/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/HRegion.html +++ b/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/HRegion.html @@ -89,26 +89,32 @@ +org.apache.hadoop.hbase.coprocessor + +Table of Contents + + + org.apache.hadoop.hbase.procedure.flush - + org.apache.hadoop.hbase.regionserver - + org.apache.hadoop.hbase.regionserver.handler - + org.apache.hadoop.hbase.regionserver.snapshot - + org.apache.hadoop.hbase.snapshot - + org.apache.hadoop.hbase.util @@ -135,6 +141,24 @@ + + + +Uses of HRegion in org.apache.hadoop.hbase.coprocessor + +Fields in org.apache.hadoop.hbase.coprocessor declared as HRegion + +Modifier and Type +Field and Description + + + +private HRegion +Export.ScanCoprocessor.region + + + + @@ -357,34 +381,38 @@ HRegion -RegionServerServices.PostOpenDeployContext.getRegion() +HRegionServer.getOnlineRegion(byte[]regionName) +HRegion +RegionServerServices.PostOpenDeployContext.getRegion() + + protected HRegion HRegionServer.getRegion(byte[]regionName) Protected utility method for safely obtaining an HRegion handle. 
- + HRegion RSRpcServices.getRegion(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifierregionSpecifier) Find the HRegion based on a region specifier - + HRegion HRegionServer.getRegion(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">StringencodedRegionName) - + protected HRegion HRegionServer.getRegionByEncodedName(byte[]regionName, http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">StringencodedRegionName) - + HRegion HRegionServer.getRegionByEncodedName(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">StringencodedRegionName) - + (package private) static HRegion HRegion.newHRegion(org.apache.hadoop.fs.PathtableDir, WALwal, @@ -397,13 +425,13 @@ HConstants.REGION_IMPL configuration property. - + protected HRegion HRegion.openHRegion(CancelableProgressablereporter) Open HRegion. - + static HRegion HRegion.openHRegion(org.apache.hadoop.conf.Configurationconf, org.apache.hadoop.fs.FileSystemfs, @@ -417,7 +445,7 @@ Open a Region. - + static HRegion HRegion.openHRegion(org.apache.hadoop.conf.Configurationconf, org.apache.hadoop.fs.FileSystemfs, @@ -428,7 +456,7 @@ Open a Region. - + static HRegion HRegion.openHRegion(org.apache.hadoop.conf.Configurationconf, org.apache.hadoop.fs.FileSystemfs, @@ -441,14 +469,14 @@ Open a Region. - + static HRegion HRegion.openHRegion(HRegionother, CancelableProgressablereporter) Useful when reopening a closed region (normally for unit tests) - + static HRegion HRegion.openHRegion(org.apache.hadoop.fs.PathrootDir, RegionInfoinfo, @@ -458,7 +486,7 @@ Open a Region. - + static HRegion HRegion.openHRegion(org.apache.hadoop.fs.PathrootDir, RegionInfoinfo, @@ -470,7 +498,7 @@ Open a Region. - + static HRegion HRegion.openHRegion(RegionInfoinfo, TableDescriptorhtd, @@ -479,7 +507,7 @@ Open a Region. 
- + static HRegion HRegion.openHRegion(RegionInfoinfo, TableDescriptorhtd, @@ -585,81 +613,86 @@ Mutationm) +void +SecureBulkLoadManager.cleanupBulkLoad(HRegionregion, + org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CleanupBulkLoadRequestrequest) + + private void RSRpcServices.closeScanner(HRegionregion, RegionScannerscanner, http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">StringscannerName, RpcCallContextcontext) - + protected void KeyPrefixRegionSplitPolicy.configureForRegion(HRegionregion) - + protected void RegionSplitPolicy.configureForRegion(HRegionregion) Upon construction, this method will be called with the region to be governed. - + protected void DelimitedKeyPrefixRegionSplitPolicy.configureForRegion(HRegionregion) - + protected void IncreasingToUpperBoundRegionSplitPolicy.configureForRegion(HRegionregion) - + protected void FlushAllLargeStoresPolicy.configureForRegion(HRegionregion) - + protected void FlushPolicy.configureForRegion(HRegionregion) Upon construction, this method will be called with the region to be
[21/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/5a2158f2/devapidocs/src-html/org/apache/hadoop/hbase/favored/FavoredNodesManager.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/favored/FavoredNodesManager.html b/devapidocs/src-html/org/apache/hadoop/hbase/favored/FavoredNodesManager.html index 4d42a54..7d5ce35 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/favored/FavoredNodesManager.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/favored/FavoredNodesManager.html @@ -125,7 +125,7 @@ 117 * we apply any favored nodes logic on a region. 118 */ 119 public static boolean isFavoredNodeApplicable(RegionInfo regionInfo) { -120return !regionInfo.isSystemTable(); +120return !regionInfo.getTable().isSystemTable(); 121 } 122 123 /** http://git-wip-us.apache.org/repos/asf/hbase-site/blob/5a2158f2/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html index 7052d81..8138d13 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html @@ -1653,7 +1653,7 @@ 1645} 1646 1647ServerName dest; -1648ListServerName exclude = hri.isSystemTable() ? assignmentManager.getExcludedServersForSystemTable() +1648ListServerName exclude = hri.getTable().isSystemTable() ? 
assignmentManager.getExcludedServersForSystemTable() 1649: new ArrayList(1); 1650if (destServerName != null exclude.contains(ServerName.valueOf(Bytes.toString(destServerName { 1651 LOG.info( http://git-wip-us.apache.org/repos/asf/hbase-site/blob/5a2158f2/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html index 7052d81..8138d13 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html @@ -1653,7 +1653,7 @@ 1645} 1646 1647ServerName dest; -1648ListServerName exclude = hri.isSystemTable() ? assignmentManager.getExcludedServersForSystemTable() +1648ListServerName exclude = hri.getTable().isSystemTable() ? assignmentManager.getExcludedServersForSystemTable() 1649: new ArrayList(1); 1650if (destServerName != null exclude.contains(ServerName.valueOf(Bytes.toString(destServerName { 1651 LOG.info( http://git-wip-us.apache.org/repos/asf/hbase-site/blob/5a2158f2/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html index 7052d81..8138d13 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html @@ -1653,7 +1653,7 @@ 1645} 1646 1647ServerName dest; -1648ListServerName exclude = hri.isSystemTable() ? assignmentManager.getExcludedServersForSystemTable() +1648ListServerName exclude = hri.getTable().isSystemTable() ? 
assignmentManager.getExcludedServersForSystemTable() 1649: new ArrayList(1); 1650if (destServerName != null exclude.contains(ServerName.valueOf(Bytes.toString(destServerName { 1651 LOG.info( http://git-wip-us.apache.org/repos/asf/hbase-site/blob/5a2158f2/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionChore.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionChore.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionChore.html index 419a935..97005c3 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionChore.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionChore.html @@ -530,7 +530,7 @@ 522} 523return regions.stream() 524 .map(RegionStateNode::getRegionInfo) -525 .filter(RegionInfo::isSystemTable) +525.filter(r - r.getTable().isSystemTable()) 526.collect(Collectors.toList());
[04/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/5a2158f2/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HStore.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HStore.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HStore.html index 5243b06..af3ba51 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HStore.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HStore.html @@ -2128,464 +2128,463 @@ 2120return this.region; 2121 } 2122 -2123 @Override -2124 public RegionCoprocessorHost getCoprocessorHost() { -2125return this.region.getCoprocessorHost(); -2126 } -2127 -2128 @Override -2129 public RegionInfo getRegionInfo() { -2130return this.fs.getRegionInfo(); -2131 } -2132 -2133 @Override -2134 public boolean areWritesEnabled() { -2135return this.region.areWritesEnabled(); -2136 } -2137 -2138 @Override -2139 public long getSmallestReadPoint() { -2140return this.region.getSmallestReadPoint(); -2141 } -2142 -2143 /** -2144 * Adds or replaces the specified KeyValues. -2145 * p -2146 * For each KeyValue specified, if a cell with the same row, family, and qualifier exists in -2147 * MemStore, it will be replaced. Otherwise, it will just be inserted to MemStore. -2148 * p -2149 * This operation is atomic on each KeyValue (row/family/qualifier) but not necessarily atomic -2150 * across all of them. 
-2151 * @param cells -2152 * @param readpoint readpoint below which we can safely remove duplicate KVs -2153 * @param memstoreSize -2154 * @throws IOException -2155 */ -2156 public void upsert(IterableCell cells, long readpoint, MemStoreSize memstoreSize) -2157 throws IOException { -2158this.lock.readLock().lock(); -2159try { -2160 this.memstore.upsert(cells, readpoint, memstoreSize); -2161} finally { -2162 this.lock.readLock().unlock(); -2163} -2164 } -2165 -2166 public StoreFlushContext createFlushContext(long cacheFlushId) { -2167return new StoreFlusherImpl(cacheFlushId); -2168 } -2169 -2170 private final class StoreFlusherImpl implements StoreFlushContext { -2171 -2172private long cacheFlushSeqNum; -2173private MemStoreSnapshot snapshot; -2174private ListPath tempFiles; -2175private ListPath committedFiles; -2176private long cacheFlushCount; -2177private long cacheFlushSize; -2178private long outputFileSize; -2179 -2180private StoreFlusherImpl(long cacheFlushSeqNum) { -2181 this.cacheFlushSeqNum = cacheFlushSeqNum; -2182} -2183 -2184/** -2185 * This is not thread safe. The caller should have a lock on the region or the store. -2186 * If necessary, the lock can be added with the patch provided in HBASE-10087 -2187 */ -2188@Override -2189public void prepare() { -2190 // passing the current sequence number of the wal - to allow bookkeeping in the memstore -2191 this.snapshot = memstore.snapshot(); -2192 this.cacheFlushCount = snapshot.getCellsCount(); -2193 this.cacheFlushSize = snapshot.getDataSize(); -2194 committedFiles = new ArrayList(1); -2195} -2196 -2197@Override -2198public void flushCache(MonitoredTask status) throws IOException { -2199 RegionServerServices rsService = region.getRegionServerServices(); -2200 ThroughputController throughputController = -2201 rsService == null ? 
null : rsService.getFlushThroughputController(); -2202 tempFiles = HStore.this.flushCache(cacheFlushSeqNum, snapshot, status, throughputController); -2203} -2204 -2205@Override -2206public boolean commit(MonitoredTask status) throws IOException { -2207 if (this.tempFiles == null || this.tempFiles.isEmpty()) { -2208return false; -2209 } -2210 ListHStoreFile storeFiles = new ArrayList(this.tempFiles.size()); -2211 for (Path storeFilePath : tempFiles) { -2212try { -2213 HStoreFile sf = HStore.this.commitFile(storeFilePath, cacheFlushSeqNum, status); -2214 outputFileSize += sf.getReader().length(); -2215 storeFiles.add(sf); -2216} catch (IOException ex) { -2217 LOG.error("Failed to commit store file " + storeFilePath, ex); -2218 // Try to delete the files we have committed before. -2219 for (HStoreFile sf : storeFiles) { -2220Path pathToDelete = sf.getPath(); -2221try { - sf.deleteStoreFile(); -2223} catch (IOException deleteEx) { -2224 LOG.fatal("Failed to delete store file we committed, halting " + pathToDelete, ex); -2225 Runtime.getRuntime().halt(1); -2226} -2227 } -2228 throw new IOException("Failed to commit the flush", ex); -2229} -2230 } -2231 -2232 for (HStoreFile sf :
[16/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/5a2158f2/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResultImpl.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResultImpl.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResultImpl.html index 5d138ea..81d256e 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResultImpl.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResultImpl.html @@ -2387,7 +2387,7 @@ 2379 return true; 2380} 2381long modifiedFlushCheckInterval = flushCheckInterval; -2382if (getRegionInfo().isSystemTable() +2382if (getRegionInfo().getTable().isSystemTable() 2383getRegionInfo().getReplicaId() == RegionInfo.DEFAULT_REPLICA_ID) { 2384 modifiedFlushCheckInterval = SYSTEM_CACHE_FLUSH_INTERVAL; 2385} @@ -7869,7 +7869,7 @@ 7861 */ 7862 public byte[] checkSplit() { 7863// Can't split META -7864if (this.getRegionInfo().isMetaTable() || +7864if (this.getRegionInfo().isMetaRegion() || 7865 TableName.NAMESPACE_TABLE_NAME.equals(this.getRegionInfo().getTable())) { 7866 if (shouldForceSplit()) { 7867LOG.warn("Cannot split meta region in HBase 0.20 and above"); @@ -7914,374 +7914,372 @@ 7906 } 7907 7908 /** @return the coprocessor host */ -7909 @Override -7910 public RegionCoprocessorHost getCoprocessorHost() { -7911return coprocessorHost; -7912 } -7913 -7914 /** @param coprocessorHost the new coprocessor host */ -7915 public void setCoprocessorHost(final RegionCoprocessorHost coprocessorHost) { -7916this.coprocessorHost = coprocessorHost; -7917 } -7918 -7919 @Override -7920 public void startRegionOperation() throws IOException { -7921 startRegionOperation(Operation.ANY); -7922 } -7923 -7924 @Override -7925 @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="SF_SWITCH_FALLTHROUGH", -7926justification="Intentional") -7927 public void startRegionOperation(Operation op) throws IOException { 
-7928switch (op) { -7929 case GET: // read operations -7930 case SCAN: -7931checkReadsEnabled(); -7932 case INCREMENT: // write operations -7933 case APPEND: -7934 case SPLIT_REGION: -7935 case MERGE_REGION: -7936 case PUT: -7937 case DELETE: -7938 case BATCH_MUTATE: -7939 case COMPACT_REGION: -7940 case SNAPSHOT: -7941// when a region is in recovering state, no read, split, merge or snapshot is allowed -7942if (isRecovering() (this.disallowWritesInRecovering || -7943 (op != Operation.PUT op != Operation.DELETE op != Operation.BATCH_MUTATE))) { -7944 throw new RegionInRecoveryException(getRegionInfo().getRegionNameAsString() + -7945" is recovering; cannot take reads"); -7946} -7947break; -7948 default: -7949break; -7950} -7951if (op == Operation.MERGE_REGION || op == Operation.SPLIT_REGION -7952|| op == Operation.COMPACT_REGION) { -7953 // split, merge or compact region doesn't need to check the closing/closed state or lock the -7954 // region -7955 return; -7956} -7957if (this.closing.get()) { -7958 throw new NotServingRegionException(getRegionInfo().getRegionNameAsString() + " is closing"); -7959} -7960lock(lock.readLock()); -7961if (this.closed.get()) { -7962 lock.readLock().unlock(); -7963 throw new NotServingRegionException(getRegionInfo().getRegionNameAsString() + " is closed"); -7964} -7965// The unit for snapshot is a region. So, all stores for this region must be -7966// prepared for snapshot operation before proceeding. 
-7967if (op == Operation.SNAPSHOT) { -7968 stores.values().forEach(HStore::preSnapshotOperation); -7969} -7970try { -7971 if (coprocessorHost != null) { -7972 coprocessorHost.postStartRegionOperation(op); -7973 } -7974} catch (Exception e) { -7975 lock.readLock().unlock(); -7976 throw new IOException(e); -7977} -7978 } -7979 -7980 @Override -7981 public void closeRegionOperation() throws IOException { -7982 closeRegionOperation(Operation.ANY); -7983 } -7984 -7985 @Override -7986 public void closeRegionOperation(Operation operation) throws IOException { -7987if (operation == Operation.SNAPSHOT) { -7988 stores.values().forEach(HStore::postSnapshotOperation); -7989} -7990lock.readLock().unlock(); -7991if (coprocessorHost != null) { -7992 coprocessorHost.postCloseRegionOperation(operation); -7993} -7994 } -7995 -7996 /** -7997 * This method needs to be called before any public call that reads or -7998 * modifies stores in bulk. It has to be called just before a try. -7999 *
[24/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/5a2158f2/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/Export.ScanCoprocessor.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/Export.ScanCoprocessor.html b/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/Export.ScanCoprocessor.html index 95f6ef8..45f0981 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/Export.ScanCoprocessor.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/Export.ScanCoprocessor.html @@ -61,487 +61,488 @@ 053import org.apache.hadoop.hbase.protobuf.ProtobufUtil; 054import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.DelegationToken; 055import org.apache.hadoop.hbase.protobuf.generated.ExportProtos; -056import org.apache.hadoop.hbase.regionserver.InternalScanner; -057import org.apache.hadoop.hbase.regionserver.Region; -058import org.apache.hadoop.hbase.regionserver.RegionScanner; -059import org.apache.hadoop.hbase.security.User; -060import org.apache.hadoop.hbase.security.UserProvider; -061import org.apache.hadoop.hbase.security.token.FsDelegationToken; -062import org.apache.hadoop.hbase.util.ArrayUtils; -063import org.apache.hadoop.hbase.util.ByteStringer; -064import org.apache.hadoop.hbase.util.Bytes; -065import org.apache.hadoop.hbase.util.Triple; -066import org.apache.hadoop.io.SequenceFile; -067import org.apache.hadoop.io.Text; -068import org.apache.hadoop.io.compress.CompressionCodec; -069import org.apache.hadoop.io.compress.DefaultCodec; -070import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat; -071import org.apache.hadoop.security.token.Token; -072import org.apache.hadoop.util.GenericOptionsParser; -073import org.apache.hadoop.util.ReflectionUtils; -074import org.apache.yetus.audience.InterfaceAudience; -075import org.apache.yetus.audience.InterfaceStability; -076 -077import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting; -078 -079import 
com.google.protobuf.RpcCallback; -080import com.google.protobuf.RpcController; -081import com.google.protobuf.Service; -082 -083/** -084 * Export an HBase table. Writes content to sequence files up in HDFS. Use -085 * {@link Import} to read it back in again. It is implemented by the endpoint -086 * technique. -087 * -088 * @see org.apache.hadoop.hbase.mapreduce.Export -089 */ -090@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC) -091@InterfaceStability.Evolving -092public class Export extends ExportProtos.ExportService implements RegionCoprocessor { -093 -094 private static final Log LOG = LogFactory.getLog(Export.class); -095 private static final Class? extends CompressionCodec DEFAULT_CODEC = DefaultCodec.class; -096 private static final SequenceFile.CompressionType DEFAULT_TYPE = SequenceFile.CompressionType.RECORD; -097 private RegionCoprocessorEnvironment env = null; -098 private UserProvider userProvider; -099 -100 public static void main(String[] args) throws Throwable { -101Mapbyte[], Response response = run(HBaseConfiguration.create(), args); -102System.exit(response == null ? 
-1 : 0); -103 } -104 -105 @VisibleForTesting -106 static Mapbyte[], Response run(final Configuration conf, final String[] args) throws Throwable { -107String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs(); -108if (!ExportUtils.isValidArguements(args)) { -109 ExportUtils.usage("Wrong number of arguments: " + ArrayUtils.length(otherArgs)); -110 return null; -111} -112TripleTableName, Scan, Path arguments = ExportUtils.getArgumentsFromCommandLine(conf, otherArgs); -113return run(conf, arguments.getFirst(), arguments.getSecond(), arguments.getThird()); -114 } -115 -116 public static Mapbyte[], Response run(final Configuration conf, TableName tableName, Scan scan, Path dir) throws Throwable { -117FileSystem fs = dir.getFileSystem(conf); -118UserProvider userProvider = UserProvider.instantiate(conf); -119checkDir(fs, dir); -120FsDelegationToken fsDelegationToken = new FsDelegationToken(userProvider, "renewer"); -121 fsDelegationToken.acquireDelegationToken(fs); -122try { -123 final ExportProtos.ExportRequest request = getConfiguredRequest(conf, dir, -124scan, fsDelegationToken.getUserToken()); -125 try (Connection con = ConnectionFactory.createConnection(conf); -126 Table table = con.getTable(tableName)) { -127Mapbyte[], Response result = new TreeMap(Bytes.BYTES_COMPARATOR); -128 table.coprocessorService(ExportProtos.ExportService.class, -129 scan.getStartRow(), -130 scan.getStopRow(), -131 (ExportProtos.ExportService service) - { -132ServerRpcController controller = new ServerRpcController(); -133Mapbyte[], ExportProtos.ExportResponse rval = new
[14/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/5a2158f2/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ObservedExceptionsInBatch.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ObservedExceptionsInBatch.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ObservedExceptionsInBatch.html index 5d138ea..81d256e 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ObservedExceptionsInBatch.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ObservedExceptionsInBatch.html @@ -2387,7 +2387,7 @@ 2379 return true; 2380} 2381long modifiedFlushCheckInterval = flushCheckInterval; -2382if (getRegionInfo().isSystemTable() +2382if (getRegionInfo().getTable().isSystemTable() 2383getRegionInfo().getReplicaId() == RegionInfo.DEFAULT_REPLICA_ID) { 2384 modifiedFlushCheckInterval = SYSTEM_CACHE_FLUSH_INTERVAL; 2385} @@ -7869,7 +7869,7 @@ 7861 */ 7862 public byte[] checkSplit() { 7863// Can't split META -7864if (this.getRegionInfo().isMetaTable() || +7864if (this.getRegionInfo().isMetaRegion() || 7865 TableName.NAMESPACE_TABLE_NAME.equals(this.getRegionInfo().getTable())) { 7866 if (shouldForceSplit()) { 7867LOG.warn("Cannot split meta region in HBase 0.20 and above"); @@ -7914,374 +7914,372 @@ 7906 } 7907 7908 /** @return the coprocessor host */ -7909 @Override -7910 public RegionCoprocessorHost getCoprocessorHost() { -7911return coprocessorHost; -7912 } -7913 -7914 /** @param coprocessorHost the new coprocessor host */ -7915 public void setCoprocessorHost(final RegionCoprocessorHost coprocessorHost) { -7916this.coprocessorHost = coprocessorHost; -7917 } -7918 -7919 @Override -7920 public void startRegionOperation() throws IOException { -7921 startRegionOperation(Operation.ANY); -7922 } -7923 -7924 @Override -7925 @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="SF_SWITCH_FALLTHROUGH", -7926justification="Intentional") -7927 public void 
startRegionOperation(Operation op) throws IOException { -7928switch (op) { -7929 case GET: // read operations -7930 case SCAN: -7931checkReadsEnabled(); -7932 case INCREMENT: // write operations -7933 case APPEND: -7934 case SPLIT_REGION: -7935 case MERGE_REGION: -7936 case PUT: -7937 case DELETE: -7938 case BATCH_MUTATE: -7939 case COMPACT_REGION: -7940 case SNAPSHOT: -7941// when a region is in recovering state, no read, split, merge or snapshot is allowed -7942if (isRecovering() (this.disallowWritesInRecovering || -7943 (op != Operation.PUT op != Operation.DELETE op != Operation.BATCH_MUTATE))) { -7944 throw new RegionInRecoveryException(getRegionInfo().getRegionNameAsString() + -7945" is recovering; cannot take reads"); -7946} -7947break; -7948 default: -7949break; -7950} -7951if (op == Operation.MERGE_REGION || op == Operation.SPLIT_REGION -7952|| op == Operation.COMPACT_REGION) { -7953 // split, merge or compact region doesn't need to check the closing/closed state or lock the -7954 // region -7955 return; -7956} -7957if (this.closing.get()) { -7958 throw new NotServingRegionException(getRegionInfo().getRegionNameAsString() + " is closing"); -7959} -7960lock(lock.readLock()); -7961if (this.closed.get()) { -7962 lock.readLock().unlock(); -7963 throw new NotServingRegionException(getRegionInfo().getRegionNameAsString() + " is closed"); -7964} -7965// The unit for snapshot is a region. So, all stores for this region must be -7966// prepared for snapshot operation before proceeding. 
-7967if (op == Operation.SNAPSHOT) { -7968 stores.values().forEach(HStore::preSnapshotOperation); -7969} -7970try { -7971 if (coprocessorHost != null) { -7972 coprocessorHost.postStartRegionOperation(op); -7973 } -7974} catch (Exception e) { -7975 lock.readLock().unlock(); -7976 throw new IOException(e); -7977} -7978 } -7979 -7980 @Override -7981 public void closeRegionOperation() throws IOException { -7982 closeRegionOperation(Operation.ANY); -7983 } -7984 -7985 @Override -7986 public void closeRegionOperation(Operation operation) throws IOException { -7987if (operation == Operation.SNAPSHOT) { -7988 stores.values().forEach(HStore::postSnapshotOperation); -7989} -7990lock.readLock().unlock(); -7991if (coprocessorHost != null) { -7992 coprocessorHost.postCloseRegionOperation(operation); -7993} -7994 } -7995 -7996 /** -7997 * This method needs to be called before any public call that reads or -7998 * modifies stores in bulk.
[51/51] [partial] hbase-site git commit: Published site at .
Published site at . Project: http://git-wip-us.apache.org/repos/asf/hbase-site/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase-site/commit/5a2158f2 Tree: http://git-wip-us.apache.org/repos/asf/hbase-site/tree/5a2158f2 Diff: http://git-wip-us.apache.org/repos/asf/hbase-site/diff/5a2158f2 Branch: refs/heads/asf-site Commit: 5a2158f21e4641c4db8a97ddb409e9bff6035cb6 Parents: 6ca7e94 Author: jenkinsAuthored: Sun Oct 15 15:15:28 2017 + Committer: jenkins Committed: Sun Oct 15 15:15:28 2017 + -- acid-semantics.html |4 +- apache_hbase_reference_guide.pdf|4 +- apidocs/index-all.html |4 - .../org/apache/hadoop/hbase/HRegionInfo.html|4 - .../apache/hadoop/hbase/client/RegionInfo.html | 92 +- .../apache/hadoop/hbase/client/RegionInfo.html | 1028 +++-- book.html |2 +- bulk-loads.html |4 +- checkstyle-aggregate.html | 4048 +- checkstyle.rss | 18 +- coc.html|4 +- cygwin.html |4 +- dependencies.html |4 +- dependency-convergence.html |4 +- dependency-info.html|4 +- dependency-management.html |4 +- devapidocs/constant-values.html |6 +- devapidocs/index-all.html | 43 +- .../org/apache/hadoop/hbase/HRegionInfo.html|4 - .../hadoop/hbase/backup/package-tree.html |4 +- .../hadoop/hbase/class-use/CellComparator.html |8 +- .../apache/hadoop/hbase/client/RegionInfo.html | 116 +- .../RegionInfoBuilder.MutableRegionInfo.html| 72 +- .../hadoop/hbase/client/package-tree.html | 26 +- ...essorHost.EnvironmentPriorityComparator.html |6 +- .../CoprocessorHost.ObserverGetter.html |2 +- .../CoprocessorHost.ObserverOperation.html | 12 +- ...ocessorHost.ObserverOperationWithResult.html | 16 +- ...ssorHost.ObserverOperationWithoutResult.html | 10 +- .../hbase/coprocessor/CoprocessorHost.html | 95 +- .../coprocessor/Export.PrivilegedWriter.html| 18 +- .../hbase/coprocessor/Export.RegionOp.html |8 +- .../hbase/coprocessor/Export.Response.html | 14 +- .../coprocessor/Export.ScanCoprocessor.html | 16 +- .../hbase/coprocessor/Export.SecureWriter.html | 12 +- 
.../apache/hadoop/hbase/coprocessor/Export.html | 46 +- .../hadoop/hbase/filter/package-tree.html |8 +- .../hadoop/hbase/io/class-use/TimeRange.html| 17 - .../hadoop/hbase/io/hfile/package-tree.html |6 +- .../apache/hadoop/hbase/ipc/package-tree.html |2 +- .../hadoop/hbase/mapreduce/package-tree.html|2 +- .../hbase/master/balancer/package-tree.html |2 +- .../hadoop/hbase/master/package-tree.html |6 +- .../org/apache/hadoop/hbase/package-tree.html | 14 +- .../hadoop/hbase/procedure2/package-tree.html |6 +- .../hadoop/hbase/quotas/package-tree.html |4 +- .../regionserver/CSLMImmutableSegment.html |4 +- .../regionserver/CellArrayImmutableSegment.html |4 +- .../regionserver/CellChunkImmutableSegment.html |4 +- .../regionserver/CompositeImmutableSegment.html | 148 +- .../hadoop/hbase/regionserver/HRegion.html | 68 +- .../hbase/regionserver/HRegionServer.html |4 +- .../regionserver/HStore.StoreFlusherImpl.html | 32 +- .../hadoop/hbase/regionserver/HStore.html | 76 +- .../hbase/regionserver/ImmutableSegment.html| 72 +- .../hbase/regionserver/MutableSegment.html | 52 +- .../hbase/regionserver/Region.Operation.html|4 +- .../hadoop/hbase/regionserver/Region.html | 103 +- ...processorHost.BulkLoadObserverOperation.html |4 +- ...RegionCoprocessorHost.RegionEnvironment.html | 24 +- ...CoprocessorHost.RegionObserverOperation.html |6 +- ...processorHost.TableCoprocessorAttribute.html | 20 +- .../regionserver/RegionCoprocessorHost.html | 161 +- ...CoprocessorHost.RegionServerEnvironment.html | 14 +- ...essorHost.RegionServerObserverOperation.html |6 +- .../RegionServerCoprocessorHost.html| 33 +- .../regionserver/SecureBulkLoadManager.html | 12 +- .../hadoop/hbase/regionserver/Segment.html | 133 +- .../apache/hadoop/hbase/regionserver/Store.html | 117 +- ...imeRangeTracker.NonSyncTimeRangeTracker.html | 26 +- .../TimeRangeTracker.SyncTimeRangeTracker.html | 26 +- .../hbase/regionserver/TimeRangeTracker.html|2 +- .../hbase/regionserver/class-use/CellSet.html |5 +- 
.../hbase/regionserver/class-use/HRegion.html |
[36/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/5a2158f2/devapidocs/org/apache/hadoop/hbase/security/access/package-tree.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/security/access/package-tree.html b/devapidocs/org/apache/hadoop/hbase/security/access/package-tree.html index 793c33c..5aa2f80 100644 --- a/devapidocs/org/apache/hadoop/hbase/security/access/package-tree.html +++ b/devapidocs/org/apache/hadoop/hbase/security/access/package-tree.html @@ -136,9 +136,9 @@ java.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true; title="class or interface in java.lang">EnumE (implements java.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true; title="class or interface in java.lang">ComparableT, java.io.http://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true; title="class or interface in java.io">Serializable) -org.apache.hadoop.hbase.security.access.AccessControlFilter.Strategy org.apache.hadoop.hbase.security.access.Permission.Action org.apache.hadoop.hbase.security.access.AccessController.OpType +org.apache.hadoop.hbase.security.access.AccessControlFilter.Strategy http://git-wip-us.apache.org/repos/asf/hbase-site/blob/5a2158f2/devapidocs/org/apache/hadoop/hbase/security/package-tree.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/security/package-tree.html b/devapidocs/org/apache/hadoop/hbase/security/package-tree.html index c4d3f6e..08da01c 100644 --- a/devapidocs/org/apache/hadoop/hbase/security/package-tree.html +++ b/devapidocs/org/apache/hadoop/hbase/security/package-tree.html @@ -191,9 +191,9 @@ java.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true; title="class or interface in java.lang">EnumE (implements java.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true; title="class or interface in java.lang">ComparableT, 
java.io.http://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true; title="class or interface in java.io">Serializable) -org.apache.hadoop.hbase.security.SaslUtil.QualityOfProtection org.apache.hadoop.hbase.security.AuthMethod org.apache.hadoop.hbase.security.SaslStatus +org.apache.hadoop.hbase.security.SaslUtil.QualityOfProtection http://git-wip-us.apache.org/repos/asf/hbase-site/blob/5a2158f2/devapidocs/org/apache/hadoop/hbase/thrift/package-tree.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/thrift/package-tree.html b/devapidocs/org/apache/hadoop/hbase/thrift/package-tree.html index 06ce7fd..29f4bfc 100644 --- a/devapidocs/org/apache/hadoop/hbase/thrift/package-tree.html +++ b/devapidocs/org/apache/hadoop/hbase/thrift/package-tree.html @@ -198,8 +198,8 @@ java.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true; title="class or interface in java.lang">EnumE (implements java.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true; title="class or interface in java.lang">ComparableT, java.io.http://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true; title="class or interface in java.io">Serializable) -org.apache.hadoop.hbase.thrift.MetricsThriftServerSourceFactoryImpl.FactoryStorage org.apache.hadoop.hbase.thrift.ThriftMetrics.ThriftServerType +org.apache.hadoop.hbase.thrift.MetricsThriftServerSourceFactoryImpl.FactoryStorage org.apache.hadoop.hbase.thrift.ThriftServerRunner.ImplType http://git-wip-us.apache.org/repos/asf/hbase-site/blob/5a2158f2/devapidocs/org/apache/hadoop/hbase/util/ClassSize.MemoryLayout.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/util/ClassSize.MemoryLayout.html b/devapidocs/org/apache/hadoop/hbase/util/ClassSize.MemoryLayout.html index 39f8ff9..ec9e46f 100644 --- a/devapidocs/org/apache/hadoop/hbase/util/ClassSize.MemoryLayout.html +++ b/devapidocs/org/apache/hadoop/hbase/util/ClassSize.MemoryLayout.html @@ 
-117,7 +117,7 @@ var activeTableTab = "activeTableTab"; -private static class ClassSize.MemoryLayout +private static class ClassSize.MemoryLayout extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true; title="class or interface in java.lang">Object MemoryLayout abstracts details about the JVM object layout. Default implementation is used in case Unsafe is not available. @@ -210,7 +210,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html? MemoryLayout -privateMemoryLayout() +privateMemoryLayout() @@ -227,7 +227,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html? headerSize -intheaderSize() +intheaderSize() @@
[44/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/5a2158f2/devapidocs/org/apache/hadoop/hbase/coprocessor/Export.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/coprocessor/Export.html b/devapidocs/org/apache/hadoop/hbase/coprocessor/Export.html index c86ff44..aacc485 100644 --- a/devapidocs/org/apache/hadoop/hbase/coprocessor/Export.html +++ b/devapidocs/org/apache/hadoop/hbase/coprocessor/Export.html @@ -120,7 +120,7 @@ var activeTableTab = "activeTableTab"; @InterfaceAudience.LimitedPrivate(value="Coprocesssor") @InterfaceStability.Evolving -public class Export +public class Export extends org.apache.hadoop.hbase.protobuf.generated.ExportProtos.ExportService implements RegionCoprocessor Export an HBase table. Writes content to sequence files up in HDFS. Use @@ -393,7 +393,7 @@ implements LOG -private static finalorg.apache.commons.logging.Log LOG +private static finalorg.apache.commons.logging.Log LOG @@ -402,7 +402,7 @@ implements DEFAULT_CODEC -private static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/Class.html?is-external=true; title="class or interface in java.lang">Class? extends org.apache.hadoop.io.compress.CompressionCodec DEFAULT_CODEC +private static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/Class.html?is-external=true; title="class or interface in java.lang">Class? 
extends org.apache.hadoop.io.compress.CompressionCodec DEFAULT_CODEC @@ -411,7 +411,7 @@ implements DEFAULT_TYPE -private static finalorg.apache.hadoop.io.SequenceFile.CompressionType DEFAULT_TYPE +private static finalorg.apache.hadoop.io.SequenceFile.CompressionType DEFAULT_TYPE @@ -420,7 +420,7 @@ implements env -privateRegionCoprocessorEnvironment env +privateRegionCoprocessorEnvironment env @@ -429,7 +429,7 @@ implements userProvider -privateUserProvider userProvider +privateUserProvider userProvider @@ -446,7 +446,7 @@ implements Export -publicExport() +publicExport() @@ -463,7 +463,7 @@ implements main -public staticvoidmain(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">String[]args) +public staticvoidmain(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">String[]args) throws http://docs.oracle.com/javase/8/docs/api/java/lang/Throwable.html?is-external=true; title="class or interface in java.lang">Throwable Throws: @@ -477,7 +477,7 @@ implements run -statichttp://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true; title="class or interface in java.util">Mapbyte[],Export.Responserun(org.apache.hadoop.conf.Configurationconf, +statichttp://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true; title="class or interface in java.util">Mapbyte[],Export.Responserun(org.apache.hadoop.conf.Configurationconf, http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">String[]args) throws http://docs.oracle.com/javase/8/docs/api/java/lang/Throwable.html?is-external=true; title="class or interface in java.lang">Throwable @@ -492,7 +492,7 @@ implements run -public statichttp://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true; title="class or interface in 
java.util">Mapbyte[],Export.Responserun(org.apache.hadoop.conf.Configurationconf, +public statichttp://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true; title="class or interface in java.util">Mapbyte[],Export.Responserun(org.apache.hadoop.conf.Configurationconf, TableNametableName, Scanscan, org.apache.hadoop.fs.Pathdir) @@ -509,7 +509,7 @@ implements getCompression -private staticbooleangetCompression(org.apache.hadoop.hbase.protobuf.generated.ExportProtos.ExportRequestrequest) +private staticbooleangetCompression(org.apache.hadoop.hbase.protobuf.generated.ExportProtos.ExportRequestrequest) @@ -518,7 +518,7 @@ implements getCompressionType -private staticorg.apache.hadoop.io.SequenceFile.CompressionTypegetCompressionType(org.apache.hadoop.hbase.protobuf.generated.ExportProtos.ExportRequestrequest) +private staticorg.apache.hadoop.io.SequenceFile.CompressionTypegetCompressionType(org.apache.hadoop.hbase.protobuf.generated.ExportProtos.ExportRequestrequest) @@ -527,7 +527,7 @@ implements getCompressionCodec -private staticorg.apache.hadoop.io.compress.CompressionCodecgetCompressionCodec(org.apache.hadoop.conf.Configurationconf, +private
[34/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/5a2158f2/devapidocs/src-html/org/apache/hadoop/hbase/client/RegionInfoBuilder.MutableRegionInfo.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/RegionInfoBuilder.MutableRegionInfo.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/RegionInfoBuilder.MutableRegionInfo.html index 93a53b4..e6f3e5c 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/client/RegionInfoBuilder.MutableRegionInfo.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/RegionInfoBuilder.MutableRegionInfo.html @@ -370,133 +370,117 @@ 362 Bytes.equals(endKey, HConstants.EMPTY_BYTE_ARRAY)); 363} 364 -365/** -366 * @return true if this region is from hbase:meta -367 */ -368@Override -369public boolean isMetaTable() { -370 return isMetaRegion(); -371} -372 -373/** @return true if this region is a meta region */ +365/** @return true if this region is a meta region */ +366@Override +367public boolean isMetaRegion() { +368 return tableName.equals(FIRST_META_REGIONINFO.getTable()); +369} +370 +371/** +372 * @return True if has been split and has daughters. +373 */ 374@Override -375public boolean isMetaRegion() { -376 return tableName.equals(FIRST_META_REGIONINFO.getTable()); +375public boolean isSplit() { +376 return this.split; 377} 378 379/** -380 * @return true if this region is from a system table -381 */ -382@Override -383public boolean isSystemTable() { -384 return tableName.isSystemTable(); -385} -386 -387/** -388 * @return True if has been split and has daughters. -389 */ -390@Override -391public boolean isSplit() { -392 return this.split; -393} -394 -395/** -396 * @param split set split status -397 * @return MutableRegionInfo -398 */ -399public MutableRegionInfo setSplit(boolean split) { -400 this.split = split; -401 return this; -402} -403 -404/** -405 * @return True if this region is offline. 
-406 */ -407@Override -408public boolean isOffline() { -409 return this.offLine; -410} -411 -412/** -413 * The parent of a region split is offline while split daughters hold -414 * references to the parent. Offlined regions are closed. -415 * @param offLine Set online/offline status. -416 * @return MutableRegionInfo -417 */ -418public MutableRegionInfo setOffline(boolean offLine) { -419 this.offLine = offLine; -420 return this; -421} -422 -423/** -424 * @return True if this is a split parent region. -425 */ -426@Override -427public boolean isSplitParent() { -428 if (!isSplit()) return false; -429 if (!isOffline()) { -430LOG.warn("Region is split but NOT offline: " + getRegionNameAsString()); -431 } -432 return true; -433} -434 -435/** -436 * Returns the region replica id -437 * @return returns region replica id -438 */ -439@Override -440public int getReplicaId() { -441 return replicaId; -442} -443 -444/** -445 * @see java.lang.Object#toString() +380 * @param split set split status +381 * @return MutableRegionInfo +382 */ +383public MutableRegionInfo setSplit(boolean split) { +384 this.split = split; +385 return this; +386} +387 +388/** +389 * @return True if this region is offline. +390 */ +391@Override +392public boolean isOffline() { +393 return this.offLine; +394} +395 +396/** +397 * The parent of a region split is offline while split daughters hold +398 * references to the parent. Offlined regions are closed. +399 * @param offLine Set online/offline status. +400 * @return MutableRegionInfo +401 */ +402public MutableRegionInfo setOffline(boolean offLine) { +403 this.offLine = offLine; +404 return this; +405} +406 +407/** +408 * @return True if this is a split parent region. 
+409 */ +410@Override +411public boolean isSplitParent() { +412 if (!isSplit()) return false; +413 if (!isOffline()) { +414LOG.warn("Region is split but NOT offline: " + getRegionNameAsString()); +415 } +416 return true; +417} +418 +419/** +420 * Returns the region replica id +421 * @return returns region replica id +422 */ +423@Override +424public int getReplicaId() { +425 return replicaId; +426} +427 +428/** +429 * @see java.lang.Object#toString() +430 */ +431@Override +432public String toString() { +433 return "{ENCODED = " + getEncodedName() + ", " + +434HConstants.NAME + " = '" + Bytes.toStringBinary(this.regionName) +435+ "', STARTKEY = '" + +436
[11/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/5a2158f2/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ReplayBatch.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ReplayBatch.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ReplayBatch.html index 5d138ea..81d256e 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ReplayBatch.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ReplayBatch.html @@ -2387,7 +2387,7 @@ 2379 return true; 2380} 2381long modifiedFlushCheckInterval = flushCheckInterval; -2382if (getRegionInfo().isSystemTable() +2382if (getRegionInfo().getTable().isSystemTable() 2383getRegionInfo().getReplicaId() == RegionInfo.DEFAULT_REPLICA_ID) { 2384 modifiedFlushCheckInterval = SYSTEM_CACHE_FLUSH_INTERVAL; 2385} @@ -7869,7 +7869,7 @@ 7861 */ 7862 public byte[] checkSplit() { 7863// Can't split META -7864if (this.getRegionInfo().isMetaTable() || +7864if (this.getRegionInfo().isMetaRegion() || 7865 TableName.NAMESPACE_TABLE_NAME.equals(this.getRegionInfo().getTable())) { 7866 if (shouldForceSplit()) { 7867LOG.warn("Cannot split meta region in HBase 0.20 and above"); @@ -7914,374 +7914,372 @@ 7906 } 7907 7908 /** @return the coprocessor host */ -7909 @Override -7910 public RegionCoprocessorHost getCoprocessorHost() { -7911return coprocessorHost; -7912 } -7913 -7914 /** @param coprocessorHost the new coprocessor host */ -7915 public void setCoprocessorHost(final RegionCoprocessorHost coprocessorHost) { -7916this.coprocessorHost = coprocessorHost; -7917 } -7918 -7919 @Override -7920 public void startRegionOperation() throws IOException { -7921 startRegionOperation(Operation.ANY); -7922 } -7923 -7924 @Override -7925 @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="SF_SWITCH_FALLTHROUGH", -7926justification="Intentional") -7927 public void startRegionOperation(Operation op) throws IOException { -7928switch (op) { 
-7929 case GET: // read operations -7930 case SCAN: -7931checkReadsEnabled(); -7932 case INCREMENT: // write operations -7933 case APPEND: -7934 case SPLIT_REGION: -7935 case MERGE_REGION: -7936 case PUT: -7937 case DELETE: -7938 case BATCH_MUTATE: -7939 case COMPACT_REGION: -7940 case SNAPSHOT: -7941// when a region is in recovering state, no read, split, merge or snapshot is allowed -7942if (isRecovering() (this.disallowWritesInRecovering || -7943 (op != Operation.PUT op != Operation.DELETE op != Operation.BATCH_MUTATE))) { -7944 throw new RegionInRecoveryException(getRegionInfo().getRegionNameAsString() + -7945" is recovering; cannot take reads"); -7946} -7947break; -7948 default: -7949break; -7950} -7951if (op == Operation.MERGE_REGION || op == Operation.SPLIT_REGION -7952|| op == Operation.COMPACT_REGION) { -7953 // split, merge or compact region doesn't need to check the closing/closed state or lock the -7954 // region -7955 return; -7956} -7957if (this.closing.get()) { -7958 throw new NotServingRegionException(getRegionInfo().getRegionNameAsString() + " is closing"); -7959} -7960lock(lock.readLock()); -7961if (this.closed.get()) { -7962 lock.readLock().unlock(); -7963 throw new NotServingRegionException(getRegionInfo().getRegionNameAsString() + " is closed"); -7964} -7965// The unit for snapshot is a region. So, all stores for this region must be -7966// prepared for snapshot operation before proceeding. 
-7967if (op == Operation.SNAPSHOT) { -7968 stores.values().forEach(HStore::preSnapshotOperation); -7969} -7970try { -7971 if (coprocessorHost != null) { -7972 coprocessorHost.postStartRegionOperation(op); -7973 } -7974} catch (Exception e) { -7975 lock.readLock().unlock(); -7976 throw new IOException(e); -7977} -7978 } -7979 -7980 @Override -7981 public void closeRegionOperation() throws IOException { -7982 closeRegionOperation(Operation.ANY); -7983 } -7984 -7985 @Override -7986 public void closeRegionOperation(Operation operation) throws IOException { -7987if (operation == Operation.SNAPSHOT) { -7988 stores.values().forEach(HStore::postSnapshotOperation); -7989} -7990lock.readLock().unlock(); -7991if (coprocessorHost != null) { -7992 coprocessorHost.postCloseRegionOperation(operation); -7993} -7994 } -7995 -7996 /** -7997 * This method needs to be called before any public call that reads or -7998 * modifies stores in bulk. It has to be called just before a try. -7999 *
[02/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/5a2158f2/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.BulkLoadObserverOperation.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.BulkLoadObserverOperation.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.BulkLoadObserverOperation.html index 8aeb6da..dbadad7 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.BulkLoadObserverOperation.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.BulkLoadObserverOperation.html @@ -99,1432 +99,1431 @@ 091 * Implements the coprocessor environment and runtime support for coprocessors 092 * loaded within a {@link Region}. 093 */ -094@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC) -095@InterfaceStability.Evolving -096public class RegionCoprocessorHost -097extends CoprocessorHostRegionCoprocessor, RegionCoprocessorEnvironment { -098 -099 private static final Log LOG = LogFactory.getLog(RegionCoprocessorHost.class); -100 // The shared data map -101 private static final ReferenceMapString, ConcurrentMapString, Object SHARED_DATA_MAP = -102 new ReferenceMap(AbstractReferenceMap.ReferenceStrength.HARD, -103 AbstractReferenceMap.ReferenceStrength.WEAK); -104 -105 // optimization: no need to call postScannerFilterRow, if no coprocessor implements it -106 private final boolean hasCustomPostScannerFilterRow; -107 -108 /** -109 * -110 * Encapsulation of the environment of each coprocessor -111 */ -112 static class RegionEnvironment extends BaseEnvironmentRegionCoprocessor -113 implements RegionCoprocessorEnvironment { -114 -115private Region region; -116private RegionServerServices rsServices; -117ConcurrentMapString, Object sharedData; -118private final MetricRegistry metricRegistry; -119 -120/** -121 * Constructor -122 * @param impl the coprocessor instance -123 * @param 
priority chaining priority -124 */ -125public RegionEnvironment(final RegionCoprocessor impl, final int priority, -126final int seq, final Configuration conf, final Region region, -127final RegionServerServices services, final ConcurrentMapString, Object sharedData) { -128 super(impl, priority, seq, conf); -129 this.region = region; -130 this.rsServices = services; -131 this.sharedData = sharedData; -132 this.metricRegistry = -133 MetricsCoprocessor.createRegistryForRegionCoprocessor(impl.getClass().getName()); -134} -135 -136/** @return the region */ -137@Override -138public Region getRegion() { -139 return region; -140} -141 -142/** @return reference to the region server services */ -143@Override -144public CoprocessorRegionServerServices getCoprocessorRegionServerServices() { -145 return rsServices; -146} -147 -148@Override -149public void shutdown() { -150 super.shutdown(); -151 MetricsCoprocessor.removeRegistry(this.metricRegistry); -152} -153 -154@Override -155public ConcurrentMapString, Object getSharedData() { -156 return sharedData; -157} -158 -159@Override -160public RegionInfo getRegionInfo() { -161 return region.getRegionInfo(); -162} -163 -164@Override -165public MetricRegistry getMetricRegistryForRegionServer() { -166 return metricRegistry; -167} -168 } -169 -170 static class TableCoprocessorAttribute { -171private Path path; -172private String className; -173private int priority; -174private Configuration conf; -175 -176public TableCoprocessorAttribute(Path path, String className, int priority, -177Configuration conf) { -178 this.path = path; -179 this.className = className; -180 this.priority = priority; -181 this.conf = conf; -182} -183 -184public Path getPath() { -185 return path; -186} -187 -188public String getClassName() { -189 return className; -190} -191 -192public int getPriority() { -193 return priority; -194} -195 -196public Configuration getConf() { -197 return conf; -198} -199 } -200 -201 /** The region server services */ -202 
RegionServerServices rsServices; -203 /** The region */ -204 HRegion region; -205 -206 /** -207 * Constructor -208 * @param region the region -209 * @param rsServices interface to available region server functionality -210 * @param conf the configuration -211 */ -212 public RegionCoprocessorHost(final HRegion region, -213 final RegionServerServices rsServices, final Configuration conf) { -214super(rsServices); -215this.conf = conf; -216this.rsServices = rsServices; -217this.region = region; -218
[09/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/5a2158f2/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockImpl.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockImpl.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockImpl.html index 5d138ea..81d256e 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockImpl.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockImpl.html @@ -2387,7 +2387,7 @@ 2379 return true; 2380} 2381long modifiedFlushCheckInterval = flushCheckInterval; -2382if (getRegionInfo().isSystemTable() +2382if (getRegionInfo().getTable().isSystemTable() 2383getRegionInfo().getReplicaId() == RegionInfo.DEFAULT_REPLICA_ID) { 2384 modifiedFlushCheckInterval = SYSTEM_CACHE_FLUSH_INTERVAL; 2385} @@ -7869,7 +7869,7 @@ 7861 */ 7862 public byte[] checkSplit() { 7863// Can't split META -7864if (this.getRegionInfo().isMetaTable() || +7864if (this.getRegionInfo().isMetaRegion() || 7865 TableName.NAMESPACE_TABLE_NAME.equals(this.getRegionInfo().getTable())) { 7866 if (shouldForceSplit()) { 7867LOG.warn("Cannot split meta region in HBase 0.20 and above"); @@ -7914,374 +7914,372 @@ 7906 } 7907 7908 /** @return the coprocessor host */ -7909 @Override -7910 public RegionCoprocessorHost getCoprocessorHost() { -7911return coprocessorHost; -7912 } -7913 -7914 /** @param coprocessorHost the new coprocessor host */ -7915 public void setCoprocessorHost(final RegionCoprocessorHost coprocessorHost) { -7916this.coprocessorHost = coprocessorHost; -7917 } -7918 -7919 @Override -7920 public void startRegionOperation() throws IOException { -7921 startRegionOperation(Operation.ANY); -7922 } -7923 -7924 @Override -7925 @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="SF_SWITCH_FALLTHROUGH", -7926justification="Intentional") -7927 public void startRegionOperation(Operation op) throws IOException { -7928switch (op) { 
-7929 case GET: // read operations -7930 case SCAN: -7931checkReadsEnabled(); -7932 case INCREMENT: // write operations -7933 case APPEND: -7934 case SPLIT_REGION: -7935 case MERGE_REGION: -7936 case PUT: -7937 case DELETE: -7938 case BATCH_MUTATE: -7939 case COMPACT_REGION: -7940 case SNAPSHOT: -7941// when a region is in recovering state, no read, split, merge or snapshot is allowed -7942if (isRecovering() (this.disallowWritesInRecovering || -7943 (op != Operation.PUT op != Operation.DELETE op != Operation.BATCH_MUTATE))) { -7944 throw new RegionInRecoveryException(getRegionInfo().getRegionNameAsString() + -7945" is recovering; cannot take reads"); -7946} -7947break; -7948 default: -7949break; -7950} -7951if (op == Operation.MERGE_REGION || op == Operation.SPLIT_REGION -7952|| op == Operation.COMPACT_REGION) { -7953 // split, merge or compact region doesn't need to check the closing/closed state or lock the -7954 // region -7955 return; -7956} -7957if (this.closing.get()) { -7958 throw new NotServingRegionException(getRegionInfo().getRegionNameAsString() + " is closing"); -7959} -7960lock(lock.readLock()); -7961if (this.closed.get()) { -7962 lock.readLock().unlock(); -7963 throw new NotServingRegionException(getRegionInfo().getRegionNameAsString() + " is closed"); -7964} -7965// The unit for snapshot is a region. So, all stores for this region must be -7966// prepared for snapshot operation before proceeding. 
-7967if (op == Operation.SNAPSHOT) { -7968 stores.values().forEach(HStore::preSnapshotOperation); -7969} -7970try { -7971 if (coprocessorHost != null) { -7972 coprocessorHost.postStartRegionOperation(op); -7973 } -7974} catch (Exception e) { -7975 lock.readLock().unlock(); -7976 throw new IOException(e); -7977} -7978 } -7979 -7980 @Override -7981 public void closeRegionOperation() throws IOException { -7982 closeRegionOperation(Operation.ANY); -7983 } -7984 -7985 @Override -7986 public void closeRegionOperation(Operation operation) throws IOException { -7987if (operation == Operation.SNAPSHOT) { -7988 stores.values().forEach(HStore::postSnapshotOperation); -7989} -7990lock.readLock().unlock(); -7991if (coprocessorHost != null) { -7992 coprocessorHost.postCloseRegionOperation(operation); -7993} -7994 } -7995 -7996 /** -7997 * This method needs to be called before any public call that reads or -7998 * modifies stores in bulk. It has to be called just before a try. -7999 *
[43/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/5a2158f2/devapidocs/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.html b/devapidocs/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.html index 5bea36e..aedd28b 100644 --- a/devapidocs/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.html +++ b/devapidocs/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.html @@ -18,7 +18,7 @@ catch(err) { } //--> -var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10}; +var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10}; var tabs = {65535:["t0","All Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]}; var altColor = "altColor"; var rowColor = "rowColor"; @@ -152,10 +152,6 @@ extends private http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true; title="class or interface in java.util">ListImmutableSegment segments - -private TimeRangeTracker -timeRangeTracker - @@ -169,7 +165,7 @@ extends Segment -dataSize, FIXED_OVERHEAD, heapSize, minSequenceId, tagsPresent +dataSize, FIXED_OVERHEAD, heapSize, minSequenceId, tagsPresent, timeRangeTracker @@ -254,92 +250,88 @@ extends getMinSequenceId() -long -getMinTimestamp() - - int getNumOfSegments() - + KeyValueScanner getScanner(longreadPoint) Creates the scanner for the given read point - + KeyValueScanner getScanner(longreadPoint, longorder) 
Creates the scanner for the given read point, and a specific order in a list - + http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true; title="class or interface in java.util">ListKeyValueScanner getScanners(longreadPoint, longorder) - + TimeRangeTracker getTimeRangeTracker() - + http://docs.oracle.com/javase/8/docs/api/java/util/SortedSet.html?is-external=true; title="class or interface in java.util">SortedSetCell headSet(CellfirstKeyOnRow) - + long heapSize() - + void incScannerCount() - + protected void incSize(longdelta, longheapOverhead) Updates the heap size counter of the segment by the given delta - + protected long indexEntrySize() - + protected void internalAdd(Cellcell, booleanmslabUsed, MemStoreSizememstoreSize) - + boolean isEmpty() - + boolean isTagsPresent() - + http://docs.oracle.com/javase/8/docs/api/java/util/Iterator.html?is-external=true; title="class or interface in java.util">IteratorCell iterator() - + long keySize() - + Cell last() - + Cell maybeCloneWithAllocator(Cellcell) If the segment has a memory allocator the cell is being cloned to this space, and returned; otherwise the given cell is returned - + protected CompositeImmutableSegment setCellSet(CellSetcellSetOld, CellSetcellSetNew) @@ -347,22 +339,22 @@ extends + boolean shouldSeek(TimeRangetr, longoldestUnexpiredTS) - + protected http://docs.oracle.com/javase/8/docs/api/java/util/SortedSet.html?is-external=true; title="class or interface in java.util">SortedSetCell tailSet(CellfirstCell) Returns a subset of the segment cell set, which starts with the given cell - + http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">String toString() - + protected void updateMetaInfo(CellcellToAdd, booleansucc, @@ -407,22 +399,13 @@ extends http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true; title="class or interface in java.util">ListImmutableSegment segments - - - - - 
-timeRangeTracker -private finalTimeRangeTracker timeRangeTracker - - keySize -privatelong keySize +privatelong keySize @@ -439,7 +422,7 @@ extends CompositeImmutableSegment -publicCompositeImmutableSegment(CellComparatorcomparator, +publicCompositeImmutableSegment(CellComparatorcomparator, http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true; title="class or interface in java.util">ListImmutableSegmentsegments) @@ -457,7 +440,7 @@ extends getAllSegments -publichttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true; title="class or interface in
[22/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/5a2158f2/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/Export.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/Export.html b/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/Export.html index 95f6ef8..45f0981 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/Export.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/Export.html @@ -61,487 +61,488 @@ 053import org.apache.hadoop.hbase.protobuf.ProtobufUtil; 054import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.DelegationToken; 055import org.apache.hadoop.hbase.protobuf.generated.ExportProtos; -056import org.apache.hadoop.hbase.regionserver.InternalScanner; -057import org.apache.hadoop.hbase.regionserver.Region; -058import org.apache.hadoop.hbase.regionserver.RegionScanner; -059import org.apache.hadoop.hbase.security.User; -060import org.apache.hadoop.hbase.security.UserProvider; -061import org.apache.hadoop.hbase.security.token.FsDelegationToken; -062import org.apache.hadoop.hbase.util.ArrayUtils; -063import org.apache.hadoop.hbase.util.ByteStringer; -064import org.apache.hadoop.hbase.util.Bytes; -065import org.apache.hadoop.hbase.util.Triple; -066import org.apache.hadoop.io.SequenceFile; -067import org.apache.hadoop.io.Text; -068import org.apache.hadoop.io.compress.CompressionCodec; -069import org.apache.hadoop.io.compress.DefaultCodec; -070import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat; -071import org.apache.hadoop.security.token.Token; -072import org.apache.hadoop.util.GenericOptionsParser; -073import org.apache.hadoop.util.ReflectionUtils; -074import org.apache.yetus.audience.InterfaceAudience; -075import org.apache.yetus.audience.InterfaceStability; -076 -077import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting; -078 -079import com.google.protobuf.RpcCallback; -080import com.google.protobuf.RpcController; -081import 
com.google.protobuf.Service; -082 -083/** -084 * Export an HBase table. Writes content to sequence files up in HDFS. Use -085 * {@link Import} to read it back in again. It is implemented by the endpoint -086 * technique. -087 * -088 * @see org.apache.hadoop.hbase.mapreduce.Export -089 */ -090@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC) -091@InterfaceStability.Evolving -092public class Export extends ExportProtos.ExportService implements RegionCoprocessor { -093 -094 private static final Log LOG = LogFactory.getLog(Export.class); -095 private static final Class? extends CompressionCodec DEFAULT_CODEC = DefaultCodec.class; -096 private static final SequenceFile.CompressionType DEFAULT_TYPE = SequenceFile.CompressionType.RECORD; -097 private RegionCoprocessorEnvironment env = null; -098 private UserProvider userProvider; -099 -100 public static void main(String[] args) throws Throwable { -101Mapbyte[], Response response = run(HBaseConfiguration.create(), args); -102System.exit(response == null ? 
-1 : 0); -103 } -104 -105 @VisibleForTesting -106 static Mapbyte[], Response run(final Configuration conf, final String[] args) throws Throwable { -107String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs(); -108if (!ExportUtils.isValidArguements(args)) { -109 ExportUtils.usage("Wrong number of arguments: " + ArrayUtils.length(otherArgs)); -110 return null; -111} -112TripleTableName, Scan, Path arguments = ExportUtils.getArgumentsFromCommandLine(conf, otherArgs); -113return run(conf, arguments.getFirst(), arguments.getSecond(), arguments.getThird()); -114 } -115 -116 public static Mapbyte[], Response run(final Configuration conf, TableName tableName, Scan scan, Path dir) throws Throwable { -117FileSystem fs = dir.getFileSystem(conf); -118UserProvider userProvider = UserProvider.instantiate(conf); -119checkDir(fs, dir); -120FsDelegationToken fsDelegationToken = new FsDelegationToken(userProvider, "renewer"); -121 fsDelegationToken.acquireDelegationToken(fs); -122try { -123 final ExportProtos.ExportRequest request = getConfiguredRequest(conf, dir, -124scan, fsDelegationToken.getUserToken()); -125 try (Connection con = ConnectionFactory.createConnection(conf); -126 Table table = con.getTable(tableName)) { -127Mapbyte[], Response result = new TreeMap(Bytes.BYTES_COMPARATOR); -128 table.coprocessorService(ExportProtos.ExportService.class, -129 scan.getStartRow(), -130 scan.getStopRow(), -131 (ExportProtos.ExportService service) - { -132ServerRpcController controller = new ServerRpcController(); -133Mapbyte[], ExportProtos.ExportResponse rval = new TreeMap(Bytes.BYTES_COMPARATOR); -134
[41/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/5a2158f2/devapidocs/org/apache/hadoop/hbase/regionserver/Region.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/Region.html b/devapidocs/org/apache/hadoop/hbase/regionserver/Region.html index 8e4414c..ed3b200 100644 --- a/devapidocs/org/apache/hadoop/hbase/regionserver/Region.html +++ b/devapidocs/org/apache/hadoop/hbase/regionserver/Region.html @@ -18,7 +18,7 @@ catch(err) { } //--> -var methods = {"i0":6,"i1":6,"i2":6,"i3":6,"i4":6,"i5":6,"i6":6,"i7":6,"i8":6,"i9":6,"i10":6,"i11":6,"i12":6,"i13":6,"i14":6,"i15":6,"i16":6,"i17":6,"i18":6,"i19":6,"i20":6,"i21":6,"i22":6,"i23":6,"i24":6,"i25":6,"i26":6,"i27":6,"i28":6,"i29":6,"i30":6,"i31":6,"i32":6,"i33":6,"i34":6,"i35":6,"i36":6,"i37":6,"i38":6,"i39":6,"i40":6,"i41":6,"i42":6,"i43":6,"i44":6,"i45":6,"i46":6,"i47":6,"i48":6,"i49":6,"i50":6,"i51":6,"i52":6,"i53":6}; +var methods = {"i0":6,"i1":6,"i2":6,"i3":6,"i4":6,"i5":6,"i6":6,"i7":6,"i8":6,"i9":6,"i10":6,"i11":6,"i12":6,"i13":6,"i14":6,"i15":6,"i16":6,"i17":6,"i18":6,"i19":6,"i20":6,"i21":6,"i22":6,"i23":6,"i24":6,"i25":6,"i26":6,"i27":6,"i28":6,"i29":6,"i30":6,"i31":6,"i32":6,"i33":6,"i34":6,"i35":6,"i36":6,"i37":6,"i38":6,"i39":6,"i40":6,"i41":6,"i42":6,"i43":6,"i44":6,"i45":6,"i46":6,"i47":6,"i48":6,"i49":6,"i50":6,"i51":6,"i52":6}; var tabs = {65535:["t0","All Methods"],2:["t2","Instance Methods"],4:["t3","Abstract Methods"]}; var altColor = "altColor"; var rowColor = "rowColor"; @@ -253,70 +253,66 @@ extends getCompactionState() -RegionCoprocessorHost -getCoprocessorHost() - - long getDataInMemoryWithoutWAL() - + long getEarliestFlushTimeForAllStores() - + long getFilteredReadRequestsCount() - + long getMaxFlushedSeqId() - + http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true; title="class or interface in java.util">Mapbyte[],http://docs.oracle.com/javase/8/docs/api/java/lang/Long.html?is-external=true; title="class or interface in java.lang">Long 
getMaxStoreSeqId() - + long getMemStoreSize() - + long getNumMutationsWithoutWAL() - + long getOldestHfileTs(booleanmajorCompactionOnly) This can be used to determine the last time all files of this region were major compacted. - + long getReadPoint(IsolationLevelisolationLevel) - + long getReadRequestsCount() - + RegionInfo getRegionInfo() - + Region.RowLock getRowLock(byte[]row, booleanreadLock) Get a row lock for the specified row. - + RegionScanner getScanner(Scanscan) Return an iterator that scans over the HRegion, returning the indicated columns and rows specified by the Scan. - + RegionScanner getScanner(Scanscan, http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true; title="class or interface in java.util">ListKeyValueScanneradditionalScanners) @@ -324,71 +320,71 @@ extends Scan. - + Store getStore(byte[]family) Return the Store for the given family - + http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true; title="class or interface in java.util">Listhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">String getStoreFileList(byte[][]columns) - + http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true; title="class or interface in java.util">List? extends Store getStores() Return the list of Stores managed by this region - + TableDescriptor getTableDescriptor() - + long getWriteRequestsCount() - + Result increment(Incrementincrement) Perform one or more increment operations on a row. - + boolean isAvailable() - + boolean isClosed() - + boolean isClosing() - + boolean isMergeable() - + boolean isReadOnly() - + boolean isRecovering() - + boolean isSplittable() - + void mutateRow(RowMutationsmutations) Performs multiple mutations atomically on a single row. 
- + void mutateRowsWithLocks(http://docs.oracle.com/javase/8/docs/api/java/util/Collection.html?is-external=true; title="class or interface in java.util">CollectionMutationmutations, http://docs.oracle.com/javase/8/docs/api/java/util/Collection.html?is-external=true; title="class or interface in java.util">Collectionbyte[]rowsToLock, @@ -397,13 +393,13 @@ extends Perform atomic mutations within the region. - + void processRowsWithLocks(RowProcessor?,?processor) Performs atomic multiple reads and writes on a given row. - + void processRowsWithLocks(RowProcessor?,?processor, longnonceGroup, @@ -411,7 +407,7 @@ extends Performs atomic multiple reads and writes on a given row. - + void processRowsWithLocks(RowProcessor?,?processor, longtimeout, @@ -420,13 +416,13 @@ extends Performs atomic multiple reads and writes on a given
[31/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/5a2158f2/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.ObserverOperation.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.ObserverOperation.html b/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.ObserverOperation.html index ed99dd8..cb1b7e5 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.ObserverOperation.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.ObserverOperation.html @@ -66,670 +66,669 @@ 058 * @param E type of specific coprocessor environment this host requires. 059 * provides 060 */ -061@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC) -062@InterfaceStability.Evolving -063public abstract class CoprocessorHostC extends Coprocessor, E extends CoprocessorEnvironmentC { -064 public static final String REGION_COPROCESSOR_CONF_KEY = -065 "hbase.coprocessor.region.classes"; -066 public static final String REGIONSERVER_COPROCESSOR_CONF_KEY = -067 "hbase.coprocessor.regionserver.classes"; -068 public static final String USER_REGION_COPROCESSOR_CONF_KEY = -069 "hbase.coprocessor.user.region.classes"; -070 public static final String MASTER_COPROCESSOR_CONF_KEY = -071 "hbase.coprocessor.master.classes"; -072 public static final String WAL_COPROCESSOR_CONF_KEY = -073"hbase.coprocessor.wal.classes"; -074 public static final String ABORT_ON_ERROR_KEY = "hbase.coprocessor.abortonerror"; -075 public static final boolean DEFAULT_ABORT_ON_ERROR = true; -076 public static final String COPROCESSORS_ENABLED_CONF_KEY = "hbase.coprocessor.enabled"; -077 public static final boolean DEFAULT_COPROCESSORS_ENABLED = true; -078 public static final String USER_COPROCESSORS_ENABLED_CONF_KEY = -079"hbase.coprocessor.user.enabled"; -080 public static final boolean DEFAULT_USER_COPROCESSORS_ENABLED = true; -081 -082 private static final Log LOG = 
LogFactory.getLog(CoprocessorHost.class); -083 protected Abortable abortable; -084 /** Ordered set of loaded coprocessors with lock */ -085 protected final SortedListE coprocEnvironments = -086 new SortedList(new EnvironmentPriorityComparator()); -087 protected Configuration conf; -088 // unique file prefix to use for local copies of jars when classloading -089 protected String pathPrefix; -090 protected AtomicInteger loadSequence = new AtomicInteger(); -091 -092 public CoprocessorHost(Abortable abortable) { -093this.abortable = abortable; -094this.pathPrefix = UUID.randomUUID().toString(); -095 } -096 -097 /** -098 * Not to be confused with the per-object _coprocessors_ (above), -099 * coprocessorNames is static and stores the set of all coprocessors ever -100 * loaded by any thread in this JVM. It is strictly additive: coprocessors are -101 * added to coprocessorNames, by checkAndLoadInstance() but are never removed, since -102 * the intention is to preserve a history of all loaded coprocessors for -103 * diagnosis in case of server crash (HBASE-4014). -104 */ -105 private static SetString coprocessorNames = -106 Collections.synchronizedSet(new HashSetString()); -107 -108 public static SetString getLoadedCoprocessors() { -109synchronized (coprocessorNames) { -110 return new HashSet(coprocessorNames); -111} -112 } -113 -114 /** -115 * Used to create a parameter to the HServerLoad constructor so that -116 * HServerLoad can provide information about the coprocessors loaded by this -117 * regionserver. -118 * (HBASE-4070: Improve region server metrics to report loaded coprocessors -119 * to master). -120 */ -121 public SetString getCoprocessors() { -122SetString returnValue = new TreeSet(); -123for (E e: coprocEnvironments) { -124 returnValue.add(e.getInstance().getClass().getSimpleName()); -125} -126return returnValue; -127 } -128 -129 /** -130 * Load system coprocessors once only. Read the class names from configuration. -131 * Called by constructor. 
-132 */ -133 protected void loadSystemCoprocessors(Configuration conf, String confKey) { -134boolean coprocessorsEnabled = conf.getBoolean(COPROCESSORS_ENABLED_CONF_KEY, -135 DEFAULT_COPROCESSORS_ENABLED); -136if (!coprocessorsEnabled) { -137 return; -138} -139 -140Class? implClass; -141 -142// load default coprocessors from configure file -143String[] defaultCPClasses = conf.getStrings(confKey); -144if (defaultCPClasses == null || defaultCPClasses.length == 0) -145 return; -146 -147int priority = Coprocessor.PRIORITY_SYSTEM; -148for (String className : defaultCPClasses) { -149 className = className.trim(); -150 if (findCoprocessor(className) != null) { -151// If already loaded
[06/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/5a2158f2/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.PeriodicMemStoreFlusher.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.PeriodicMemStoreFlusher.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.PeriodicMemStoreFlusher.html index ea14609..bbd45f7 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.PeriodicMemStoreFlusher.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.PeriodicMemStoreFlusher.html @@ -1244,7 +1244,7 @@ 1236if (getNumberOfOnlineRegions() 2) return false; 1237boolean allUserRegionsOffline = true; 1238for (Map.EntryString, HRegion e: this.onlineRegions.entrySet()) { -1239 if (!e.getValue().getRegionInfo().isMetaTable()) { +1239 if (!e.getValue().getRegionInfo().isMetaRegion()) { 1240allUserRegionsOffline = false; 1241break; 1242 } @@ -2086,7 +2086,7 @@ 2078 public WAL getWAL(RegionInfo regionInfo) throws IOException { 2079WAL wal; 2080// _ROOT_ and hbase:meta regions have separate WAL. -2081if (regionInfo != null regionInfo.isMetaTable() +2081if (regionInfo != null regionInfo.isMetaRegion() 2082 regionInfo.getReplicaId() == RegionInfo.DEFAULT_REPLICA_ID) { 2083 wal = walFactory.getMetaWAL(regionInfo.getEncodedNameAsBytes()); 2084} else if (regionInfo == null) { @@ -2682,7 +2682,7 @@ 2674try { 2675 for (Map.EntryString, HRegion e: this.onlineRegions.entrySet()) { 2676HRegion r = e.getValue(); -2677if (!r.getRegionInfo().isMetaTable() r.isAvailable()) { +2677if (!r.getRegionInfo().isMetaRegion() r.isAvailable()) { 2678 // Don't update zk with this close transition; pass false. 2679 closeRegionIgnoreErrors(r.getRegionInfo(), abort); 2680} @@ -3094,7 +3094,7 @@ 3086 protected boolean closeRegion(String encodedName, final boolean abort, final ServerName sn) 3087 throws NotServingRegionException { 3088//Check for permissions to close. 
-3089Region actualRegion = this.getRegion(encodedName); +3089HRegion actualRegion = this.getRegion(encodedName); 3090// Can be null if we're calling close on a region that's not online 3091if ((actualRegion != null) (actualRegion.getCoprocessorHost() != null)) { 3092 try { @@ -3213,7 +3213,7 @@ 3205 * @return HRegion for the passed binary coderegionName/code or null if 3206 * named region is not member of the online regions. 3207 */ -3208 public Region getOnlineRegion(final byte[] regionName) { +3208 public HRegion getOnlineRegion(final byte[] regionName) { 3209String encodedRegionName = RegionInfo.encodeRegionName(regionName); 3210return this.onlineRegions.get(encodedRegionName); 3211 } http://git-wip-us.apache.org/repos/asf/hbase-site/blob/5a2158f2/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.html index ea14609..bbd45f7 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.html @@ -1244,7 +1244,7 @@ 1236if (getNumberOfOnlineRegions() 2) return false; 1237boolean allUserRegionsOffline = true; 1238for (Map.EntryString, HRegion e: this.onlineRegions.entrySet()) { -1239 if (!e.getValue().getRegionInfo().isMetaTable()) { +1239 if (!e.getValue().getRegionInfo().isMetaRegion()) { 1240allUserRegionsOffline = false; 1241break; 1242 } @@ -2086,7 +2086,7 @@ 2078 public WAL getWAL(RegionInfo regionInfo) throws IOException { 2079WAL wal; 2080// _ROOT_ and hbase:meta regions have separate WAL. 
-2081if (regionInfo != null regionInfo.isMetaTable() +2081if (regionInfo != null regionInfo.isMetaRegion() 2082 regionInfo.getReplicaId() == RegionInfo.DEFAULT_REPLICA_ID) { 2083 wal = walFactory.getMetaWAL(regionInfo.getEncodedNameAsBytes()); 2084} else if (regionInfo == null) { @@ -2682,7 +2682,7 @@ 2674try { 2675 for (Map.EntryString, HRegion e: this.onlineRegions.entrySet()) { 2676HRegion r = e.getValue(); -2677if (!r.getRegionInfo().isMetaTable() r.isAvailable()) { +2677if (!r.getRegionInfo().isMetaRegion() r.isAvailable()) { 2678 // Don't update zk with this close transition; pass false. 2679 closeRegionIgnoreErrors(r.getRegionInfo(), abort); 2680} @@ -3094,7 +3094,7 @@ 3086 protected boolean
[29/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/5a2158f2/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.ObserverOperationWithoutResult.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.ObserverOperationWithoutResult.html b/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.ObserverOperationWithoutResult.html index ed99dd8..cb1b7e5 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.ObserverOperationWithoutResult.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.ObserverOperationWithoutResult.html @@ -66,670 +66,669 @@ 058 * @param E type of specific coprocessor environment this host requires. 059 * provides 060 */ -061@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC) -062@InterfaceStability.Evolving -063public abstract class CoprocessorHostC extends Coprocessor, E extends CoprocessorEnvironmentC { -064 public static final String REGION_COPROCESSOR_CONF_KEY = -065 "hbase.coprocessor.region.classes"; -066 public static final String REGIONSERVER_COPROCESSOR_CONF_KEY = -067 "hbase.coprocessor.regionserver.classes"; -068 public static final String USER_REGION_COPROCESSOR_CONF_KEY = -069 "hbase.coprocessor.user.region.classes"; -070 public static final String MASTER_COPROCESSOR_CONF_KEY = -071 "hbase.coprocessor.master.classes"; -072 public static final String WAL_COPROCESSOR_CONF_KEY = -073"hbase.coprocessor.wal.classes"; -074 public static final String ABORT_ON_ERROR_KEY = "hbase.coprocessor.abortonerror"; -075 public static final boolean DEFAULT_ABORT_ON_ERROR = true; -076 public static final String COPROCESSORS_ENABLED_CONF_KEY = "hbase.coprocessor.enabled"; -077 public static final boolean DEFAULT_COPROCESSORS_ENABLED = true; -078 public static final String USER_COPROCESSORS_ENABLED_CONF_KEY = -079"hbase.coprocessor.user.enabled"; -080 public static final boolean 
DEFAULT_USER_COPROCESSORS_ENABLED = true; -081 -082 private static final Log LOG = LogFactory.getLog(CoprocessorHost.class); -083 protected Abortable abortable; -084 /** Ordered set of loaded coprocessors with lock */ -085 protected final SortedListE coprocEnvironments = -086 new SortedList(new EnvironmentPriorityComparator()); -087 protected Configuration conf; -088 // unique file prefix to use for local copies of jars when classloading -089 protected String pathPrefix; -090 protected AtomicInteger loadSequence = new AtomicInteger(); -091 -092 public CoprocessorHost(Abortable abortable) { -093this.abortable = abortable; -094this.pathPrefix = UUID.randomUUID().toString(); -095 } -096 -097 /** -098 * Not to be confused with the per-object _coprocessors_ (above), -099 * coprocessorNames is static and stores the set of all coprocessors ever -100 * loaded by any thread in this JVM. It is strictly additive: coprocessors are -101 * added to coprocessorNames, by checkAndLoadInstance() but are never removed, since -102 * the intention is to preserve a history of all loaded coprocessors for -103 * diagnosis in case of server crash (HBASE-4014). -104 */ -105 private static SetString coprocessorNames = -106 Collections.synchronizedSet(new HashSetString()); -107 -108 public static SetString getLoadedCoprocessors() { -109synchronized (coprocessorNames) { -110 return new HashSet(coprocessorNames); -111} -112 } -113 -114 /** -115 * Used to create a parameter to the HServerLoad constructor so that -116 * HServerLoad can provide information about the coprocessors loaded by this -117 * regionserver. -118 * (HBASE-4070: Improve region server metrics to report loaded coprocessors -119 * to master). -120 */ -121 public SetString getCoprocessors() { -122SetString returnValue = new TreeSet(); -123for (E e: coprocEnvironments) { -124 returnValue.add(e.getInstance().getClass().getSimpleName()); -125} -126return returnValue; -127 } -128 -129 /** -130 * Load system coprocessors once only. 
Read the class names from configuration. -131 * Called by constructor. -132 */ -133 protected void loadSystemCoprocessors(Configuration conf, String confKey) { -134boolean coprocessorsEnabled = conf.getBoolean(COPROCESSORS_ENABLED_CONF_KEY, -135 DEFAULT_COPROCESSORS_ENABLED); -136if (!coprocessorsEnabled) { -137 return; -138} -139 -140Class? implClass; -141 -142// load default coprocessors from configure file -143String[] defaultCPClasses = conf.getStrings(confKey); -144if (defaultCPClasses == null || defaultCPClasses.length == 0) -145 return; -146 -147int priority = Coprocessor.PRIORITY_SYSTEM; -148for (String className : defaultCPClasses) { -149 className = className.trim(); -150 if
[33/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/5a2158f2/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.EnvironmentPriorityComparator.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.EnvironmentPriorityComparator.html b/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.EnvironmentPriorityComparator.html index ed99dd8..cb1b7e5 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.EnvironmentPriorityComparator.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.EnvironmentPriorityComparator.html @@ -66,670 +66,669 @@ 058 * @param E type of specific coprocessor environment this host requires. 059 * provides 060 */ -061@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC) -062@InterfaceStability.Evolving -063public abstract class CoprocessorHostC extends Coprocessor, E extends CoprocessorEnvironmentC { -064 public static final String REGION_COPROCESSOR_CONF_KEY = -065 "hbase.coprocessor.region.classes"; -066 public static final String REGIONSERVER_COPROCESSOR_CONF_KEY = -067 "hbase.coprocessor.regionserver.classes"; -068 public static final String USER_REGION_COPROCESSOR_CONF_KEY = -069 "hbase.coprocessor.user.region.classes"; -070 public static final String MASTER_COPROCESSOR_CONF_KEY = -071 "hbase.coprocessor.master.classes"; -072 public static final String WAL_COPROCESSOR_CONF_KEY = -073"hbase.coprocessor.wal.classes"; -074 public static final String ABORT_ON_ERROR_KEY = "hbase.coprocessor.abortonerror"; -075 public static final boolean DEFAULT_ABORT_ON_ERROR = true; -076 public static final String COPROCESSORS_ENABLED_CONF_KEY = "hbase.coprocessor.enabled"; -077 public static final boolean DEFAULT_COPROCESSORS_ENABLED = true; -078 public static final String USER_COPROCESSORS_ENABLED_CONF_KEY = -079"hbase.coprocessor.user.enabled"; -080 public static final boolean DEFAULT_USER_COPROCESSORS_ENABLED 
= true; -081 -082 private static final Log LOG = LogFactory.getLog(CoprocessorHost.class); -083 protected Abortable abortable; -084 /** Ordered set of loaded coprocessors with lock */ -085 protected final SortedListE coprocEnvironments = -086 new SortedList(new EnvironmentPriorityComparator()); -087 protected Configuration conf; -088 // unique file prefix to use for local copies of jars when classloading -089 protected String pathPrefix; -090 protected AtomicInteger loadSequence = new AtomicInteger(); -091 -092 public CoprocessorHost(Abortable abortable) { -093this.abortable = abortable; -094this.pathPrefix = UUID.randomUUID().toString(); -095 } -096 -097 /** -098 * Not to be confused with the per-object _coprocessors_ (above), -099 * coprocessorNames is static and stores the set of all coprocessors ever -100 * loaded by any thread in this JVM. It is strictly additive: coprocessors are -101 * added to coprocessorNames, by checkAndLoadInstance() but are never removed, since -102 * the intention is to preserve a history of all loaded coprocessors for -103 * diagnosis in case of server crash (HBASE-4014). -104 */ -105 private static SetString coprocessorNames = -106 Collections.synchronizedSet(new HashSetString()); -107 -108 public static SetString getLoadedCoprocessors() { -109synchronized (coprocessorNames) { -110 return new HashSet(coprocessorNames); -111} -112 } -113 -114 /** -115 * Used to create a parameter to the HServerLoad constructor so that -116 * HServerLoad can provide information about the coprocessors loaded by this -117 * regionserver. -118 * (HBASE-4070: Improve region server metrics to report loaded coprocessors -119 * to master). -120 */ -121 public SetString getCoprocessors() { -122SetString returnValue = new TreeSet(); -123for (E e: coprocEnvironments) { -124 returnValue.add(e.getInstance().getClass().getSimpleName()); -125} -126return returnValue; -127 } -128 -129 /** -130 * Load system coprocessors once only. 
Read the class names from configuration. -131 * Called by constructor. -132 */ -133 protected void loadSystemCoprocessors(Configuration conf, String confKey) { -134boolean coprocessorsEnabled = conf.getBoolean(COPROCESSORS_ENABLED_CONF_KEY, -135 DEFAULT_COPROCESSORS_ENABLED); -136if (!coprocessorsEnabled) { -137 return; -138} -139 -140Class? implClass; -141 -142// load default coprocessors from configure file -143String[] defaultCPClasses = conf.getStrings(confKey); -144if (defaultCPClasses == null || defaultCPClasses.length == 0) -145 return; -146 -147int priority = Coprocessor.PRIORITY_SYSTEM; -148for (String className : defaultCPClasses) { -149 className = className.trim(); -150 if
hbase-site git commit: INFRA-10751 Empty commit
Repository: hbase-site Updated Branches: refs/heads/asf-site 5a2158f21 -> 373abd1ff INFRA-10751 Empty commit Project: http://git-wip-us.apache.org/repos/asf/hbase-site/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase-site/commit/373abd1f Tree: http://git-wip-us.apache.org/repos/asf/hbase-site/tree/373abd1f Diff: http://git-wip-us.apache.org/repos/asf/hbase-site/diff/373abd1f Branch: refs/heads/asf-site Commit: 373abd1ffe6fe760a5204b7db054eb3b4fb56ae4 Parents: 5a2158f Author: jenkinsAuthored: Sun Oct 15 15:16:11 2017 + Committer: jenkins Committed: Sun Oct 15 15:16:11 2017 + -- --
[42/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/5a2158f2/devapidocs/org/apache/hadoop/hbase/regionserver/HStore.StoreFlusherImpl.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/HStore.StoreFlusherImpl.html b/devapidocs/org/apache/hadoop/hbase/regionserver/HStore.StoreFlusherImpl.html index 873eb8c..0654d6f 100644 --- a/devapidocs/org/apache/hadoop/hbase/regionserver/HStore.StoreFlusherImpl.html +++ b/devapidocs/org/apache/hadoop/hbase/regionserver/HStore.StoreFlusherImpl.html @@ -117,7 +117,7 @@ var activeTableTab = "activeTableTab"; -private final class HStore.StoreFlusherImpl +private final class HStore.StoreFlusherImpl extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true; title="class or interface in java.lang">Object implements StoreFlushContext @@ -274,7 +274,7 @@ implements cacheFlushSeqNum -privatelong cacheFlushSeqNum +privatelong cacheFlushSeqNum @@ -283,7 +283,7 @@ implements snapshot -privateMemStoreSnapshot snapshot +privateMemStoreSnapshot snapshot @@ -292,7 +292,7 @@ implements tempFiles -privatehttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true; title="class or interface in java.util">Listorg.apache.hadoop.fs.Path tempFiles +privatehttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true; title="class or interface in java.util">Listorg.apache.hadoop.fs.Path tempFiles @@ -301,7 +301,7 @@ implements committedFiles -privatehttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true; title="class or interface in java.util">Listorg.apache.hadoop.fs.Path committedFiles +privatehttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true; title="class or interface in java.util">Listorg.apache.hadoop.fs.Path committedFiles @@ -310,7 +310,7 @@ implements cacheFlushCount -privatelong cacheFlushCount +privatelong cacheFlushCount @@ -319,7 +319,7 @@ implements cacheFlushSize -privatelong cacheFlushSize +privatelong 
cacheFlushSize @@ -328,7 +328,7 @@ implements outputFileSize -privatelong outputFileSize +privatelong outputFileSize @@ -345,7 +345,7 @@ implements StoreFlusherImpl -privateStoreFlusherImpl(longcacheFlushSeqNum) +privateStoreFlusherImpl(longcacheFlushSeqNum) @@ -362,7 +362,7 @@ implements prepare -publicvoidprepare() +publicvoidprepare() This is not thread safe. The caller should have a lock on the region or the store. If necessary, the lock can be added with the patch provided in HBASE-10087 @@ -377,7 +377,7 @@ implements flushCache -publicvoidflushCache(MonitoredTaskstatus) +publicvoidflushCache(MonitoredTaskstatus) throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true; title="class or interface in java.io">IOException Description copied from interface:StoreFlushContext Flush the cache (create the new store file) @@ -398,7 +398,7 @@ implements commit -publicbooleancommit(MonitoredTaskstatus) +publicbooleancommit(MonitoredTaskstatus) throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true; title="class or interface in java.io">IOException Description copied from interface:StoreFlushContext Commit the flush - add the store file to the store and clear the @@ -422,7 +422,7 @@ implements getOutputFileSize -publiclonggetOutputFileSize() +publiclonggetOutputFileSize() Specified by: getOutputFileSizein interfaceStoreFlushContext @@ -437,7 +437,7 @@ implements getCommittedFiles -publichttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true; title="class or interface in java.util">Listorg.apache.hadoop.fs.PathgetCommittedFiles() +publichttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true; title="class or interface in java.util">Listorg.apache.hadoop.fs.PathgetCommittedFiles() Description copied from interface:StoreFlushContext Returns the newly committed files from the flush. 
Called only if commit returns true @@ -454,7 +454,7 @@ implements replayFlush -publicvoidreplayFlush(http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true; title="class or interface in java.util">Listhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">StringfileNames, +publicvoidreplayFlush(http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true; title="class or interface in java.util">Listhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">StringfileNames, booleandropMemstoreSnapshot) throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true; title="class
[17/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/5a2158f2/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResult.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResult.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResult.html index 5d138ea..81d256e 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResult.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResult.html @@ -2387,7 +2387,7 @@ 2379 return true; 2380} 2381long modifiedFlushCheckInterval = flushCheckInterval; -2382if (getRegionInfo().isSystemTable() +2382if (getRegionInfo().getTable().isSystemTable() 2383getRegionInfo().getReplicaId() == RegionInfo.DEFAULT_REPLICA_ID) { 2384 modifiedFlushCheckInterval = SYSTEM_CACHE_FLUSH_INTERVAL; 2385} @@ -7869,7 +7869,7 @@ 7861 */ 7862 public byte[] checkSplit() { 7863// Can't split META -7864if (this.getRegionInfo().isMetaTable() || +7864if (this.getRegionInfo().isMetaRegion() || 7865 TableName.NAMESPACE_TABLE_NAME.equals(this.getRegionInfo().getTable())) { 7866 if (shouldForceSplit()) { 7867LOG.warn("Cannot split meta region in HBase 0.20 and above"); @@ -7914,374 +7914,372 @@ 7906 } 7907 7908 /** @return the coprocessor host */ -7909 @Override -7910 public RegionCoprocessorHost getCoprocessorHost() { -7911return coprocessorHost; -7912 } -7913 -7914 /** @param coprocessorHost the new coprocessor host */ -7915 public void setCoprocessorHost(final RegionCoprocessorHost coprocessorHost) { -7916this.coprocessorHost = coprocessorHost; -7917 } -7918 -7919 @Override -7920 public void startRegionOperation() throws IOException { -7921 startRegionOperation(Operation.ANY); -7922 } -7923 -7924 @Override -7925 @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="SF_SWITCH_FALLTHROUGH", -7926justification="Intentional") -7927 public void startRegionOperation(Operation op) throws IOException { -7928switch (op) { 
-7929 case GET: // read operations -7930 case SCAN: -7931checkReadsEnabled(); -7932 case INCREMENT: // write operations -7933 case APPEND: -7934 case SPLIT_REGION: -7935 case MERGE_REGION: -7936 case PUT: -7937 case DELETE: -7938 case BATCH_MUTATE: -7939 case COMPACT_REGION: -7940 case SNAPSHOT: -7941// when a region is in recovering state, no read, split, merge or snapshot is allowed -7942if (isRecovering() (this.disallowWritesInRecovering || -7943 (op != Operation.PUT op != Operation.DELETE op != Operation.BATCH_MUTATE))) { -7944 throw new RegionInRecoveryException(getRegionInfo().getRegionNameAsString() + -7945" is recovering; cannot take reads"); -7946} -7947break; -7948 default: -7949break; -7950} -7951if (op == Operation.MERGE_REGION || op == Operation.SPLIT_REGION -7952|| op == Operation.COMPACT_REGION) { -7953 // split, merge or compact region doesn't need to check the closing/closed state or lock the -7954 // region -7955 return; -7956} -7957if (this.closing.get()) { -7958 throw new NotServingRegionException(getRegionInfo().getRegionNameAsString() + " is closing"); -7959} -7960lock(lock.readLock()); -7961if (this.closed.get()) { -7962 lock.readLock().unlock(); -7963 throw new NotServingRegionException(getRegionInfo().getRegionNameAsString() + " is closed"); -7964} -7965// The unit for snapshot is a region. So, all stores for this region must be -7966// prepared for snapshot operation before proceeding. 
-7967if (op == Operation.SNAPSHOT) { -7968 stores.values().forEach(HStore::preSnapshotOperation); -7969} -7970try { -7971 if (coprocessorHost != null) { -7972 coprocessorHost.postStartRegionOperation(op); -7973 } -7974} catch (Exception e) { -7975 lock.readLock().unlock(); -7976 throw new IOException(e); -7977} -7978 } -7979 -7980 @Override -7981 public void closeRegionOperation() throws IOException { -7982 closeRegionOperation(Operation.ANY); -7983 } -7984 -7985 @Override -7986 public void closeRegionOperation(Operation operation) throws IOException { -7987if (operation == Operation.SNAPSHOT) { -7988 stores.values().forEach(HStore::postSnapshotOperation); -7989} -7990lock.readLock().unlock(); -7991if (coprocessorHost != null) { -7992 coprocessorHost.postCloseRegionOperation(operation); -7993} -7994 } -7995 -7996 /** -7997 * This method needs to be called before any public call that reads or -7998 * modifies stores in bulk. It has to be called just before a try. -7999 *
[01/51] [partial] hbase-site git commit: Published site at .
Repository: hbase-site Updated Branches: refs/heads/asf-site 6ca7e9400 -> 5a2158f21 http://git-wip-us.apache.org/repos/asf/hbase-site/blob/5a2158f2/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.RegionEnvironment.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.RegionEnvironment.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.RegionEnvironment.html index 8aeb6da..dbadad7 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.RegionEnvironment.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.RegionEnvironment.html @@ -99,1432 +99,1431 @@ 091 * Implements the coprocessor environment and runtime support for coprocessors 092 * loaded within a {@link Region}. 093 */ -094@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC) -095@InterfaceStability.Evolving -096public class RegionCoprocessorHost -097extends CoprocessorHostRegionCoprocessor, RegionCoprocessorEnvironment { -098 -099 private static final Log LOG = LogFactory.getLog(RegionCoprocessorHost.class); -100 // The shared data map -101 private static final ReferenceMapString, ConcurrentMapString, Object SHARED_DATA_MAP = -102 new ReferenceMap(AbstractReferenceMap.ReferenceStrength.HARD, -103 AbstractReferenceMap.ReferenceStrength.WEAK); -104 -105 // optimization: no need to call postScannerFilterRow, if no coprocessor implements it -106 private final boolean hasCustomPostScannerFilterRow; -107 -108 /** -109 * -110 * Encapsulation of the environment of each coprocessor -111 */ -112 static class RegionEnvironment extends BaseEnvironmentRegionCoprocessor -113 implements RegionCoprocessorEnvironment { -114 -115private Region region; -116private RegionServerServices rsServices; -117ConcurrentMapString, Object sharedData; -118private final MetricRegistry metricRegistry; -119 -120/** -121 * Constructor -122 * @param 
impl the coprocessor instance -123 * @param priority chaining priority -124 */ -125public RegionEnvironment(final RegionCoprocessor impl, final int priority, -126final int seq, final Configuration conf, final Region region, -127final RegionServerServices services, final ConcurrentMapString, Object sharedData) { -128 super(impl, priority, seq, conf); -129 this.region = region; -130 this.rsServices = services; -131 this.sharedData = sharedData; -132 this.metricRegistry = -133 MetricsCoprocessor.createRegistryForRegionCoprocessor(impl.getClass().getName()); -134} -135 -136/** @return the region */ -137@Override -138public Region getRegion() { -139 return region; -140} -141 -142/** @return reference to the region server services */ -143@Override -144public CoprocessorRegionServerServices getCoprocessorRegionServerServices() { -145 return rsServices; -146} -147 -148@Override -149public void shutdown() { -150 super.shutdown(); -151 MetricsCoprocessor.removeRegistry(this.metricRegistry); -152} -153 -154@Override -155public ConcurrentMapString, Object getSharedData() { -156 return sharedData; -157} -158 -159@Override -160public RegionInfo getRegionInfo() { -161 return region.getRegionInfo(); -162} -163 -164@Override -165public MetricRegistry getMetricRegistryForRegionServer() { -166 return metricRegistry; -167} -168 } -169 -170 static class TableCoprocessorAttribute { -171private Path path; -172private String className; -173private int priority; -174private Configuration conf; -175 -176public TableCoprocessorAttribute(Path path, String className, int priority, -177Configuration conf) { -178 this.path = path; -179 this.className = className; -180 this.priority = priority; -181 this.conf = conf; -182} -183 -184public Path getPath() { -185 return path; -186} -187 -188public String getClassName() { -189 return className; -190} -191 -192public int getPriority() { -193 return priority; -194} -195 -196public Configuration getConf() { -197 return conf; -198} -199 } -200 -201 /** 
The region server services */ -202 RegionServerServices rsServices; -203 /** The region */ -204 HRegion region; -205 -206 /** -207 * Constructor -208 * @param region the region -209 * @param rsServices interface to available region server functionality -210 * @param conf the configuration -211 */ -212 public RegionCoprocessorHost(final HRegion region, -213 final RegionServerServices rsServices, final Configuration conf) { -214super(rsServices); -215this.conf = conf; -216this.rsServices = rsServices;
[23/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/5a2158f2/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/Export.SecureWriter.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/Export.SecureWriter.html b/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/Export.SecureWriter.html index 95f6ef8..45f0981 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/Export.SecureWriter.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/Export.SecureWriter.html @@ -61,487 +61,488 @@ 053import org.apache.hadoop.hbase.protobuf.ProtobufUtil; 054import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.DelegationToken; 055import org.apache.hadoop.hbase.protobuf.generated.ExportProtos; -056import org.apache.hadoop.hbase.regionserver.InternalScanner; -057import org.apache.hadoop.hbase.regionserver.Region; -058import org.apache.hadoop.hbase.regionserver.RegionScanner; -059import org.apache.hadoop.hbase.security.User; -060import org.apache.hadoop.hbase.security.UserProvider; -061import org.apache.hadoop.hbase.security.token.FsDelegationToken; -062import org.apache.hadoop.hbase.util.ArrayUtils; -063import org.apache.hadoop.hbase.util.ByteStringer; -064import org.apache.hadoop.hbase.util.Bytes; -065import org.apache.hadoop.hbase.util.Triple; -066import org.apache.hadoop.io.SequenceFile; -067import org.apache.hadoop.io.Text; -068import org.apache.hadoop.io.compress.CompressionCodec; -069import org.apache.hadoop.io.compress.DefaultCodec; -070import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat; -071import org.apache.hadoop.security.token.Token; -072import org.apache.hadoop.util.GenericOptionsParser; -073import org.apache.hadoop.util.ReflectionUtils; -074import org.apache.yetus.audience.InterfaceAudience; -075import org.apache.yetus.audience.InterfaceStability; -076 -077import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting; -078 -079import 
com.google.protobuf.RpcCallback; -080import com.google.protobuf.RpcController; -081import com.google.protobuf.Service; -082 -083/** -084 * Export an HBase table. Writes content to sequence files up in HDFS. Use -085 * {@link Import} to read it back in again. It is implemented by the endpoint -086 * technique. -087 * -088 * @see org.apache.hadoop.hbase.mapreduce.Export -089 */ -090@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC) -091@InterfaceStability.Evolving -092public class Export extends ExportProtos.ExportService implements RegionCoprocessor { -093 -094 private static final Log LOG = LogFactory.getLog(Export.class); -095 private static final Class? extends CompressionCodec DEFAULT_CODEC = DefaultCodec.class; -096 private static final SequenceFile.CompressionType DEFAULT_TYPE = SequenceFile.CompressionType.RECORD; -097 private RegionCoprocessorEnvironment env = null; -098 private UserProvider userProvider; -099 -100 public static void main(String[] args) throws Throwable { -101Mapbyte[], Response response = run(HBaseConfiguration.create(), args); -102System.exit(response == null ? 
-1 : 0); -103 } -104 -105 @VisibleForTesting -106 static Mapbyte[], Response run(final Configuration conf, final String[] args) throws Throwable { -107String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs(); -108if (!ExportUtils.isValidArguements(args)) { -109 ExportUtils.usage("Wrong number of arguments: " + ArrayUtils.length(otherArgs)); -110 return null; -111} -112TripleTableName, Scan, Path arguments = ExportUtils.getArgumentsFromCommandLine(conf, otherArgs); -113return run(conf, arguments.getFirst(), arguments.getSecond(), arguments.getThird()); -114 } -115 -116 public static Mapbyte[], Response run(final Configuration conf, TableName tableName, Scan scan, Path dir) throws Throwable { -117FileSystem fs = dir.getFileSystem(conf); -118UserProvider userProvider = UserProvider.instantiate(conf); -119checkDir(fs, dir); -120FsDelegationToken fsDelegationToken = new FsDelegationToken(userProvider, "renewer"); -121 fsDelegationToken.acquireDelegationToken(fs); -122try { -123 final ExportProtos.ExportRequest request = getConfiguredRequest(conf, dir, -124scan, fsDelegationToken.getUserToken()); -125 try (Connection con = ConnectionFactory.createConnection(conf); -126 Table table = con.getTable(tableName)) { -127Mapbyte[], Response result = new TreeMap(Bytes.BYTES_COMPARATOR); -128 table.coprocessorService(ExportProtos.ExportService.class, -129 scan.getStartRow(), -130 scan.getStopRow(), -131 (ExportProtos.ExportService service) - { -132ServerRpcController controller = new ServerRpcController(); -133Mapbyte[], ExportProtos.ExportResponse rval = new
[25/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/5a2158f2/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/Export.Response.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/Export.Response.html b/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/Export.Response.html index 95f6ef8..45f0981 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/Export.Response.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/Export.Response.html @@ -61,487 +61,488 @@ 053import org.apache.hadoop.hbase.protobuf.ProtobufUtil; 054import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.DelegationToken; 055import org.apache.hadoop.hbase.protobuf.generated.ExportProtos; -056import org.apache.hadoop.hbase.regionserver.InternalScanner; -057import org.apache.hadoop.hbase.regionserver.Region; -058import org.apache.hadoop.hbase.regionserver.RegionScanner; -059import org.apache.hadoop.hbase.security.User; -060import org.apache.hadoop.hbase.security.UserProvider; -061import org.apache.hadoop.hbase.security.token.FsDelegationToken; -062import org.apache.hadoop.hbase.util.ArrayUtils; -063import org.apache.hadoop.hbase.util.ByteStringer; -064import org.apache.hadoop.hbase.util.Bytes; -065import org.apache.hadoop.hbase.util.Triple; -066import org.apache.hadoop.io.SequenceFile; -067import org.apache.hadoop.io.Text; -068import org.apache.hadoop.io.compress.CompressionCodec; -069import org.apache.hadoop.io.compress.DefaultCodec; -070import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat; -071import org.apache.hadoop.security.token.Token; -072import org.apache.hadoop.util.GenericOptionsParser; -073import org.apache.hadoop.util.ReflectionUtils; -074import org.apache.yetus.audience.InterfaceAudience; -075import org.apache.yetus.audience.InterfaceStability; -076 -077import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting; -078 -079import com.google.protobuf.RpcCallback; -080import 
com.google.protobuf.RpcController; -081import com.google.protobuf.Service; -082 -083/** -084 * Export an HBase table. Writes content to sequence files up in HDFS. Use -085 * {@link Import} to read it back in again. It is implemented by the endpoint -086 * technique. -087 * -088 * @see org.apache.hadoop.hbase.mapreduce.Export -089 */ -090@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC) -091@InterfaceStability.Evolving -092public class Export extends ExportProtos.ExportService implements RegionCoprocessor { -093 -094 private static final Log LOG = LogFactory.getLog(Export.class); -095 private static final Class? extends CompressionCodec DEFAULT_CODEC = DefaultCodec.class; -096 private static final SequenceFile.CompressionType DEFAULT_TYPE = SequenceFile.CompressionType.RECORD; -097 private RegionCoprocessorEnvironment env = null; -098 private UserProvider userProvider; -099 -100 public static void main(String[] args) throws Throwable { -101Mapbyte[], Response response = run(HBaseConfiguration.create(), args); -102System.exit(response == null ? 
-1 : 0); -103 } -104 -105 @VisibleForTesting -106 static Mapbyte[], Response run(final Configuration conf, final String[] args) throws Throwable { -107String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs(); -108if (!ExportUtils.isValidArguements(args)) { -109 ExportUtils.usage("Wrong number of arguments: " + ArrayUtils.length(otherArgs)); -110 return null; -111} -112TripleTableName, Scan, Path arguments = ExportUtils.getArgumentsFromCommandLine(conf, otherArgs); -113return run(conf, arguments.getFirst(), arguments.getSecond(), arguments.getThird()); -114 } -115 -116 public static Mapbyte[], Response run(final Configuration conf, TableName tableName, Scan scan, Path dir) throws Throwable { -117FileSystem fs = dir.getFileSystem(conf); -118UserProvider userProvider = UserProvider.instantiate(conf); -119checkDir(fs, dir); -120FsDelegationToken fsDelegationToken = new FsDelegationToken(userProvider, "renewer"); -121 fsDelegationToken.acquireDelegationToken(fs); -122try { -123 final ExportProtos.ExportRequest request = getConfiguredRequest(conf, dir, -124scan, fsDelegationToken.getUserToken()); -125 try (Connection con = ConnectionFactory.createConnection(conf); -126 Table table = con.getTable(tableName)) { -127Mapbyte[], Response result = new TreeMap(Bytes.BYTES_COMPARATOR); -128 table.coprocessorService(ExportProtos.ExportService.class, -129 scan.getStartRow(), -130 scan.getStopRow(), -131 (ExportProtos.ExportService service) - { -132ServerRpcController controller = new ServerRpcController(); -133Mapbyte[], ExportProtos.ExportResponse rval = new TreeMap(Bytes.BYTES_COMPARATOR); -134
[12/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/5a2158f2/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html index 5d138ea..81d256e 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html @@ -2387,7 +2387,7 @@ 2379 return true; 2380} 2381long modifiedFlushCheckInterval = flushCheckInterval; -2382if (getRegionInfo().isSystemTable() +2382if (getRegionInfo().getTable().isSystemTable() 2383getRegionInfo().getReplicaId() == RegionInfo.DEFAULT_REPLICA_ID) { 2384 modifiedFlushCheckInterval = SYSTEM_CACHE_FLUSH_INTERVAL; 2385} @@ -7869,7 +7869,7 @@ 7861 */ 7862 public byte[] checkSplit() { 7863// Can't split META -7864if (this.getRegionInfo().isMetaTable() || +7864if (this.getRegionInfo().isMetaRegion() || 7865 TableName.NAMESPACE_TABLE_NAME.equals(this.getRegionInfo().getTable())) { 7866 if (shouldForceSplit()) { 7867LOG.warn("Cannot split meta region in HBase 0.20 and above"); @@ -7914,374 +7914,372 @@ 7906 } 7907 7908 /** @return the coprocessor host */ -7909 @Override -7910 public RegionCoprocessorHost getCoprocessorHost() { -7911return coprocessorHost; -7912 } -7913 -7914 /** @param coprocessorHost the new coprocessor host */ -7915 public void setCoprocessorHost(final RegionCoprocessorHost coprocessorHost) { -7916this.coprocessorHost = coprocessorHost; -7917 } -7918 -7919 @Override -7920 public void startRegionOperation() throws IOException { -7921 startRegionOperation(Operation.ANY); -7922 } -7923 -7924 @Override -7925 @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="SF_SWITCH_FALLTHROUGH", -7926justification="Intentional") -7927 public void startRegionOperation(Operation op) throws 
IOException { -7928switch (op) { -7929 case GET: // read operations -7930 case SCAN: -7931checkReadsEnabled(); -7932 case INCREMENT: // write operations -7933 case APPEND: -7934 case SPLIT_REGION: -7935 case MERGE_REGION: -7936 case PUT: -7937 case DELETE: -7938 case BATCH_MUTATE: -7939 case COMPACT_REGION: -7940 case SNAPSHOT: -7941// when a region is in recovering state, no read, split, merge or snapshot is allowed -7942if (isRecovering() (this.disallowWritesInRecovering || -7943 (op != Operation.PUT op != Operation.DELETE op != Operation.BATCH_MUTATE))) { -7944 throw new RegionInRecoveryException(getRegionInfo().getRegionNameAsString() + -7945" is recovering; cannot take reads"); -7946} -7947break; -7948 default: -7949break; -7950} -7951if (op == Operation.MERGE_REGION || op == Operation.SPLIT_REGION -7952|| op == Operation.COMPACT_REGION) { -7953 // split, merge or compact region doesn't need to check the closing/closed state or lock the -7954 // region -7955 return; -7956} -7957if (this.closing.get()) { -7958 throw new NotServingRegionException(getRegionInfo().getRegionNameAsString() + " is closing"); -7959} -7960lock(lock.readLock()); -7961if (this.closed.get()) { -7962 lock.readLock().unlock(); -7963 throw new NotServingRegionException(getRegionInfo().getRegionNameAsString() + " is closed"); -7964} -7965// The unit for snapshot is a region. So, all stores for this region must be -7966// prepared for snapshot operation before proceeding. 
-7967if (op == Operation.SNAPSHOT) { -7968 stores.values().forEach(HStore::preSnapshotOperation); -7969} -7970try { -7971 if (coprocessorHost != null) { -7972 coprocessorHost.postStartRegionOperation(op); -7973 } -7974} catch (Exception e) { -7975 lock.readLock().unlock(); -7976 throw new IOException(e); -7977} -7978 } -7979 -7980 @Override -7981 public void closeRegionOperation() throws IOException { -7982 closeRegionOperation(Operation.ANY); -7983 } -7984 -7985 @Override -7986 public void closeRegionOperation(Operation operation) throws IOException { -7987if (operation == Operation.SNAPSHOT) { -7988 stores.values().forEach(HStore::postSnapshotOperation); -7989} -7990lock.readLock().unlock(); -7991if (coprocessorHost != null) { -7992 coprocessorHost.postCloseRegionOperation(operation); -7993} -7994 } -7995 -7996 /** -7997 * This method needs to be called before any public call that reads or -7998 * modifies stores in bulk. It has to be called just before a try.
[13/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/5a2158f2/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.PrepareFlushResult.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.PrepareFlushResult.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.PrepareFlushResult.html index 5d138ea..81d256e 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.PrepareFlushResult.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.PrepareFlushResult.html @@ -2387,7 +2387,7 @@ 2379 return true; 2380} 2381long modifiedFlushCheckInterval = flushCheckInterval; -2382if (getRegionInfo().isSystemTable() +2382if (getRegionInfo().getTable().isSystemTable() 2383getRegionInfo().getReplicaId() == RegionInfo.DEFAULT_REPLICA_ID) { 2384 modifiedFlushCheckInterval = SYSTEM_CACHE_FLUSH_INTERVAL; 2385} @@ -7869,7 +7869,7 @@ 7861 */ 7862 public byte[] checkSplit() { 7863// Can't split META -7864if (this.getRegionInfo().isMetaTable() || +7864if (this.getRegionInfo().isMetaRegion() || 7865 TableName.NAMESPACE_TABLE_NAME.equals(this.getRegionInfo().getTable())) { 7866 if (shouldForceSplit()) { 7867LOG.warn("Cannot split meta region in HBase 0.20 and above"); @@ -7914,374 +7914,372 @@ 7906 } 7907 7908 /** @return the coprocessor host */ -7909 @Override -7910 public RegionCoprocessorHost getCoprocessorHost() { -7911return coprocessorHost; -7912 } -7913 -7914 /** @param coprocessorHost the new coprocessor host */ -7915 public void setCoprocessorHost(final RegionCoprocessorHost coprocessorHost) { -7916this.coprocessorHost = coprocessorHost; -7917 } -7918 -7919 @Override -7920 public void startRegionOperation() throws IOException { -7921 startRegionOperation(Operation.ANY); -7922 } -7923 -7924 @Override -7925 @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="SF_SWITCH_FALLTHROUGH", -7926justification="Intentional") -7927 public void startRegionOperation(Operation op) throws 
IOException { -7928switch (op) { -7929 case GET: // read operations -7930 case SCAN: -7931checkReadsEnabled(); -7932 case INCREMENT: // write operations -7933 case APPEND: -7934 case SPLIT_REGION: -7935 case MERGE_REGION: -7936 case PUT: -7937 case DELETE: -7938 case BATCH_MUTATE: -7939 case COMPACT_REGION: -7940 case SNAPSHOT: -7941// when a region is in recovering state, no read, split, merge or snapshot is allowed -7942if (isRecovering() (this.disallowWritesInRecovering || -7943 (op != Operation.PUT op != Operation.DELETE op != Operation.BATCH_MUTATE))) { -7944 throw new RegionInRecoveryException(getRegionInfo().getRegionNameAsString() + -7945" is recovering; cannot take reads"); -7946} -7947break; -7948 default: -7949break; -7950} -7951if (op == Operation.MERGE_REGION || op == Operation.SPLIT_REGION -7952|| op == Operation.COMPACT_REGION) { -7953 // split, merge or compact region doesn't need to check the closing/closed state or lock the -7954 // region -7955 return; -7956} -7957if (this.closing.get()) { -7958 throw new NotServingRegionException(getRegionInfo().getRegionNameAsString() + " is closing"); -7959} -7960lock(lock.readLock()); -7961if (this.closed.get()) { -7962 lock.readLock().unlock(); -7963 throw new NotServingRegionException(getRegionInfo().getRegionNameAsString() + " is closed"); -7964} -7965// The unit for snapshot is a region. So, all stores for this region must be -7966// prepared for snapshot operation before proceeding. 
-7967if (op == Operation.SNAPSHOT) { -7968 stores.values().forEach(HStore::preSnapshotOperation); -7969} -7970try { -7971 if (coprocessorHost != null) { -7972 coprocessorHost.postStartRegionOperation(op); -7973 } -7974} catch (Exception e) { -7975 lock.readLock().unlock(); -7976 throw new IOException(e); -7977} -7978 } -7979 -7980 @Override -7981 public void closeRegionOperation() throws IOException { -7982 closeRegionOperation(Operation.ANY); -7983 } -7984 -7985 @Override -7986 public void closeRegionOperation(Operation operation) throws IOException { -7987if (operation == Operation.SNAPSHOT) { -7988 stores.values().forEach(HStore::postSnapshotOperation); -7989} -7990lock.readLock().unlock(); -7991if (coprocessorHost != null) { -7992 coprocessorHost.postCloseRegionOperation(operation); -7993} -7994 } -7995 -7996 /** -7997 * This method needs to be called before any public call that reads or -7998 * modifies stores in bulk. It has to be called just before a
[15/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/5a2158f2/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.MutationBatch.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.MutationBatch.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.MutationBatch.html index 5d138ea..81d256e 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.MutationBatch.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.MutationBatch.html @@ -2387,7 +2387,7 @@ 2379 return true; 2380} 2381long modifiedFlushCheckInterval = flushCheckInterval; -2382if (getRegionInfo().isSystemTable() +2382if (getRegionInfo().getTable().isSystemTable() 2383getRegionInfo().getReplicaId() == RegionInfo.DEFAULT_REPLICA_ID) { 2384 modifiedFlushCheckInterval = SYSTEM_CACHE_FLUSH_INTERVAL; 2385} @@ -7869,7 +7869,7 @@ 7861 */ 7862 public byte[] checkSplit() { 7863// Can't split META -7864if (this.getRegionInfo().isMetaTable() || +7864if (this.getRegionInfo().isMetaRegion() || 7865 TableName.NAMESPACE_TABLE_NAME.equals(this.getRegionInfo().getTable())) { 7866 if (shouldForceSplit()) { 7867LOG.warn("Cannot split meta region in HBase 0.20 and above"); @@ -7914,374 +7914,372 @@ 7906 } 7907 7908 /** @return the coprocessor host */ -7909 @Override -7910 public RegionCoprocessorHost getCoprocessorHost() { -7911return coprocessorHost; -7912 } -7913 -7914 /** @param coprocessorHost the new coprocessor host */ -7915 public void setCoprocessorHost(final RegionCoprocessorHost coprocessorHost) { -7916this.coprocessorHost = coprocessorHost; -7917 } -7918 -7919 @Override -7920 public void startRegionOperation() throws IOException { -7921 startRegionOperation(Operation.ANY); -7922 } -7923 -7924 @Override -7925 @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="SF_SWITCH_FALLTHROUGH", -7926justification="Intentional") -7927 public void startRegionOperation(Operation op) throws IOException { -7928switch 
(op) { -7929 case GET: // read operations -7930 case SCAN: -7931checkReadsEnabled(); -7932 case INCREMENT: // write operations -7933 case APPEND: -7934 case SPLIT_REGION: -7935 case MERGE_REGION: -7936 case PUT: -7937 case DELETE: -7938 case BATCH_MUTATE: -7939 case COMPACT_REGION: -7940 case SNAPSHOT: -7941// when a region is in recovering state, no read, split, merge or snapshot is allowed -7942if (isRecovering() (this.disallowWritesInRecovering || -7943 (op != Operation.PUT op != Operation.DELETE op != Operation.BATCH_MUTATE))) { -7944 throw new RegionInRecoveryException(getRegionInfo().getRegionNameAsString() + -7945" is recovering; cannot take reads"); -7946} -7947break; -7948 default: -7949break; -7950} -7951if (op == Operation.MERGE_REGION || op == Operation.SPLIT_REGION -7952|| op == Operation.COMPACT_REGION) { -7953 // split, merge or compact region doesn't need to check the closing/closed state or lock the -7954 // region -7955 return; -7956} -7957if (this.closing.get()) { -7958 throw new NotServingRegionException(getRegionInfo().getRegionNameAsString() + " is closing"); -7959} -7960lock(lock.readLock()); -7961if (this.closed.get()) { -7962 lock.readLock().unlock(); -7963 throw new NotServingRegionException(getRegionInfo().getRegionNameAsString() + " is closed"); -7964} -7965// The unit for snapshot is a region. So, all stores for this region must be -7966// prepared for snapshot operation before proceeding. 
-7967if (op == Operation.SNAPSHOT) { -7968 stores.values().forEach(HStore::preSnapshotOperation); -7969} -7970try { -7971 if (coprocessorHost != null) { -7972 coprocessorHost.postStartRegionOperation(op); -7973 } -7974} catch (Exception e) { -7975 lock.readLock().unlock(); -7976 throw new IOException(e); -7977} -7978 } -7979 -7980 @Override -7981 public void closeRegionOperation() throws IOException { -7982 closeRegionOperation(Operation.ANY); -7983 } -7984 -7985 @Override -7986 public void closeRegionOperation(Operation operation) throws IOException { -7987if (operation == Operation.SNAPSHOT) { -7988 stores.values().forEach(HStore::postSnapshotOperation); -7989} -7990lock.readLock().unlock(); -7991if (coprocessorHost != null) { -7992 coprocessorHost.postCloseRegionOperation(operation); -7993} -7994 } -7995 -7996 /** -7997 * This method needs to be called before any public call that reads or -7998 * modifies stores in bulk. It has to be called just before a try. -7999 *
[40/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/5a2158f2/devapidocs/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.html b/devapidocs/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.html index 3102325..ddc6dc9 100644 --- a/devapidocs/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.html +++ b/devapidocs/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.html @@ -114,9 +114,8 @@ var activeTableTab = "activeTableTab"; -@InterfaceAudience.LimitedPrivate(value="Coprocesssor") - @InterfaceStability.Evolving -public class RegionCoprocessorHost +@InterfaceAudience.Private +public class RegionCoprocessorHost extends CoprocessorHostRegionCoprocessor,RegionCoprocessorEnvironment Implements the coprocessor environment and runtime support for coprocessors loaded within a Region. @@ -736,7 +735,7 @@ extends LOG -private static finalorg.apache.commons.logging.Log LOG +private static finalorg.apache.commons.logging.Log LOG @@ -745,7 +744,7 @@ extends SHARED_DATA_MAP -private static finalorg.apache.commons.collections4.map.ReferenceMaphttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">String,http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ConcurrentMap.html?is-external=true; title="class or interface in java.util.concurrent">ConcurrentMaphttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">String,http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true; title="class or interface in java.lang">Object SHARED_DATA_MAP +private static finalorg.apache.commons.collections4.map.ReferenceMaphttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in 
java.lang">String,http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ConcurrentMap.html?is-external=true; title="class or interface in java.util.concurrent">ConcurrentMaphttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">String,http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true; title="class or interface in java.lang">Object SHARED_DATA_MAP @@ -754,7 +753,7 @@ extends hasCustomPostScannerFilterRow -private finalboolean hasCustomPostScannerFilterRow +private finalboolean hasCustomPostScannerFilterRow @@ -763,7 +762,7 @@ extends rsServices -RegionServerServices rsServices +RegionServerServices rsServices The region server services @@ -773,7 +772,7 @@ extends region -HRegion region +HRegion region The region @@ -783,7 +782,7 @@ extends regionObserverGetter -privateCoprocessorHost.ObserverGetterRegionCoprocessor,RegionObserver regionObserverGetter +privateCoprocessorHost.ObserverGetterRegionCoprocessor,RegionObserver regionObserverGetter @@ -792,7 +791,7 @@ extends endpointObserverGetter -privateCoprocessorHost.ObserverGetterRegionCoprocessor,EndpointObserver endpointObserverGetter +privateCoprocessorHost.ObserverGetterRegionCoprocessor,EndpointObserver endpointObserverGetter @@ -809,7 +808,7 @@ extends RegionCoprocessorHost -publicRegionCoprocessorHost(HRegionregion, +publicRegionCoprocessorHost(HRegionregion, RegionServerServicesrsServices, org.apache.hadoop.conf.Configurationconf) Constructor @@ -835,7 +834,7 @@ extends getTableCoprocessorAttrsFromSchema -statichttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true; title="class or interface in java.util">ListRegionCoprocessorHost.TableCoprocessorAttributegetTableCoprocessorAttrsFromSchema(org.apache.hadoop.conf.Configurationconf, +statichttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true; title="class or interface in 
java.util">ListRegionCoprocessorHost.TableCoprocessorAttributegetTableCoprocessorAttrsFromSchema(org.apache.hadoop.conf.Configurationconf, TableDescriptorhtd) @@ -845,7 +844,7 @@ extends testTableCoprocessorAttrs -public staticvoidtestTableCoprocessorAttrs(org.apache.hadoop.conf.Configurationconf, +public staticvoidtestTableCoprocessorAttrs(org.apache.hadoop.conf.Configurationconf, TableDescriptorhtd) throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true; title="class or interface in java.io">IOException Sanity check the table coprocessor attributes of the supplied schema. Will @@ -865,7 +864,7 @@ extends loadTableCoprocessors
[28/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/5a2158f2/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.html b/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.html index ed99dd8..cb1b7e5 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.html @@ -66,670 +66,669 @@ 058 * @param E type of specific coprocessor environment this host requires. 059 * provides 060 */ -061@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC) -062@InterfaceStability.Evolving -063public abstract class CoprocessorHostC extends Coprocessor, E extends CoprocessorEnvironmentC { -064 public static final String REGION_COPROCESSOR_CONF_KEY = -065 "hbase.coprocessor.region.classes"; -066 public static final String REGIONSERVER_COPROCESSOR_CONF_KEY = -067 "hbase.coprocessor.regionserver.classes"; -068 public static final String USER_REGION_COPROCESSOR_CONF_KEY = -069 "hbase.coprocessor.user.region.classes"; -070 public static final String MASTER_COPROCESSOR_CONF_KEY = -071 "hbase.coprocessor.master.classes"; -072 public static final String WAL_COPROCESSOR_CONF_KEY = -073"hbase.coprocessor.wal.classes"; -074 public static final String ABORT_ON_ERROR_KEY = "hbase.coprocessor.abortonerror"; -075 public static final boolean DEFAULT_ABORT_ON_ERROR = true; -076 public static final String COPROCESSORS_ENABLED_CONF_KEY = "hbase.coprocessor.enabled"; -077 public static final boolean DEFAULT_COPROCESSORS_ENABLED = true; -078 public static final String USER_COPROCESSORS_ENABLED_CONF_KEY = -079"hbase.coprocessor.user.enabled"; -080 public static final boolean DEFAULT_USER_COPROCESSORS_ENABLED = true; -081 -082 private static final Log LOG = LogFactory.getLog(CoprocessorHost.class); -083 protected Abortable abortable; -084 /** Ordered set of 
loaded coprocessors with lock */ -085 protected final SortedListE coprocEnvironments = -086 new SortedList(new EnvironmentPriorityComparator()); -087 protected Configuration conf; -088 // unique file prefix to use for local copies of jars when classloading -089 protected String pathPrefix; -090 protected AtomicInteger loadSequence = new AtomicInteger(); -091 -092 public CoprocessorHost(Abortable abortable) { -093this.abortable = abortable; -094this.pathPrefix = UUID.randomUUID().toString(); -095 } -096 -097 /** -098 * Not to be confused with the per-object _coprocessors_ (above), -099 * coprocessorNames is static and stores the set of all coprocessors ever -100 * loaded by any thread in this JVM. It is strictly additive: coprocessors are -101 * added to coprocessorNames, by checkAndLoadInstance() but are never removed, since -102 * the intention is to preserve a history of all loaded coprocessors for -103 * diagnosis in case of server crash (HBASE-4014). -104 */ -105 private static SetString coprocessorNames = -106 Collections.synchronizedSet(new HashSetString()); -107 -108 public static SetString getLoadedCoprocessors() { -109synchronized (coprocessorNames) { -110 return new HashSet(coprocessorNames); -111} -112 } -113 -114 /** -115 * Used to create a parameter to the HServerLoad constructor so that -116 * HServerLoad can provide information about the coprocessors loaded by this -117 * regionserver. -118 * (HBASE-4070: Improve region server metrics to report loaded coprocessors -119 * to master). -120 */ -121 public SetString getCoprocessors() { -122SetString returnValue = new TreeSet(); -123for (E e: coprocEnvironments) { -124 returnValue.add(e.getInstance().getClass().getSimpleName()); -125} -126return returnValue; -127 } -128 -129 /** -130 * Load system coprocessors once only. Read the class names from configuration. -131 * Called by constructor. 
-132 */ -133 protected void loadSystemCoprocessors(Configuration conf, String confKey) { -134boolean coprocessorsEnabled = conf.getBoolean(COPROCESSORS_ENABLED_CONF_KEY, -135 DEFAULT_COPROCESSORS_ENABLED); -136if (!coprocessorsEnabled) { -137 return; -138} -139 -140Class? implClass; -141 -142// load default coprocessors from configure file -143String[] defaultCPClasses = conf.getStrings(confKey); -144if (defaultCPClasses == null || defaultCPClasses.length == 0) -145 return; -146 -147int priority = Coprocessor.PRIORITY_SYSTEM; -148for (String className : defaultCPClasses) { -149 className = className.trim(); -150 if (findCoprocessor(className) != null) { -151// If already loaded will just continue -152LOG.warn("Attempted duplicate loading of " + className +
[49/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/5a2158f2/book.html -- diff --git a/book.html b/book.html index e75c297..2a4a579 100644 --- a/book.html +++ b/book.html @@ -35225,7 +35225,7 @@ The server will return cellblocks compressed using this same compressor as long Version 3.0.0-SNAPSHOT -Last updated 2017-10-14 14:29:38 UTC +Last updated 2017-10-15 14:29:42 UTC http://git-wip-us.apache.org/repos/asf/hbase-site/blob/5a2158f2/bulk-loads.html -- diff --git a/bulk-loads.html b/bulk-loads.html index 0d572b0..24eecdd 100644 --- a/bulk-loads.html +++ b/bulk-loads.html @@ -7,7 +7,7 @@ - + Apache HBase Bulk Loads in Apache HBase (TM) @@ -311,7 +311,7 @@ under the License. --> https://www.apache.org/;>The Apache Software Foundation. All rights reserved. - Last Published: 2017-10-14 + Last Published: 2017-10-15
[19/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/5a2158f2/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BulkLoadListener.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BulkLoadListener.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BulkLoadListener.html index 5d138ea..81d256e 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BulkLoadListener.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BulkLoadListener.html @@ -2387,7 +2387,7 @@ 2379 return true; 2380} 2381long modifiedFlushCheckInterval = flushCheckInterval; -2382if (getRegionInfo().isSystemTable() +2382if (getRegionInfo().getTable().isSystemTable() 2383getRegionInfo().getReplicaId() == RegionInfo.DEFAULT_REPLICA_ID) { 2384 modifiedFlushCheckInterval = SYSTEM_CACHE_FLUSH_INTERVAL; 2385} @@ -7869,7 +7869,7 @@ 7861 */ 7862 public byte[] checkSplit() { 7863// Can't split META -7864if (this.getRegionInfo().isMetaTable() || +7864if (this.getRegionInfo().isMetaRegion() || 7865 TableName.NAMESPACE_TABLE_NAME.equals(this.getRegionInfo().getTable())) { 7866 if (shouldForceSplit()) { 7867LOG.warn("Cannot split meta region in HBase 0.20 and above"); @@ -7914,374 +7914,372 @@ 7906 } 7907 7908 /** @return the coprocessor host */ -7909 @Override -7910 public RegionCoprocessorHost getCoprocessorHost() { -7911return coprocessorHost; -7912 } -7913 -7914 /** @param coprocessorHost the new coprocessor host */ -7915 public void setCoprocessorHost(final RegionCoprocessorHost coprocessorHost) { -7916this.coprocessorHost = coprocessorHost; -7917 } -7918 -7919 @Override -7920 public void startRegionOperation() throws IOException { -7921 startRegionOperation(Operation.ANY); -7922 } -7923 -7924 @Override -7925 @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="SF_SWITCH_FALLTHROUGH", -7926justification="Intentional") -7927 public void startRegionOperation(Operation op) throws IOException 
{ -7928switch (op) { -7929 case GET: // read operations -7930 case SCAN: -7931checkReadsEnabled(); -7932 case INCREMENT: // write operations -7933 case APPEND: -7934 case SPLIT_REGION: -7935 case MERGE_REGION: -7936 case PUT: -7937 case DELETE: -7938 case BATCH_MUTATE: -7939 case COMPACT_REGION: -7940 case SNAPSHOT: -7941// when a region is in recovering state, no read, split, merge or snapshot is allowed -7942if (isRecovering() (this.disallowWritesInRecovering || -7943 (op != Operation.PUT op != Operation.DELETE op != Operation.BATCH_MUTATE))) { -7944 throw new RegionInRecoveryException(getRegionInfo().getRegionNameAsString() + -7945" is recovering; cannot take reads"); -7946} -7947break; -7948 default: -7949break; -7950} -7951if (op == Operation.MERGE_REGION || op == Operation.SPLIT_REGION -7952|| op == Operation.COMPACT_REGION) { -7953 // split, merge or compact region doesn't need to check the closing/closed state or lock the -7954 // region -7955 return; -7956} -7957if (this.closing.get()) { -7958 throw new NotServingRegionException(getRegionInfo().getRegionNameAsString() + " is closing"); -7959} -7960lock(lock.readLock()); -7961if (this.closed.get()) { -7962 lock.readLock().unlock(); -7963 throw new NotServingRegionException(getRegionInfo().getRegionNameAsString() + " is closed"); -7964} -7965// The unit for snapshot is a region. So, all stores for this region must be -7966// prepared for snapshot operation before proceeding. 
-7967if (op == Operation.SNAPSHOT) { -7968 stores.values().forEach(HStore::preSnapshotOperation); -7969} -7970try { -7971 if (coprocessorHost != null) { -7972 coprocessorHost.postStartRegionOperation(op); -7973 } -7974} catch (Exception e) { -7975 lock.readLock().unlock(); -7976 throw new IOException(e); -7977} -7978 } -7979 -7980 @Override -7981 public void closeRegionOperation() throws IOException { -7982 closeRegionOperation(Operation.ANY); -7983 } -7984 -7985 @Override -7986 public void closeRegionOperation(Operation operation) throws IOException { -7987if (operation == Operation.SNAPSHOT) { -7988 stores.values().forEach(HStore::postSnapshotOperation); -7989} -7990lock.readLock().unlock(); -7991if (coprocessorHost != null) { -7992 coprocessorHost.postCloseRegionOperation(operation); -7993} -7994 } -7995 -7996 /** -7997 * This method needs to be called before any public call that reads or -7998 * modifies stores in bulk. It has to be called just before a try. -7999
[10/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/5a2158f2/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockContext.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockContext.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockContext.html index 5d138ea..81d256e 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockContext.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockContext.html @@ -2387,7 +2387,7 @@ 2379 return true; 2380} 2381long modifiedFlushCheckInterval = flushCheckInterval; -2382if (getRegionInfo().isSystemTable() +2382if (getRegionInfo().getTable().isSystemTable() 2383getRegionInfo().getReplicaId() == RegionInfo.DEFAULT_REPLICA_ID) { 2384 modifiedFlushCheckInterval = SYSTEM_CACHE_FLUSH_INTERVAL; 2385} @@ -7869,7 +7869,7 @@ 7861 */ 7862 public byte[] checkSplit() { 7863// Can't split META -7864if (this.getRegionInfo().isMetaTable() || +7864if (this.getRegionInfo().isMetaRegion() || 7865 TableName.NAMESPACE_TABLE_NAME.equals(this.getRegionInfo().getTable())) { 7866 if (shouldForceSplit()) { 7867LOG.warn("Cannot split meta region in HBase 0.20 and above"); @@ -7914,374 +7914,372 @@ 7906 } 7907 7908 /** @return the coprocessor host */ -7909 @Override -7910 public RegionCoprocessorHost getCoprocessorHost() { -7911return coprocessorHost; -7912 } -7913 -7914 /** @param coprocessorHost the new coprocessor host */ -7915 public void setCoprocessorHost(final RegionCoprocessorHost coprocessorHost) { -7916this.coprocessorHost = coprocessorHost; -7917 } -7918 -7919 @Override -7920 public void startRegionOperation() throws IOException { -7921 startRegionOperation(Operation.ANY); -7922 } -7923 -7924 @Override -7925 @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="SF_SWITCH_FALLTHROUGH", -7926justification="Intentional") -7927 public void startRegionOperation(Operation op) throws IOException { 
-7928switch (op) { -7929 case GET: // read operations -7930 case SCAN: -7931checkReadsEnabled(); -7932 case INCREMENT: // write operations -7933 case APPEND: -7934 case SPLIT_REGION: -7935 case MERGE_REGION: -7936 case PUT: -7937 case DELETE: -7938 case BATCH_MUTATE: -7939 case COMPACT_REGION: -7940 case SNAPSHOT: -7941// when a region is in recovering state, no read, split, merge or snapshot is allowed -7942if (isRecovering() (this.disallowWritesInRecovering || -7943 (op != Operation.PUT op != Operation.DELETE op != Operation.BATCH_MUTATE))) { -7944 throw new RegionInRecoveryException(getRegionInfo().getRegionNameAsString() + -7945" is recovering; cannot take reads"); -7946} -7947break; -7948 default: -7949break; -7950} -7951if (op == Operation.MERGE_REGION || op == Operation.SPLIT_REGION -7952|| op == Operation.COMPACT_REGION) { -7953 // split, merge or compact region doesn't need to check the closing/closed state or lock the -7954 // region -7955 return; -7956} -7957if (this.closing.get()) { -7958 throw new NotServingRegionException(getRegionInfo().getRegionNameAsString() + " is closing"); -7959} -7960lock(lock.readLock()); -7961if (this.closed.get()) { -7962 lock.readLock().unlock(); -7963 throw new NotServingRegionException(getRegionInfo().getRegionNameAsString() + " is closed"); -7964} -7965// The unit for snapshot is a region. So, all stores for this region must be -7966// prepared for snapshot operation before proceeding. 
-7967if (op == Operation.SNAPSHOT) { -7968 stores.values().forEach(HStore::preSnapshotOperation); -7969} -7970try { -7971 if (coprocessorHost != null) { -7972 coprocessorHost.postStartRegionOperation(op); -7973 } -7974} catch (Exception e) { -7975 lock.readLock().unlock(); -7976 throw new IOException(e); -7977} -7978 } -7979 -7980 @Override -7981 public void closeRegionOperation() throws IOException { -7982 closeRegionOperation(Operation.ANY); -7983 } -7984 -7985 @Override -7986 public void closeRegionOperation(Operation operation) throws IOException { -7987if (operation == Operation.SNAPSHOT) { -7988 stores.values().forEach(HStore::postSnapshotOperation); -7989} -7990lock.readLock().unlock(); -7991if (coprocessorHost != null) { -7992 coprocessorHost.postCloseRegionOperation(operation); -7993} -7994 } -7995 -7996 /** -7997 * This method needs to be called before any public call that reads or -7998 * modifies stores in bulk. It has to be called just before a try. -7999 *
[32/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/5a2158f2/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.ObserverGetter.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.ObserverGetter.html b/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.ObserverGetter.html index ed99dd8..cb1b7e5 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.ObserverGetter.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.ObserverGetter.html @@ -66,670 +66,669 @@ 058 * @param E type of specific coprocessor environment this host requires. 059 * provides 060 */ -061@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC) -062@InterfaceStability.Evolving -063public abstract class CoprocessorHostC extends Coprocessor, E extends CoprocessorEnvironmentC { -064 public static final String REGION_COPROCESSOR_CONF_KEY = -065 "hbase.coprocessor.region.classes"; -066 public static final String REGIONSERVER_COPROCESSOR_CONF_KEY = -067 "hbase.coprocessor.regionserver.classes"; -068 public static final String USER_REGION_COPROCESSOR_CONF_KEY = -069 "hbase.coprocessor.user.region.classes"; -070 public static final String MASTER_COPROCESSOR_CONF_KEY = -071 "hbase.coprocessor.master.classes"; -072 public static final String WAL_COPROCESSOR_CONF_KEY = -073"hbase.coprocessor.wal.classes"; -074 public static final String ABORT_ON_ERROR_KEY = "hbase.coprocessor.abortonerror"; -075 public static final boolean DEFAULT_ABORT_ON_ERROR = true; -076 public static final String COPROCESSORS_ENABLED_CONF_KEY = "hbase.coprocessor.enabled"; -077 public static final boolean DEFAULT_COPROCESSORS_ENABLED = true; -078 public static final String USER_COPROCESSORS_ENABLED_CONF_KEY = -079"hbase.coprocessor.user.enabled"; -080 public static final boolean DEFAULT_USER_COPROCESSORS_ENABLED = true; -081 -082 private static final Log LOG = 
LogFactory.getLog(CoprocessorHost.class); -083 protected Abortable abortable; -084 /** Ordered set of loaded coprocessors with lock */ -085 protected final SortedListE coprocEnvironments = -086 new SortedList(new EnvironmentPriorityComparator()); -087 protected Configuration conf; -088 // unique file prefix to use for local copies of jars when classloading -089 protected String pathPrefix; -090 protected AtomicInteger loadSequence = new AtomicInteger(); -091 -092 public CoprocessorHost(Abortable abortable) { -093this.abortable = abortable; -094this.pathPrefix = UUID.randomUUID().toString(); -095 } -096 -097 /** -098 * Not to be confused with the per-object _coprocessors_ (above), -099 * coprocessorNames is static and stores the set of all coprocessors ever -100 * loaded by any thread in this JVM. It is strictly additive: coprocessors are -101 * added to coprocessorNames, by checkAndLoadInstance() but are never removed, since -102 * the intention is to preserve a history of all loaded coprocessors for -103 * diagnosis in case of server crash (HBASE-4014). -104 */ -105 private static SetString coprocessorNames = -106 Collections.synchronizedSet(new HashSetString()); -107 -108 public static SetString getLoadedCoprocessors() { -109synchronized (coprocessorNames) { -110 return new HashSet(coprocessorNames); -111} -112 } -113 -114 /** -115 * Used to create a parameter to the HServerLoad constructor so that -116 * HServerLoad can provide information about the coprocessors loaded by this -117 * regionserver. -118 * (HBASE-4070: Improve region server metrics to report loaded coprocessors -119 * to master). -120 */ -121 public SetString getCoprocessors() { -122SetString returnValue = new TreeSet(); -123for (E e: coprocEnvironments) { -124 returnValue.add(e.getInstance().getClass().getSimpleName()); -125} -126return returnValue; -127 } -128 -129 /** -130 * Load system coprocessors once only. Read the class names from configuration. -131 * Called by constructor. 
-132 */ -133 protected void loadSystemCoprocessors(Configuration conf, String confKey) { -134boolean coprocessorsEnabled = conf.getBoolean(COPROCESSORS_ENABLED_CONF_KEY, -135 DEFAULT_COPROCESSORS_ENABLED); -136if (!coprocessorsEnabled) { -137 return; -138} -139 -140Class? implClass; -141 -142// load default coprocessors from configure file -143String[] defaultCPClasses = conf.getStrings(confKey); -144if (defaultCPClasses == null || defaultCPClasses.length == 0) -145 return; -146 -147int priority = Coprocessor.PRIORITY_SYSTEM; -148for (String className : defaultCPClasses) { -149 className = className.trim(); -150 if (findCoprocessor(className) != null) { -151// If already loaded will just
[03/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/5a2158f2/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/ImmutableSegment.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/ImmutableSegment.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/ImmutableSegment.html index c8cdcb6..166edcf 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/ImmutableSegment.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/ImmutableSegment.html @@ -43,68 +43,47 @@ 035@InterfaceAudience.Private 036public abstract class ImmutableSegment extends Segment { 037 -038 public static final long DEEP_OVERHEAD = Segment.DEEP_OVERHEAD -039 + ClassSize.align(ClassSize.REFERENCE // Referent to timeRange -040 + ClassSize.TIMERANGE); -041 -042 /** -043 * This is an immutable segment so use the read-only TimeRange rather than the heavy-weight -044 * TimeRangeTracker with all its synchronization when doing time range stuff. -045 */ -046 private final TimeRange timeRange; -047 -048 // each sub-type of immutable segment knows whether it is flat or not -049 protected abstract boolean canBeFlattened(); +038 public static final long DEEP_OVERHEAD = Segment.DEEP_OVERHEAD + ClassSize.NON_SYNC_TIMERANGE_TRACKER; +039 +040 // each sub-type of immutable segment knows whether it is flat or not +041 protected abstract boolean canBeFlattened(); +042 +043 / CONSTRUCTORS / +044 /** +045 * Empty C-tor to be used only for CompositeImmutableSegment +046 */ +047 protected ImmutableSegment(CellComparator comparator) { +048super(comparator, TimeRangeTracker.create(TimeRangeTracker.Type.NON_SYNC)); +049 } 050 -051 / CONSTRUCTORS / -052 /** -053 * Empty C-tor to be used only for CompositeImmutableSegment -054 */ -055 protected ImmutableSegment(CellComparator comparator) { -056super(comparator); -057this.timeRange = null; -058 } -059 -060 /** -061 * C-tor to be used to build the derived classes +051 /** +052 * C-tor to be used to build 
the derived classes +053 */ +054 protected ImmutableSegment(CellSet cs, CellComparator comparator, MemStoreLAB memStoreLAB) { +055super(cs, comparator, memStoreLAB, TimeRangeTracker.create(TimeRangeTracker.Type.NON_SYNC)); +056 } +057 +058 /** +059 * Copy C-tor to be used when new CSLMImmutableSegment (derived) is being built from a Mutable one. +060 * This C-tor should be used when active MutableSegment is pushed into the compaction +061 * pipeline and becomes an ImmutableSegment. 062 */ -063 protected ImmutableSegment(CellSet cs, CellComparator comparator, MemStoreLAB memStoreLAB) { -064super(cs, comparator, memStoreLAB); -065this.timeRange = this.timeRangeTracker == null ? null : this.timeRangeTracker.toTimeRange(); -066 } +063 protected ImmutableSegment(Segment segment) { +064super(segment); +065 } +066 067 -068 /** -069 * Copy C-tor to be used when new CSLMImmutableSegment (derived) is being built from a Mutable one. -070 * This C-tor should be used when active MutableSegment is pushed into the compaction -071 * pipeline and becomes an ImmutableSegment. -072 */ -073 protected ImmutableSegment(Segment segment) { -074super(segment); -075this.timeRange = this.timeRangeTracker == null ? 
null : this.timeRangeTracker.toTimeRange(); -076 } -077 -078 -079 / PUBLIC METHODS / -080 @Override -081 public boolean shouldSeek(TimeRange tr, long oldestUnexpiredTS) { -082return this.timeRange.includesTimeRange(tr) -083this.timeRange.getMax() = oldestUnexpiredTS; -084 } -085 -086 @Override -087 public long getMinTimestamp() { -088return this.timeRange.getMin(); -089 } -090 -091 public int getNumOfSegments() { -092return 1; -093 } -094 -095 public ListSegment getAllSegments() { -096ListSegment res = new ArrayList(Arrays.asList(this)); -097return res; -098 } -099} +068 / PUBLIC METHODS / +069 +070 public int getNumOfSegments() { +071return 1; +072 } +073 +074 public ListSegment getAllSegments() { +075ListSegment res = new ArrayList(Arrays.asList(this)); +076return res; +077 } +078}
[07/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/5a2158f2/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.html index 5d138ea..81d256e 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.html @@ -2387,7 +2387,7 @@ 2379 return true; 2380} 2381long modifiedFlushCheckInterval = flushCheckInterval; -2382if (getRegionInfo().isSystemTable() +2382if (getRegionInfo().getTable().isSystemTable() 2383getRegionInfo().getReplicaId() == RegionInfo.DEFAULT_REPLICA_ID) { 2384 modifiedFlushCheckInterval = SYSTEM_CACHE_FLUSH_INTERVAL; 2385} @@ -7869,7 +7869,7 @@ 7861 */ 7862 public byte[] checkSplit() { 7863// Can't split META -7864if (this.getRegionInfo().isMetaTable() || +7864if (this.getRegionInfo().isMetaRegion() || 7865 TableName.NAMESPACE_TABLE_NAME.equals(this.getRegionInfo().getTable())) { 7866 if (shouldForceSplit()) { 7867LOG.warn("Cannot split meta region in HBase 0.20 and above"); @@ -7914,374 +7914,372 @@ 7906 } 7907 7908 /** @return the coprocessor host */ -7909 @Override -7910 public RegionCoprocessorHost getCoprocessorHost() { -7911return coprocessorHost; -7912 } -7913 -7914 /** @param coprocessorHost the new coprocessor host */ -7915 public void setCoprocessorHost(final RegionCoprocessorHost coprocessorHost) { -7916this.coprocessorHost = coprocessorHost; -7917 } -7918 -7919 @Override -7920 public void startRegionOperation() throws IOException { -7921 startRegionOperation(Operation.ANY); -7922 } -7923 -7924 @Override -7925 @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="SF_SWITCH_FALLTHROUGH", -7926justification="Intentional") -7927 public void startRegionOperation(Operation op) throws IOException { -7928switch (op) { -7929 case GET: // read operations -7930 case SCAN: 
-7931checkReadsEnabled(); -7932 case INCREMENT: // write operations -7933 case APPEND: -7934 case SPLIT_REGION: -7935 case MERGE_REGION: -7936 case PUT: -7937 case DELETE: -7938 case BATCH_MUTATE: -7939 case COMPACT_REGION: -7940 case SNAPSHOT: -7941// when a region is in recovering state, no read, split, merge or snapshot is allowed -7942if (isRecovering() (this.disallowWritesInRecovering || -7943 (op != Operation.PUT op != Operation.DELETE op != Operation.BATCH_MUTATE))) { -7944 throw new RegionInRecoveryException(getRegionInfo().getRegionNameAsString() + -7945" is recovering; cannot take reads"); -7946} -7947break; -7948 default: -7949break; -7950} -7951if (op == Operation.MERGE_REGION || op == Operation.SPLIT_REGION -7952|| op == Operation.COMPACT_REGION) { -7953 // split, merge or compact region doesn't need to check the closing/closed state or lock the -7954 // region -7955 return; -7956} -7957if (this.closing.get()) { -7958 throw new NotServingRegionException(getRegionInfo().getRegionNameAsString() + " is closing"); -7959} -7960lock(lock.readLock()); -7961if (this.closed.get()) { -7962 lock.readLock().unlock(); -7963 throw new NotServingRegionException(getRegionInfo().getRegionNameAsString() + " is closed"); -7964} -7965// The unit for snapshot is a region. So, all stores for this region must be -7966// prepared for snapshot operation before proceeding. 
-7967if (op == Operation.SNAPSHOT) { -7968 stores.values().forEach(HStore::preSnapshotOperation); -7969} -7970try { -7971 if (coprocessorHost != null) { -7972 coprocessorHost.postStartRegionOperation(op); -7973 } -7974} catch (Exception e) { -7975 lock.readLock().unlock(); -7976 throw new IOException(e); -7977} -7978 } -7979 -7980 @Override -7981 public void closeRegionOperation() throws IOException { -7982 closeRegionOperation(Operation.ANY); -7983 } -7984 -7985 @Override -7986 public void closeRegionOperation(Operation operation) throws IOException { -7987if (operation == Operation.SNAPSHOT) { -7988 stores.values().forEach(HStore::postSnapshotOperation); -7989} -7990lock.readLock().unlock(); -7991if (coprocessorHost != null) { -7992 coprocessorHost.postCloseRegionOperation(operation); -7993} -7994 } -7995 -7996 /** -7997 * This method needs to be called before any public call that reads or -7998 * modifies stores in bulk. It has to be called just before a try. -7999 * #closeBulkRegionOperation needs to be called in the try's finally block -8000 *
[18/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/5a2158f2/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResult.Result.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResult.Result.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResult.Result.html index 5d138ea..81d256e 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResult.Result.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResult.Result.html @@ -2387,7 +2387,7 @@ 2379 return true; 2380} 2381long modifiedFlushCheckInterval = flushCheckInterval; -2382if (getRegionInfo().isSystemTable() +2382if (getRegionInfo().getTable().isSystemTable() 2383getRegionInfo().getReplicaId() == RegionInfo.DEFAULT_REPLICA_ID) { 2384 modifiedFlushCheckInterval = SYSTEM_CACHE_FLUSH_INTERVAL; 2385} @@ -7869,7 +7869,7 @@ 7861 */ 7862 public byte[] checkSplit() { 7863// Can't split META -7864if (this.getRegionInfo().isMetaTable() || +7864if (this.getRegionInfo().isMetaRegion() || 7865 TableName.NAMESPACE_TABLE_NAME.equals(this.getRegionInfo().getTable())) { 7866 if (shouldForceSplit()) { 7867LOG.warn("Cannot split meta region in HBase 0.20 and above"); @@ -7914,374 +7914,372 @@ 7906 } 7907 7908 /** @return the coprocessor host */ -7909 @Override -7910 public RegionCoprocessorHost getCoprocessorHost() { -7911return coprocessorHost; -7912 } -7913 -7914 /** @param coprocessorHost the new coprocessor host */ -7915 public void setCoprocessorHost(final RegionCoprocessorHost coprocessorHost) { -7916this.coprocessorHost = coprocessorHost; -7917 } -7918 -7919 @Override -7920 public void startRegionOperation() throws IOException { -7921 startRegionOperation(Operation.ANY); -7922 } -7923 -7924 @Override -7925 @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="SF_SWITCH_FALLTHROUGH", -7926justification="Intentional") -7927 public void startRegionOperation(Operation op) throws 
IOException { -7928switch (op) { -7929 case GET: // read operations -7930 case SCAN: -7931checkReadsEnabled(); -7932 case INCREMENT: // write operations -7933 case APPEND: -7934 case SPLIT_REGION: -7935 case MERGE_REGION: -7936 case PUT: -7937 case DELETE: -7938 case BATCH_MUTATE: -7939 case COMPACT_REGION: -7940 case SNAPSHOT: -7941// when a region is in recovering state, no read, split, merge or snapshot is allowed -7942if (isRecovering() (this.disallowWritesInRecovering || -7943 (op != Operation.PUT op != Operation.DELETE op != Operation.BATCH_MUTATE))) { -7944 throw new RegionInRecoveryException(getRegionInfo().getRegionNameAsString() + -7945" is recovering; cannot take reads"); -7946} -7947break; -7948 default: -7949break; -7950} -7951if (op == Operation.MERGE_REGION || op == Operation.SPLIT_REGION -7952|| op == Operation.COMPACT_REGION) { -7953 // split, merge or compact region doesn't need to check the closing/closed state or lock the -7954 // region -7955 return; -7956} -7957if (this.closing.get()) { -7958 throw new NotServingRegionException(getRegionInfo().getRegionNameAsString() + " is closing"); -7959} -7960lock(lock.readLock()); -7961if (this.closed.get()) { -7962 lock.readLock().unlock(); -7963 throw new NotServingRegionException(getRegionInfo().getRegionNameAsString() + " is closed"); -7964} -7965// The unit for snapshot is a region. So, all stores for this region must be -7966// prepared for snapshot operation before proceeding. 
-7967if (op == Operation.SNAPSHOT) { -7968 stores.values().forEach(HStore::preSnapshotOperation); -7969} -7970try { -7971 if (coprocessorHost != null) { -7972 coprocessorHost.postStartRegionOperation(op); -7973 } -7974} catch (Exception e) { -7975 lock.readLock().unlock(); -7976 throw new IOException(e); -7977} -7978 } -7979 -7980 @Override -7981 public void closeRegionOperation() throws IOException { -7982 closeRegionOperation(Operation.ANY); -7983 } -7984 -7985 @Override -7986 public void closeRegionOperation(Operation operation) throws IOException { -7987if (operation == Operation.SNAPSHOT) { -7988 stores.values().forEach(HStore::postSnapshotOperation); -7989} -7990lock.readLock().unlock(); -7991if (coprocessorHost != null) { -7992 coprocessorHost.postCloseRegionOperation(operation); -7993} -7994 } -7995 -7996 /** -7997 * This method needs to be called before any public call that reads or -7998 * modifies stores in bulk. It has to be called just before a
[46/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/5a2158f2/devapidocs/org/apache/hadoop/hbase/client/RegionInfoBuilder.MutableRegionInfo.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/client/RegionInfoBuilder.MutableRegionInfo.html b/devapidocs/org/apache/hadoop/hbase/client/RegionInfoBuilder.MutableRegionInfo.html index d1e4563..22e95dc 100644 --- a/devapidocs/org/apache/hadoop/hbase/client/RegionInfoBuilder.MutableRegionInfo.html +++ b/devapidocs/org/apache/hadoop/hbase/client/RegionInfoBuilder.MutableRegionInfo.html @@ -18,7 +18,7 @@ catch(err) { } //--> -var methods = {"i0":9,"i1":9,"i2":9,"i3":9,"i4":10,"i5":10,"i6":10,"i7":10,"i8":9,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10}; +var methods = {"i0":9,"i1":9,"i2":9,"i3":9,"i4":10,"i5":10,"i6":10,"i7":10,"i8":9,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10}; var tabs = {65535:["t0","All Methods"],1:["t1","Static Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]}; var altColor = "altColor"; var rowColor = "rowColor"; @@ -357,36 +357,28 @@ implements boolean -isMetaTable() - - -boolean isOffline() - + boolean isSplit() - + boolean isSplitParent() - -boolean -isSystemTable() - - + RegionInfoBuilder.MutableRegionInfo setOffline(booleanoffLine) The parent of a region split is offline while split daughters hold references to the parent. 
- + RegionInfoBuilder.MutableRegionInfo setSplit(booleansplit) - + http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">String toString() @@ -831,28 +823,13 @@ implements - - - - -isMetaTable -publicbooleanisMetaTable() - -Specified by: -isMetaTablein interfaceRegionInfo -Returns: -true if this region is from hbase:meta - - - isMetaRegion -publicbooleanisMetaRegion() +publicbooleanisMetaRegion() Specified by: isMetaRegionin interfaceRegionInfo @@ -861,28 +838,13 @@ implements - - - - -isSystemTable -publicbooleanisSystemTable() - -Specified by: -isSystemTablein interfaceRegionInfo -Returns: -true if this region is from a system table - - - isSplit -publicbooleanisSplit() +publicbooleanisSplit() Specified by: isSplitin interfaceRegionInfo @@ -897,7 +859,7 @@ implements setSplit -publicRegionInfoBuilder.MutableRegionInfosetSplit(booleansplit) +publicRegionInfoBuilder.MutableRegionInfosetSplit(booleansplit) Parameters: split - set split status @@ -912,7 +874,7 @@ implements isOffline -publicbooleanisOffline() +publicbooleanisOffline() Specified by: isOfflinein interfaceRegionInfo @@ -927,7 +889,7 @@ implements setOffline -publicRegionInfoBuilder.MutableRegionInfosetOffline(booleanoffLine) +publicRegionInfoBuilder.MutableRegionInfosetOffline(booleanoffLine) The parent of a region split is offline while split daughters hold references to the parent. Offlined regions are closed. 
@@ -944,7 +906,7 @@ implements isSplitParent -publicbooleanisSplitParent() +publicbooleanisSplitParent() Specified by: isSplitParentin interfaceRegionInfo @@ -959,7 +921,7 @@ implements getReplicaId -publicintgetReplicaId() +publicintgetReplicaId() Returns the region replica id Specified by: @@ -975,7 +937,7 @@ implements toString -publichttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">StringtoString() +publichttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">StringtoString() Overrides: http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#toString--; title="class or interface in java.lang">toStringin classhttp://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true; title="class or interface in java.lang">Object @@ -990,7 +952,7 @@ implements equals -publicbooleanequals(http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true; title="class or interface in java.lang">Objecto) +publicbooleanequals(http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true; title="class or interface in java.lang">Objecto) Overrides: http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#equals-java.lang.Object-; title="class or interface in java.lang">equalsin classhttp://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true; title="class or interface in java.lang">Object @@ -1007,7 +969,7 @@ implements hashCode -publicinthashCode() +publicinthashCode() Overrides:
[08/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/5a2158f2/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.WriteState.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.WriteState.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.WriteState.html index 5d138ea..81d256e 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.WriteState.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.WriteState.html @@ -2387,7 +2387,7 @@ 2379 return true; 2380} 2381long modifiedFlushCheckInterval = flushCheckInterval; -2382if (getRegionInfo().isSystemTable() +2382if (getRegionInfo().getTable().isSystemTable() 2383getRegionInfo().getReplicaId() == RegionInfo.DEFAULT_REPLICA_ID) { 2384 modifiedFlushCheckInterval = SYSTEM_CACHE_FLUSH_INTERVAL; 2385} @@ -7869,7 +7869,7 @@ 7861 */ 7862 public byte[] checkSplit() { 7863// Can't split META -7864if (this.getRegionInfo().isMetaTable() || +7864if (this.getRegionInfo().isMetaRegion() || 7865 TableName.NAMESPACE_TABLE_NAME.equals(this.getRegionInfo().getTable())) { 7866 if (shouldForceSplit()) { 7867LOG.warn("Cannot split meta region in HBase 0.20 and above"); @@ -7914,374 +7914,372 @@ 7906 } 7907 7908 /** @return the coprocessor host */ -7909 @Override -7910 public RegionCoprocessorHost getCoprocessorHost() { -7911return coprocessorHost; -7912 } -7913 -7914 /** @param coprocessorHost the new coprocessor host */ -7915 public void setCoprocessorHost(final RegionCoprocessorHost coprocessorHost) { -7916this.coprocessorHost = coprocessorHost; -7917 } -7918 -7919 @Override -7920 public void startRegionOperation() throws IOException { -7921 startRegionOperation(Operation.ANY); -7922 } -7923 -7924 @Override -7925 @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="SF_SWITCH_FALLTHROUGH", -7926justification="Intentional") -7927 public void startRegionOperation(Operation op) throws IOException { -7928switch (op) { -7929 
case GET: // read operations -7930 case SCAN: -7931checkReadsEnabled(); -7932 case INCREMENT: // write operations -7933 case APPEND: -7934 case SPLIT_REGION: -7935 case MERGE_REGION: -7936 case PUT: -7937 case DELETE: -7938 case BATCH_MUTATE: -7939 case COMPACT_REGION: -7940 case SNAPSHOT: -7941// when a region is in recovering state, no read, split, merge or snapshot is allowed -7942if (isRecovering() (this.disallowWritesInRecovering || -7943 (op != Operation.PUT op != Operation.DELETE op != Operation.BATCH_MUTATE))) { -7944 throw new RegionInRecoveryException(getRegionInfo().getRegionNameAsString() + -7945" is recovering; cannot take reads"); -7946} -7947break; -7948 default: -7949break; -7950} -7951if (op == Operation.MERGE_REGION || op == Operation.SPLIT_REGION -7952|| op == Operation.COMPACT_REGION) { -7953 // split, merge or compact region doesn't need to check the closing/closed state or lock the -7954 // region -7955 return; -7956} -7957if (this.closing.get()) { -7958 throw new NotServingRegionException(getRegionInfo().getRegionNameAsString() + " is closing"); -7959} -7960lock(lock.readLock()); -7961if (this.closed.get()) { -7962 lock.readLock().unlock(); -7963 throw new NotServingRegionException(getRegionInfo().getRegionNameAsString() + " is closed"); -7964} -7965// The unit for snapshot is a region. So, all stores for this region must be -7966// prepared for snapshot operation before proceeding. 
-7967if (op == Operation.SNAPSHOT) { -7968 stores.values().forEach(HStore::preSnapshotOperation); -7969} -7970try { -7971 if (coprocessorHost != null) { -7972 coprocessorHost.postStartRegionOperation(op); -7973 } -7974} catch (Exception e) { -7975 lock.readLock().unlock(); -7976 throw new IOException(e); -7977} -7978 } -7979 -7980 @Override -7981 public void closeRegionOperation() throws IOException { -7982 closeRegionOperation(Operation.ANY); -7983 } -7984 -7985 @Override -7986 public void closeRegionOperation(Operation operation) throws IOException { -7987if (operation == Operation.SNAPSHOT) { -7988 stores.values().forEach(HStore::postSnapshotOperation); -7989} -7990lock.readLock().unlock(); -7991if (coprocessorHost != null) { -7992 coprocessorHost.postCloseRegionOperation(operation); -7993} -7994 } -7995 -7996 /** -7997 * This method needs to be called before any public call that reads or -7998 * modifies stores in bulk. It has to be called just before a try. -7999 * #closeBulkRegionOperation
[39/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/5a2158f2/devapidocs/org/apache/hadoop/hbase/regionserver/RegionServerCoprocessorHost.RegionServerEnvironment.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/RegionServerCoprocessorHost.RegionServerEnvironment.html b/devapidocs/org/apache/hadoop/hbase/regionserver/RegionServerCoprocessorHost.RegionServerEnvironment.html index f262afd..59d5a59 100644 --- a/devapidocs/org/apache/hadoop/hbase/regionserver/RegionServerCoprocessorHost.RegionServerEnvironment.html +++ b/devapidocs/org/apache/hadoop/hbase/regionserver/RegionServerCoprocessorHost.RegionServerEnvironment.html @@ -122,7 +122,7 @@ var activeTableTab = "activeTableTab"; -private static class RegionServerCoprocessorHost.RegionServerEnvironment +private static class RegionServerCoprocessorHost.RegionServerEnvironment extends BaseEnvironmentRegionServerCoprocessor implements RegionServerCoprocessorEnvironment Coprocessor environment extension providing access to region server @@ -256,7 +256,7 @@ implements regionServerServices -private finalRegionServerServices regionServerServices +private finalRegionServerServices regionServerServices @@ -265,7 +265,7 @@ implements metricRegistry -private finalMetricRegistry metricRegistry +private finalMetricRegistry metricRegistry @@ -282,7 +282,7 @@ implements RegionServerEnvironment -publicRegionServerEnvironment(RegionServerCoprocessorimpl, +publicRegionServerEnvironment(RegionServerCoprocessorimpl, intpriority, intseq, org.apache.hadoop.conf.Configurationconf, @@ -303,7 +303,7 @@ implements getCoprocessorRegionServerServices -publicCoprocessorRegionServerServicesgetCoprocessorRegionServerServices() +publicCoprocessorRegionServerServicesgetCoprocessorRegionServerServices() Description copied from interface:RegionServerCoprocessorEnvironment Gets the region server services. 
@@ -320,7 +320,7 @@ implements getMetricRegistryForRegionServer -publicMetricRegistrygetMetricRegistryForRegionServer() +publicMetricRegistrygetMetricRegistryForRegionServer() Description copied from interface:RegionServerCoprocessorEnvironment Returns a MetricRegistry that can be used to track metrics at the region server level. @@ -340,7 +340,7 @@ implements shutdown -publicvoidshutdown() +publicvoidshutdown() Description copied from class:BaseEnvironment Clean up the environment http://git-wip-us.apache.org/repos/asf/hbase-site/blob/5a2158f2/devapidocs/org/apache/hadoop/hbase/regionserver/RegionServerCoprocessorHost.RegionServerObserverOperation.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/RegionServerCoprocessorHost.RegionServerObserverOperation.html b/devapidocs/org/apache/hadoop/hbase/regionserver/RegionServerCoprocessorHost.RegionServerObserverOperation.html index a222603..5ad5c5e 100644 --- a/devapidocs/org/apache/hadoop/hbase/regionserver/RegionServerCoprocessorHost.RegionServerObserverOperation.html +++ b/devapidocs/org/apache/hadoop/hbase/regionserver/RegionServerCoprocessorHost.RegionServerObserverOperation.html @@ -126,7 +126,7 @@ -abstract class RegionServerCoprocessorHost.RegionServerObserverOperation +abstract class RegionServerCoprocessorHost.RegionServerObserverOperation extends CoprocessorHost.ObserverOperationWithoutResultRegionServerObserver @@ -208,7 +208,7 @@ extends RegionServerObserverOperation -publicRegionServerObserverOperation() +publicRegionServerObserverOperation() @@ -217,7 +217,7 @@ extends RegionServerObserverOperation -publicRegionServerObserverOperation(Useruser) +publicRegionServerObserverOperation(Useruser) http://git-wip-us.apache.org/repos/asf/hbase-site/blob/5a2158f2/devapidocs/org/apache/hadoop/hbase/regionserver/RegionServerCoprocessorHost.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/RegionServerCoprocessorHost.html 
b/devapidocs/org/apache/hadoop/hbase/regionserver/RegionServerCoprocessorHost.html index 275420c..5cc0c45 100644 --- a/devapidocs/org/apache/hadoop/hbase/regionserver/RegionServerCoprocessorHost.html +++ b/devapidocs/org/apache/hadoop/hbase/regionserver/RegionServerCoprocessorHost.html @@ -114,9 +114,8 @@ var activeTableTab = "activeTableTab"; -@InterfaceAudience.LimitedPrivate(value="Coprocesssor") - @InterfaceStability.Evolving -public class RegionServerCoprocessorHost +@InterfaceAudience.Private +public class RegionServerCoprocessorHost extends CoprocessorHostRegionServerCoprocessor,RegionServerCoprocessorEnvironment @@ -304,7 +303,7 @@ extends LOG -private static finalorg.apache.commons.logging.Log LOG +private static
[48/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/5a2158f2/checkstyle-aggregate.html -- diff --git a/checkstyle-aggregate.html b/checkstyle-aggregate.html index a46fc76..93e11d0 100644 --- a/checkstyle-aggregate.html +++ b/checkstyle-aggregate.html @@ -7,7 +7,7 @@ - + Apache HBase Checkstyle Results @@ -289,7 +289,7 @@ 2055 0 0 -13596 +13608 Files @@ -1902,7 +1902,7 @@ org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java 0 0 -15 +17 org/apache/hadoop/hbase/coprocessor/CoprocessorService.java 0 @@ -4257,7 +4257,7 @@ org/apache/hadoop/hbase/mob/DefaultMobStoreFlusher.java 0 0 -4 +5 org/apache/hadoop/hbase/mob/ExpiredMobFileCleaner.java 0 @@ -5077,7 +5077,7 @@ org/apache/hadoop/hbase/regionserver/ImmutableSegment.java 0 0 -4 +6 org/apache/hadoop/hbase/regionserver/IncreasingToUpperBoundRegionSplitPolicy.java 0 @@ -5272,7 +5272,7 @@ org/apache/hadoop/hbase/regionserver/MutableSegment.java 0 0 -4 +5 org/apache/hadoop/hbase/regionserver/NoOpHeapMemoryTuner.java 0 @@ -5327,7 +5327,7 @@ org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java 0 0 -48 +50 org/apache/hadoop/hbase/regionserver/RegionScanner.java 0 @@ -5342,7 +5342,7 @@ org/apache/hadoop/hbase/regionserver/RegionServerCoprocessorHost.java 0 0 -5 +7 org/apache/hadoop/hbase/regionserver/RegionServerServices.java 0 @@ -5422,7 +5422,7 @@ org/apache/hadoop/hbase/regionserver/Segment.java 0 0 -1 +2 org/apache/hadoop/hbase/regionserver/SegmentFactory.java 0 @@ -5512,7 +5512,7 @@ org/apache/hadoop/hbase/regionserver/StoreFlusher.java 0 0 -2 +3 org/apache/hadoop/hbase/regionserver/StoreScanner.java 0 @@ -8273,7 +8273,7 @@ http://checkstyle.sourceforge.net/config_imports.html#UnusedImports;>UnusedImports processJavadoc: true -109 +117 Error indentation @@ -8291,12 +8291,12 @@ http://checkstyle.sourceforge.net/config_javadoc.html#JavadocTagContinuationIndentation;>JavadocTagContinuationIndentation offset: 2 -769 +763 Error 
http://checkstyle.sourceforge.net/config_javadoc.html#NonEmptyAtclauseDescription;>NonEmptyAtclauseDescription -3265 +3271 Error misc @@ -8314,7 +8314,7 @@ max: 100 ignorePattern: ^package.*|^import.*|a href|href|http://|https://|ftp://|org.apache.thrift.|com.google.protobuf.|hbase.protobuf.generated -1098 +1102 Error @@ -14680,7 +14680,7 @@ Error javadoc -JavadocTagContinuationIndentation +NonEmptyAtclauseDescription Javadoc comment at column 43 has parse error. Missed HTML close tag 'TableName'. Sometimes it means that close tag missed for one of previous tags. 179 @@ -19105,7 +19105,7 @@ Error javadoc -JavadocTagContinuationIndentation +NonEmptyAtclauseDescription Javadoc comment at column 64 has parse error. Missed HTML close tag 'code'. Sometimes it means that close tag missed for one of previous tags. 2103 @@ -21987,289 +21987,289 @@ javadoc NonEmptyAtclauseDescription At-clause should have a non-empty description. -233 +223 Error javadoc NonEmptyAtclauseDescription At-clause should have a non-empty description. -234 +224 Error javadoc JavadocTagContinuationIndentation Line continuation have incorrect indentation level, expected level should be 2. -236 +226 Error javadoc JavadocTagContinuationIndentation Line continuation have incorrect indentation level, expected level should be 2. -237 +227 Error javadoc JavadocTagContinuationIndentation Line continuation have incorrect indentation level, expected level should be 2. -238 +228 Error javadoc NonEmptyAtclauseDescription At-clause should have a non-empty description. -244 +234 Error javadoc JavadocTagContinuationIndentation Line continuation have incorrect indentation level, expected level should be 2. -286 +276 Error javadoc JavadocTagContinuationIndentation Line continuation have incorrect indentation level, expected level should be 2. -294 +284 Error javadoc NonEmptyAtclauseDescription At-clause should have a non-empty description. 
-325 +315 Error javadoc NonEmptyAtclauseDescription At-clause should have a non-empty description. -327 +317 Error indentation Indentation '.' have incorrect indentation level 6, expected level should be 8. -340 +330 Error javadoc NonEmptyAtclauseDescription At-clause should have a non-empty description. -348 +338 Error javadoc JavadocTagContinuationIndentation Line continuation have incorrect indentation level, expected level should be 2. -350 +340 Error blocks NeedBraces 'if' construct must use '{}'s. -354 +344 Error javadoc NonEmptyAtclauseDescription At-clause should have a non-empty description. -359 +349 Error javadoc NonEmptyAtclauseDescription At-clause should have a non-empty description. -360 +350 Error javadoc NonEmptyAtclauseDescription At-clause should have a non-empty description. -361 +351 Error
[35/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/5a2158f2/devapidocs/src-html/org/apache/hadoop/hbase/client/RegionInfo.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/RegionInfo.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/RegionInfo.html index 07af8e5..757cb15 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/client/RegionInfo.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/RegionInfo.html @@ -223,541 +223,531 @@ 215 boolean isSplitParent(); 216 217 /** -218 * @return true if this region is from hbase:meta. +218 * @return true if this region is a meta region. 219 */ -220 boolean isMetaTable(); +220 boolean isMetaRegion(); 221 222 /** -223 * @return true if this region is from a system table. -224 */ -225 boolean isSystemTable(); -226 -227 /** -228 * @return true if this region is a meta region. -229 */ -230 boolean isMetaRegion(); -231 -232 /** -233 * @param rangeStartKey -234 * @param rangeEndKey -235 * @return true if the given inclusive range of rows is fully contained -236 * by this region. For example, if the region is foo,a,g and this is -237 * passed ["b","c"] or ["a","c"] it will return true, but if this is passed -238 * ["b","z"] it will return false. -239 * @throws IllegalArgumentException if the range passed is invalid (ie. end lt; start) -240 */ -241 boolean containsRange(byte[] rangeStartKey, byte[] rangeEndKey); -242 -243 /** -244 * @param row -245 * @return true if the given row falls in this region. -246 */ -247 boolean containsRow(byte[] row); -248 -249 /** -250 * Does region name contain its encoded name? -251 * @param regionName region name -252 * @return boolean indicating if this a new format region -253 * name which contains its encoded name. +223 * @param rangeStartKey +224 * @param rangeEndKey +225 * @return true if the given inclusive range of rows is fully contained +226 * by this region. 
For example, if the region is foo,a,g and this is +227 * passed ["b","c"] or ["a","c"] it will return true, but if this is passed +228 * ["b","z"] it will return false. +229 * @throws IllegalArgumentException if the range passed is invalid (ie. end lt; start) +230 */ +231 boolean containsRange(byte[] rangeStartKey, byte[] rangeEndKey); +232 +233 /** +234 * @param row +235 * @return true if the given row falls in this region. +236 */ +237 boolean containsRow(byte[] row); +238 +239 /** +240 * Does region name contain its encoded name? +241 * @param regionName region name +242 * @return boolean indicating if this a new format region +243 * name which contains its encoded name. +244 */ +245 @InterfaceAudience.Private +246 static boolean hasEncodedName(final byte[] regionName) { +247// check if region name ends in ENC_SEPARATOR +248return (regionName.length = 1) +249 (regionName[regionName.length - 1] == RegionInfo.ENC_SEPARATOR); +250 } +251 +252 /** +253 * @return the encodedName 254 */ 255 @InterfaceAudience.Private -256 static boolean hasEncodedName(final byte[] regionName) { -257// check if region name ends in ENC_SEPARATOR -258return (regionName.length = 1) -259 (regionName[regionName.length - 1] == RegionInfo.ENC_SEPARATOR); -260 } -261 -262 /** -263 * @return the encodedName -264 */ -265 @InterfaceAudience.Private -266 static String encodeRegionName(final byte [] regionName) { -267String encodedName; -268if (hasEncodedName(regionName)) { -269 // region is in new format: -270 // tableName,startKey,regionIdTimeStamp/encodedName/ -271 encodedName = Bytes.toString(regionName, -272 regionName.length - MD5_HEX_LENGTH - 1, -273 MD5_HEX_LENGTH); -274} else { -275 // old format region name. First hbase:meta region also -276 // use this format.EncodedName is the JenkinsHash value. 
-277 HashKeybyte[] key = new ByteArrayHashKey(regionName, 0, regionName.length); -278 int hashVal = Math.abs(JenkinsHash.getInstance().hash(key, 0)); -279 encodedName = String.valueOf(hashVal); -280} -281return encodedName; -282 } -283 -284 /** -285 * @return Return a String of short, printable names for codehris/code -286 * (usually encoded name) for us logging. -287 */ -288 static String getShortNameToLog(RegionInfo...hris) { -289return getShortNameToLog(Arrays.asList(hris)); -290 } -291 -292 /** -293 * @return Return a String of short, printable names for codehris/code -294 * (usually encoded name) for us logging. +256 static String encodeRegionName(final byte [] regionName) { +257String encodedName; +258if (hasEncodedName(regionName)) { +259 // region is in new format: +260 // tableName,startKey,regionIdTimeStamp/encodedName/ +261 encodedName = Bytes.toString(regionName,
[05/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/5a2158f2/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HStore.StoreFlusherImpl.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HStore.StoreFlusherImpl.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HStore.StoreFlusherImpl.html index 5243b06..af3ba51 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HStore.StoreFlusherImpl.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HStore.StoreFlusherImpl.html @@ -2128,464 +2128,463 @@ 2120return this.region; 2121 } 2122 -2123 @Override -2124 public RegionCoprocessorHost getCoprocessorHost() { -2125return this.region.getCoprocessorHost(); -2126 } -2127 -2128 @Override -2129 public RegionInfo getRegionInfo() { -2130return this.fs.getRegionInfo(); -2131 } -2132 -2133 @Override -2134 public boolean areWritesEnabled() { -2135return this.region.areWritesEnabled(); -2136 } -2137 -2138 @Override -2139 public long getSmallestReadPoint() { -2140return this.region.getSmallestReadPoint(); -2141 } -2142 -2143 /** -2144 * Adds or replaces the specified KeyValues. -2145 * p -2146 * For each KeyValue specified, if a cell with the same row, family, and qualifier exists in -2147 * MemStore, it will be replaced. Otherwise, it will just be inserted to MemStore. -2148 * p -2149 * This operation is atomic on each KeyValue (row/family/qualifier) but not necessarily atomic -2150 * across all of them. 
-2151 * @param cells -2152 * @param readpoint readpoint below which we can safely remove duplicate KVs -2153 * @param memstoreSize -2154 * @throws IOException -2155 */ -2156 public void upsert(IterableCell cells, long readpoint, MemStoreSize memstoreSize) -2157 throws IOException { -2158this.lock.readLock().lock(); -2159try { -2160 this.memstore.upsert(cells, readpoint, memstoreSize); -2161} finally { -2162 this.lock.readLock().unlock(); -2163} -2164 } -2165 -2166 public StoreFlushContext createFlushContext(long cacheFlushId) { -2167return new StoreFlusherImpl(cacheFlushId); -2168 } -2169 -2170 private final class StoreFlusherImpl implements StoreFlushContext { -2171 -2172private long cacheFlushSeqNum; -2173private MemStoreSnapshot snapshot; -2174private ListPath tempFiles; -2175private ListPath committedFiles; -2176private long cacheFlushCount; -2177private long cacheFlushSize; -2178private long outputFileSize; -2179 -2180private StoreFlusherImpl(long cacheFlushSeqNum) { -2181 this.cacheFlushSeqNum = cacheFlushSeqNum; -2182} -2183 -2184/** -2185 * This is not thread safe. The caller should have a lock on the region or the store. -2186 * If necessary, the lock can be added with the patch provided in HBASE-10087 -2187 */ -2188@Override -2189public void prepare() { -2190 // passing the current sequence number of the wal - to allow bookkeeping in the memstore -2191 this.snapshot = memstore.snapshot(); -2192 this.cacheFlushCount = snapshot.getCellsCount(); -2193 this.cacheFlushSize = snapshot.getDataSize(); -2194 committedFiles = new ArrayList(1); -2195} -2196 -2197@Override -2198public void flushCache(MonitoredTask status) throws IOException { -2199 RegionServerServices rsService = region.getRegionServerServices(); -2200 ThroughputController throughputController = -2201 rsService == null ? 
null : rsService.getFlushThroughputController(); -2202 tempFiles = HStore.this.flushCache(cacheFlushSeqNum, snapshot, status, throughputController); -2203} -2204 -2205@Override -2206public boolean commit(MonitoredTask status) throws IOException { -2207 if (this.tempFiles == null || this.tempFiles.isEmpty()) { -2208return false; -2209 } -2210 ListHStoreFile storeFiles = new ArrayList(this.tempFiles.size()); -2211 for (Path storeFilePath : tempFiles) { -2212try { -2213 HStoreFile sf = HStore.this.commitFile(storeFilePath, cacheFlushSeqNum, status); -2214 outputFileSize += sf.getReader().length(); -2215 storeFiles.add(sf); -2216} catch (IOException ex) { -2217 LOG.error("Failed to commit store file " + storeFilePath, ex); -2218 // Try to delete the files we have committed before. -2219 for (HStoreFile sf : storeFiles) { -2220Path pathToDelete = sf.getPath(); -2221try { - sf.deleteStoreFile(); -2223} catch (IOException deleteEx) { -2224 LOG.fatal("Failed to delete store file we committed, halting " + pathToDelete, ex); -2225 Runtime.getRuntime().halt(1); -2226} -2227 } -2228 throw new IOException("Failed to
[20/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/5a2158f2/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.html index 5d138ea..81d256e 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.html @@ -2387,7 +2387,7 @@ 2379 return true; 2380} 2381long modifiedFlushCheckInterval = flushCheckInterval; -2382if (getRegionInfo().isSystemTable() +2382if (getRegionInfo().getTable().isSystemTable() 2383getRegionInfo().getReplicaId() == RegionInfo.DEFAULT_REPLICA_ID) { 2384 modifiedFlushCheckInterval = SYSTEM_CACHE_FLUSH_INTERVAL; 2385} @@ -7869,7 +7869,7 @@ 7861 */ 7862 public byte[] checkSplit() { 7863// Can't split META -7864if (this.getRegionInfo().isMetaTable() || +7864if (this.getRegionInfo().isMetaRegion() || 7865 TableName.NAMESPACE_TABLE_NAME.equals(this.getRegionInfo().getTable())) { 7866 if (shouldForceSplit()) { 7867LOG.warn("Cannot split meta region in HBase 0.20 and above"); @@ -7914,374 +7914,372 @@ 7906 } 7907 7908 /** @return the coprocessor host */ -7909 @Override -7910 public RegionCoprocessorHost getCoprocessorHost() { -7911return coprocessorHost; -7912 } -7913 -7914 /** @param coprocessorHost the new coprocessor host */ -7915 public void setCoprocessorHost(final RegionCoprocessorHost coprocessorHost) { -7916this.coprocessorHost = coprocessorHost; -7917 } -7918 -7919 @Override -7920 public void startRegionOperation() throws IOException { -7921 startRegionOperation(Operation.ANY); -7922 } -7923 -7924 @Override -7925 @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="SF_SWITCH_FALLTHROUGH", -7926justification="Intentional") -7927 public void startRegionOperation(Operation op) throws IOException { 
-7928switch (op) { -7929 case GET: // read operations -7930 case SCAN: -7931checkReadsEnabled(); -7932 case INCREMENT: // write operations -7933 case APPEND: -7934 case SPLIT_REGION: -7935 case MERGE_REGION: -7936 case PUT: -7937 case DELETE: -7938 case BATCH_MUTATE: -7939 case COMPACT_REGION: -7940 case SNAPSHOT: -7941// when a region is in recovering state, no read, split, merge or snapshot is allowed -7942if (isRecovering() (this.disallowWritesInRecovering || -7943 (op != Operation.PUT op != Operation.DELETE op != Operation.BATCH_MUTATE))) { -7944 throw new RegionInRecoveryException(getRegionInfo().getRegionNameAsString() + -7945" is recovering; cannot take reads"); -7946} -7947break; -7948 default: -7949break; -7950} -7951if (op == Operation.MERGE_REGION || op == Operation.SPLIT_REGION -7952|| op == Operation.COMPACT_REGION) { -7953 // split, merge or compact region doesn't need to check the closing/closed state or lock the -7954 // region -7955 return; -7956} -7957if (this.closing.get()) { -7958 throw new NotServingRegionException(getRegionInfo().getRegionNameAsString() + " is closing"); -7959} -7960lock(lock.readLock()); -7961if (this.closed.get()) { -7962 lock.readLock().unlock(); -7963 throw new NotServingRegionException(getRegionInfo().getRegionNameAsString() + " is closed"); -7964} -7965// The unit for snapshot is a region. So, all stores for this region must be -7966// prepared for snapshot operation before proceeding. 
-7967if (op == Operation.SNAPSHOT) { -7968 stores.values().forEach(HStore::preSnapshotOperation); -7969} -7970try { -7971 if (coprocessorHost != null) { -7972 coprocessorHost.postStartRegionOperation(op); -7973 } -7974} catch (Exception e) { -7975 lock.readLock().unlock(); -7976 throw new IOException(e); -7977} -7978 } -7979 -7980 @Override -7981 public void closeRegionOperation() throws IOException { -7982 closeRegionOperation(Operation.ANY); -7983 } -7984 -7985 @Override -7986 public void closeRegionOperation(Operation operation) throws IOException { -7987if (operation == Operation.SNAPSHOT) { -7988 stores.values().forEach(HStore::postSnapshotOperation); -7989} -7990lock.readLock().unlock(); -7991if (coprocessorHost != null) { -7992 coprocessorHost.postCloseRegionOperation(operation); -7993} -7994 } -7995 -7996 /** -7997 * This method needs to be called before any public call that reads or -7998 * modifies stores in bulk. It has to be called just before a try. -7999 *
[50/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/5a2158f2/apidocs/src-html/org/apache/hadoop/hbase/client/RegionInfo.html -- diff --git a/apidocs/src-html/org/apache/hadoop/hbase/client/RegionInfo.html b/apidocs/src-html/org/apache/hadoop/hbase/client/RegionInfo.html index 07af8e5..757cb15 100644 --- a/apidocs/src-html/org/apache/hadoop/hbase/client/RegionInfo.html +++ b/apidocs/src-html/org/apache/hadoop/hbase/client/RegionInfo.html @@ -223,541 +223,531 @@ 215 boolean isSplitParent(); 216 217 /** -218 * @return true if this region is from hbase:meta. +218 * @return true if this region is a meta region. 219 */ -220 boolean isMetaTable(); +220 boolean isMetaRegion(); 221 222 /** -223 * @return true if this region is from a system table. -224 */ -225 boolean isSystemTable(); -226 -227 /** -228 * @return true if this region is a meta region. -229 */ -230 boolean isMetaRegion(); -231 -232 /** -233 * @param rangeStartKey -234 * @param rangeEndKey -235 * @return true if the given inclusive range of rows is fully contained -236 * by this region. For example, if the region is foo,a,g and this is -237 * passed ["b","c"] or ["a","c"] it will return true, but if this is passed -238 * ["b","z"] it will return false. -239 * @throws IllegalArgumentException if the range passed is invalid (ie. end lt; start) -240 */ -241 boolean containsRange(byte[] rangeStartKey, byte[] rangeEndKey); -242 -243 /** -244 * @param row -245 * @return true if the given row falls in this region. -246 */ -247 boolean containsRow(byte[] row); -248 -249 /** -250 * Does region name contain its encoded name? -251 * @param regionName region name -252 * @return boolean indicating if this a new format region -253 * name which contains its encoded name. +223 * @param rangeStartKey +224 * @param rangeEndKey +225 * @return true if the given inclusive range of rows is fully contained +226 * by this region. 
For example, if the region is foo,a,g and this is +227 * passed ["b","c"] or ["a","c"] it will return true, but if this is passed +228 * ["b","z"] it will return false. +229 * @throws IllegalArgumentException if the range passed is invalid (ie. end lt; start) +230 */ +231 boolean containsRange(byte[] rangeStartKey, byte[] rangeEndKey); +232 +233 /** +234 * @param row +235 * @return true if the given row falls in this region. +236 */ +237 boolean containsRow(byte[] row); +238 +239 /** +240 * Does region name contain its encoded name? +241 * @param regionName region name +242 * @return boolean indicating if this a new format region +243 * name which contains its encoded name. +244 */ +245 @InterfaceAudience.Private +246 static boolean hasEncodedName(final byte[] regionName) { +247// check if region name ends in ENC_SEPARATOR +248return (regionName.length = 1) +249 (regionName[regionName.length - 1] == RegionInfo.ENC_SEPARATOR); +250 } +251 +252 /** +253 * @return the encodedName 254 */ 255 @InterfaceAudience.Private -256 static boolean hasEncodedName(final byte[] regionName) { -257// check if region name ends in ENC_SEPARATOR -258return (regionName.length = 1) -259 (regionName[regionName.length - 1] == RegionInfo.ENC_SEPARATOR); -260 } -261 -262 /** -263 * @return the encodedName -264 */ -265 @InterfaceAudience.Private -266 static String encodeRegionName(final byte [] regionName) { -267String encodedName; -268if (hasEncodedName(regionName)) { -269 // region is in new format: -270 // tableName,startKey,regionIdTimeStamp/encodedName/ -271 encodedName = Bytes.toString(regionName, -272 regionName.length - MD5_HEX_LENGTH - 1, -273 MD5_HEX_LENGTH); -274} else { -275 // old format region name. First hbase:meta region also -276 // use this format.EncodedName is the JenkinsHash value. 
-277 HashKeybyte[] key = new ByteArrayHashKey(regionName, 0, regionName.length); -278 int hashVal = Math.abs(JenkinsHash.getInstance().hash(key, 0)); -279 encodedName = String.valueOf(hashVal); -280} -281return encodedName; -282 } -283 -284 /** -285 * @return Return a String of short, printable names for codehris/code -286 * (usually encoded name) for us logging. -287 */ -288 static String getShortNameToLog(RegionInfo...hris) { -289return getShortNameToLog(Arrays.asList(hris)); -290 } -291 -292 /** -293 * @return Return a String of short, printable names for codehris/code -294 * (usually encoded name) for us logging. +256 static String encodeRegionName(final byte [] regionName) { +257String encodedName; +258if (hasEncodedName(regionName)) { +259 // region is in new format: +260 // tableName,startKey,regionIdTimeStamp/encodedName/ +261 encodedName = Bytes.toString(regionName, +262
[45/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/5a2158f2/devapidocs/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.html b/devapidocs/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.html index ba6063b..227b9aa 100644 --- a/devapidocs/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.html +++ b/devapidocs/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.html @@ -119,9 +119,8 @@ var activeTableTab = "activeTableTab"; -@InterfaceAudience.LimitedPrivate(value="Coprocesssor") - @InterfaceStability.Evolving -public abstract class CoprocessorHostC extends Coprocessor,E extends CoprocessorEnvironmentC +@InterfaceAudience.Private +public abstract class CoprocessorHostC extends Coprocessor,E extends CoprocessorEnvironmentC extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true; title="class or interface in java.lang">Object Provides the common setup framework and runtime services for coprocessor invocation from HBase services. @@ -486,7 +485,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html? REGION_COPROCESSOR_CONF_KEY -public static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">String REGION_COPROCESSOR_CONF_KEY +public static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">String REGION_COPROCESSOR_CONF_KEY See Also: Constant Field Values @@ -499,7 +498,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html? 
REGIONSERVER_COPROCESSOR_CONF_KEY -public static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">String REGIONSERVER_COPROCESSOR_CONF_KEY +public static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">String REGIONSERVER_COPROCESSOR_CONF_KEY See Also: Constant Field Values @@ -512,7 +511,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html? USER_REGION_COPROCESSOR_CONF_KEY -public static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">String USER_REGION_COPROCESSOR_CONF_KEY +public static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">String USER_REGION_COPROCESSOR_CONF_KEY See Also: Constant Field Values @@ -525,7 +524,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html? MASTER_COPROCESSOR_CONF_KEY -public static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">String MASTER_COPROCESSOR_CONF_KEY +public static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">String MASTER_COPROCESSOR_CONF_KEY See Also: Constant Field Values @@ -538,7 +537,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html? 
WAL_COPROCESSOR_CONF_KEY -public static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">String WAL_COPROCESSOR_CONF_KEY +public static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">String WAL_COPROCESSOR_CONF_KEY See Also: Constant Field Values @@ -551,7 +550,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html? ABORT_ON_ERROR_KEY -public static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">String ABORT_ON_ERROR_KEY +public static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">String ABORT_ON_ERROR_KEY See Also: Constant Field Values @@ -564,7 +563,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html? DEFAULT_ABORT_ON_ERROR -public static finalboolean DEFAULT_ABORT_ON_ERROR +public static finalboolean DEFAULT_ABORT_ON_ERROR See Also: Constant Field Values @@ -577,7 +576,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html? COPROCESSORS_ENABLED_CONF_KEY -public static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">String COPROCESSORS_ENABLED_CONF_KEY +public static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">String COPROCESSORS_ENABLED_CONF_KEY See
[30/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/5a2158f2/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.ObserverOperationWithResult.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.ObserverOperationWithResult.html b/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.ObserverOperationWithResult.html index ed99dd8..cb1b7e5 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.ObserverOperationWithResult.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.ObserverOperationWithResult.html @@ -66,670 +66,669 @@ 058 * @param E type of specific coprocessor environment this host requires. 059 * provides 060 */ -061@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC) -062@InterfaceStability.Evolving -063public abstract class CoprocessorHostC extends Coprocessor, E extends CoprocessorEnvironmentC { -064 public static final String REGION_COPROCESSOR_CONF_KEY = -065 "hbase.coprocessor.region.classes"; -066 public static final String REGIONSERVER_COPROCESSOR_CONF_KEY = -067 "hbase.coprocessor.regionserver.classes"; -068 public static final String USER_REGION_COPROCESSOR_CONF_KEY = -069 "hbase.coprocessor.user.region.classes"; -070 public static final String MASTER_COPROCESSOR_CONF_KEY = -071 "hbase.coprocessor.master.classes"; -072 public static final String WAL_COPROCESSOR_CONF_KEY = -073"hbase.coprocessor.wal.classes"; -074 public static final String ABORT_ON_ERROR_KEY = "hbase.coprocessor.abortonerror"; -075 public static final boolean DEFAULT_ABORT_ON_ERROR = true; -076 public static final String COPROCESSORS_ENABLED_CONF_KEY = "hbase.coprocessor.enabled"; -077 public static final boolean DEFAULT_COPROCESSORS_ENABLED = true; -078 public static final String USER_COPROCESSORS_ENABLED_CONF_KEY = -079"hbase.coprocessor.user.enabled"; -080 public static final boolean DEFAULT_USER_COPROCESSORS_ENABLED = true; 
-081 -082 private static final Log LOG = LogFactory.getLog(CoprocessorHost.class); -083 protected Abortable abortable; -084 /** Ordered set of loaded coprocessors with lock */ -085 protected final SortedList<E> coprocEnvironments = -086 new SortedList(new EnvironmentPriorityComparator()); -087 protected Configuration conf; -088 // unique file prefix to use for local copies of jars when classloading -089 protected String pathPrefix; -090 protected AtomicInteger loadSequence = new AtomicInteger(); -091 -092 public CoprocessorHost(Abortable abortable) { -093this.abortable = abortable; -094this.pathPrefix = UUID.randomUUID().toString(); -095 } -096 -097 /** -098 * Not to be confused with the per-object _coprocessors_ (above), -099 * coprocessorNames is static and stores the set of all coprocessors ever -100 * loaded by any thread in this JVM. It is strictly additive: coprocessors are -101 * added to coprocessorNames, by checkAndLoadInstance() but are never removed, since -102 * the intention is to preserve a history of all loaded coprocessors for -103 * diagnosis in case of server crash (HBASE-4014). -104 */ -105 private static Set<String> coprocessorNames = -106 Collections.synchronizedSet(new HashSet<String>()); -107 -108 public static Set<String> getLoadedCoprocessors() { -109synchronized (coprocessorNames) { -110 return new HashSet(coprocessorNames); -111} -112 } -113 -114 /** -115 * Used to create a parameter to the HServerLoad constructor so that -116 * HServerLoad can provide information about the coprocessors loaded by this -117 * regionserver. -118 * (HBASE-4070: Improve region server metrics to report loaded coprocessors -119 * to master). -120 */ -121 public Set<String> getCoprocessors() { -122Set<String> returnValue = new TreeSet(); -123for (E e: coprocEnvironments) { -124 returnValue.add(e.getInstance().getClass().getSimpleName()); -125} -126return returnValue; -127 } -128 -129 /** -130 * Load system coprocessors once only. Read the class names from configuration. 
-131 * Called by constructor. -132 */ -133 protected void loadSystemCoprocessors(Configuration conf, String confKey) { -134boolean coprocessorsEnabled = conf.getBoolean(COPROCESSORS_ENABLED_CONF_KEY, -135 DEFAULT_COPROCESSORS_ENABLED); -136if (!coprocessorsEnabled) { -137 return; -138} -139 -140Class<?> implClass; -141 -142// load default coprocessors from configure file -143String[] defaultCPClasses = conf.getStrings(confKey); -144if (defaultCPClasses == null || defaultCPClasses.length == 0) -145 return; -146 -147int priority = Coprocessor.PRIORITY_SYSTEM; -148for (String className : defaultCPClasses) { -149 className = className.trim(); -150 if
[27/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/5a2158f2/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/Export.PrivilegedWriter.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/Export.PrivilegedWriter.html b/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/Export.PrivilegedWriter.html index 95f6ef8..45f0981 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/Export.PrivilegedWriter.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/Export.PrivilegedWriter.html @@ -61,487 +61,488 @@ 053import org.apache.hadoop.hbase.protobuf.ProtobufUtil; 054import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.DelegationToken; 055import org.apache.hadoop.hbase.protobuf.generated.ExportProtos; -056import org.apache.hadoop.hbase.regionserver.InternalScanner; -057import org.apache.hadoop.hbase.regionserver.Region; -058import org.apache.hadoop.hbase.regionserver.RegionScanner; -059import org.apache.hadoop.hbase.security.User; -060import org.apache.hadoop.hbase.security.UserProvider; -061import org.apache.hadoop.hbase.security.token.FsDelegationToken; -062import org.apache.hadoop.hbase.util.ArrayUtils; -063import org.apache.hadoop.hbase.util.ByteStringer; -064import org.apache.hadoop.hbase.util.Bytes; -065import org.apache.hadoop.hbase.util.Triple; -066import org.apache.hadoop.io.SequenceFile; -067import org.apache.hadoop.io.Text; -068import org.apache.hadoop.io.compress.CompressionCodec; -069import org.apache.hadoop.io.compress.DefaultCodec; -070import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat; -071import org.apache.hadoop.security.token.Token; -072import org.apache.hadoop.util.GenericOptionsParser; -073import org.apache.hadoop.util.ReflectionUtils; -074import org.apache.yetus.audience.InterfaceAudience; -075import org.apache.yetus.audience.InterfaceStability; -076 -077import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting; -078 -079import 
com.google.protobuf.RpcCallback; -080import com.google.protobuf.RpcController; -081import com.google.protobuf.Service; -082 -083/** -084 * Export an HBase table. Writes content to sequence files up in HDFS. Use -085 * {@link Import} to read it back in again. It is implemented by the endpoint -086 * technique. -087 * -088 * @see org.apache.hadoop.hbase.mapreduce.Export -089 */ -090@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC) -091@InterfaceStability.Evolving -092public class Export extends ExportProtos.ExportService implements RegionCoprocessor { -093 -094 private static final Log LOG = LogFactory.getLog(Export.class); -095 private static final Class<? extends CompressionCodec> DEFAULT_CODEC = DefaultCodec.class; -096 private static final SequenceFile.CompressionType DEFAULT_TYPE = SequenceFile.CompressionType.RECORD; -097 private RegionCoprocessorEnvironment env = null; -098 private UserProvider userProvider; -099 -100 public static void main(String[] args) throws Throwable { -101Map<byte[], Response> response = run(HBaseConfiguration.create(), args); -102System.exit(response == null ? 
-1 : 0); -103 } -104 -105 @VisibleForTesting -106 static Map<byte[], Response> run(final Configuration conf, final String[] args) throws Throwable { -107String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs(); -108if (!ExportUtils.isValidArguements(args)) { -109 ExportUtils.usage("Wrong number of arguments: " + ArrayUtils.length(otherArgs)); -110 return null; -111} -112Triple<TableName, Scan, Path> arguments = ExportUtils.getArgumentsFromCommandLine(conf, otherArgs); -113return run(conf, arguments.getFirst(), arguments.getSecond(), arguments.getThird()); -114 } -115 -116 public static Map<byte[], Response> run(final Configuration conf, TableName tableName, Scan scan, Path dir) throws Throwable { -117FileSystem fs = dir.getFileSystem(conf); -118UserProvider userProvider = UserProvider.instantiate(conf); -119checkDir(fs, dir); -120FsDelegationToken fsDelegationToken = new FsDelegationToken(userProvider, "renewer"); -121 fsDelegationToken.acquireDelegationToken(fs); -122try { -123 final ExportProtos.ExportRequest request = getConfiguredRequest(conf, dir, -124scan, fsDelegationToken.getUserToken()); -125 try (Connection con = ConnectionFactory.createConnection(conf); -126 Table table = con.getTable(tableName)) { -127Map<byte[], Response> result = new TreeMap(Bytes.BYTES_COMPARATOR); -128 table.coprocessorService(ExportProtos.ExportService.class, -129 scan.getStartRow(), -130 scan.getStopRow(), -131 (ExportProtos.ExportService service) -> { -132ServerRpcController controller = new ServerRpcController(); -133Map<byte[], ExportProtos.ExportResponse> rval = new
hbase git commit: HBASE-18954 Make *CoprocessorHost classes private.
Repository: hbase Updated Branches: refs/heads/branch-2 d2e4e0e1f -> e04b15c68 HBASE-18954 Make *CoprocessorHost classes private. Change-Id: I89fded0f74ad83c9bcc2a2b2de925f56aed4e11b Project: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/e04b15c6 Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/e04b15c6 Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/e04b15c6 Branch: refs/heads/branch-2 Commit: e04b15c68534500eb7af655a39fd5ae1dec3b3d2 Parents: d2e4e0e Author: Apekshit SharmaAuthored: Fri Oct 6 16:10:51 2017 -0700 Committer: Apekshit Sharma Committed: Sun Oct 15 04:07:24 2017 -0700 -- .../apache/hadoop/hbase/coprocessor/Export.java | 5 +++-- .../security/access/SecureBulkLoadEndpoint.java | 4 ++-- .../hbase/coprocessor/TestClassLoading.java | 20 ++-- .../hbase/coprocessor/CoprocessorHost.java | 3 +-- .../hadoop/hbase/regionserver/HRegion.java | 1 - .../hbase/regionserver/HRegionServer.java | 4 ++-- .../hadoop/hbase/regionserver/HStore.java | 1 - .../hadoop/hbase/regionserver/Region.java | 4 .../regionserver/RegionCoprocessorHost.java | 3 +-- .../RegionServerCoprocessorHost.java| 3 +-- .../regionserver/SecureBulkLoadManager.java | 4 ++-- .../apache/hadoop/hbase/regionserver/Store.java | 2 -- .../hbase/client/TestFromClientSide3.java | 5 ++--- .../TestRegionObserverScannerOpenHook.java | 2 +- .../hbase/security/access/SecureTestUtil.java | 4 ++-- .../security/access/TestAccessController.java | 6 +++--- .../security/access/TestAccessController3.java | 4 ++-- .../access/TestWithDisabledAuthorization.java | 4 ++-- 18 files changed, 38 insertions(+), 41 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hbase/blob/e04b15c6/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/coprocessor/Export.java -- diff --git a/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/coprocessor/Export.java b/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/coprocessor/Export.java index a00af0f..f1ee4f2 
100644 --- a/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/coprocessor/Export.java +++ b/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/coprocessor/Export.java @@ -53,6 +53,7 @@ import org.apache.hadoop.hbase.mapreduce.ResultSerialization; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.DelegationToken; import org.apache.hadoop.hbase.protobuf.generated.ExportProtos; +import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.InternalScanner; import org.apache.hadoop.hbase.regionserver.Region; import org.apache.hadoop.hbase.regionserver.RegionScanner; @@ -377,10 +378,10 @@ public class Export extends ExportProtos.ExportService implements RegionCoproces private static class ScanCoprocessor { -private final Region region; +private final HRegion region; ScanCoprocessor(final Region region) { - this.region = region; + this.region = (HRegion) region; } RegionScanner checkScannerOpen(final Scan scan) throws IOException { http://git-wip-us.apache.org/repos/asf/hbase/blob/e04b15c6/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/security/access/SecureBulkLoadEndpoint.java -- diff --git a/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/security/access/SecureBulkLoadEndpoint.java b/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/security/access/SecureBulkLoadEndpoint.java index 8b579bf..f9798aa 100644 --- a/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/security/access/SecureBulkLoadEndpoint.java +++ b/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/security/access/SecureBulkLoadEndpoint.java @@ -84,7 +84,7 @@ public class SecureBulkLoadEndpoint extends SecureBulkLoadService implements Reg try { SecureBulkLoadManager secureBulkLoadManager = this.rsServices.getSecureBulkLoadManager(); - String bulkToken = secureBulkLoadManager.prepareBulkLoad(this.env.getRegion(), + String bulkToken = secureBulkLoadManager.prepareBulkLoad((HRegion) 
this.env.getRegion(), convert(request)); done.run(PrepareBulkLoadResponse.newBuilder().setBulkToken(bulkToken).build()); } catch (IOException e) { @@ -110,7 +110,7 @@ public class SecureBulkLoadEndpoint extends SecureBulkLoadService implements Reg RpcCallback done) { try { SecureBulkLoadManager secureBulkLoadManager = this.rsServices.getSecureBulkLoadManager(); -
hbase git commit: HBASE-18954 Make *CoprocessorHost classes private.
Repository: hbase Updated Branches: refs/heads/master 240b4b16f -> 202e414eb HBASE-18954 Make *CoprocessorHost classes private. Change-Id: I89fded0f74ad83c9bcc2a2b2de925f56aed4e11b Project: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/202e414e Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/202e414e Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/202e414e Branch: refs/heads/master Commit: 202e414eb2e9bf9c825b6ac64a9c2ae50e1dcf5d Parents: 240b4b1 Author: Apekshit SharmaAuthored: Fri Oct 6 16:10:51 2017 -0700 Committer: Apekshit Sharma Committed: Sun Oct 15 04:03:12 2017 -0700 -- .../apache/hadoop/hbase/coprocessor/Export.java | 5 +++-- .../security/access/SecureBulkLoadEndpoint.java | 4 ++-- .../hbase/coprocessor/TestClassLoading.java | 20 ++-- .../hbase/coprocessor/CoprocessorHost.java | 3 +-- .../hadoop/hbase/regionserver/HRegion.java | 1 - .../hbase/regionserver/HRegionServer.java | 4 ++-- .../hadoop/hbase/regionserver/HStore.java | 1 - .../hadoop/hbase/regionserver/Region.java | 4 .../regionserver/RegionCoprocessorHost.java | 3 +-- .../RegionServerCoprocessorHost.java| 3 +-- .../regionserver/SecureBulkLoadManager.java | 4 ++-- .../apache/hadoop/hbase/regionserver/Store.java | 2 -- .../hbase/client/TestFromClientSide3.java | 5 ++--- .../TestRegionObserverScannerOpenHook.java | 2 +- .../hbase/security/access/SecureTestUtil.java | 4 ++-- .../security/access/TestAccessController.java | 6 +++--- .../security/access/TestAccessController3.java | 4 ++-- .../access/TestWithDisabledAuthorization.java | 4 ++-- 18 files changed, 38 insertions(+), 41 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hbase/blob/202e414e/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/coprocessor/Export.java -- diff --git a/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/coprocessor/Export.java b/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/coprocessor/Export.java index a00af0f..f1ee4f2 
100644 --- a/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/coprocessor/Export.java +++ b/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/coprocessor/Export.java @@ -53,6 +53,7 @@ import org.apache.hadoop.hbase.mapreduce.ResultSerialization; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.DelegationToken; import org.apache.hadoop.hbase.protobuf.generated.ExportProtos; +import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.InternalScanner; import org.apache.hadoop.hbase.regionserver.Region; import org.apache.hadoop.hbase.regionserver.RegionScanner; @@ -377,10 +378,10 @@ public class Export extends ExportProtos.ExportService implements RegionCoproces private static class ScanCoprocessor { -private final Region region; +private final HRegion region; ScanCoprocessor(final Region region) { - this.region = region; + this.region = (HRegion) region; } RegionScanner checkScannerOpen(final Scan scan) throws IOException { http://git-wip-us.apache.org/repos/asf/hbase/blob/202e414e/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/security/access/SecureBulkLoadEndpoint.java -- diff --git a/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/security/access/SecureBulkLoadEndpoint.java b/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/security/access/SecureBulkLoadEndpoint.java index 8b579bf..f9798aa 100644 --- a/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/security/access/SecureBulkLoadEndpoint.java +++ b/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/security/access/SecureBulkLoadEndpoint.java @@ -84,7 +84,7 @@ public class SecureBulkLoadEndpoint extends SecureBulkLoadService implements Reg try { SecureBulkLoadManager secureBulkLoadManager = this.rsServices.getSecureBulkLoadManager(); - String bulkToken = secureBulkLoadManager.prepareBulkLoad(this.env.getRegion(), + String bulkToken = secureBulkLoadManager.prepareBulkLoad((HRegion) 
this.env.getRegion(), convert(request)); done.run(PrepareBulkLoadResponse.newBuilder().setBulkToken(bulkToken).build()); } catch (IOException e) { @@ -110,7 +110,7 @@ public class SecureBulkLoadEndpoint extends SecureBulkLoadService implements Reg RpcCallback done) { try { SecureBulkLoadManager secureBulkLoadManager = this.rsServices.getSecureBulkLoadManager(); -