hbase git commit: HBASE-21566 Release notes and changes for 2.0.4RC0 and 2.1.2RC0
Repository: hbase Updated Branches: refs/heads/branch-2.1 c9e85773d -> 434bd0cd9 HBASE-21566 Release notes and changes for 2.0.4RC0 and 2.1.2RC0 Project: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/434bd0cd Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/434bd0cd Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/434bd0cd Branch: refs/heads/branch-2.1 Commit: 434bd0cd91d08353a8e7207ced530df3b3b1af76 Parents: c9e8577 Author: stack Authored: Thu Dec 6 21:54:17 2018 -0800 Committer: stack Committed: Thu Dec 6 21:54:17 2018 -0800 -- CHANGES.md | 72 RELEASENOTES.md | 67 +++- 2 files changed, 138 insertions(+), 1 deletion(-) -- http://git-wip-us.apache.org/repos/asf/hbase/blob/434bd0cd/CHANGES.md -- diff --git a/CHANGES.md b/CHANGES.md index 643c928..06169ad 100755 --- a/CHANGES.md +++ b/CHANGES.md @@ -18,6 +18,78 @@ # limitations under the License. --> +## Release 2.1.2 - Unreleased (as of 2018-12-07) + +### IMPROVEMENTS: + +| JIRA | Summary | Priority | Component | +|: |: | :--- |: | +| [HBASE-21413](https://issues.apache.org/jira/browse/HBASE-21413) | Empty meta log doesn't get split when restart whole cluster | Major | . | +| [HBASE-21524](https://issues.apache.org/jira/browse/HBASE-21524) | Unnecessary DEBUG log in ConnectionImplementation#isTableEnabled | Major | Client | +| [HBASE-21511](https://issues.apache.org/jira/browse/HBASE-21511) | Remove in progress snapshot check in SnapshotFileCache#getUnreferencedFiles | Minor | . | +| [HBASE-21480](https://issues.apache.org/jira/browse/HBASE-21480) | Taking snapshot when RS crashes prevent we bring the regions online | Major | snapshots | +| [HBASE-21485](https://issues.apache.org/jira/browse/HBASE-21485) | Add more debug logs for remote procedure execution | Major | proc-v2 | +| [HBASE-21388](https://issues.apache.org/jira/browse/HBASE-21388) | No need to instantiate MemStoreLAB for master which not carry table | Major | . 
| + + +### BUG FIXES: + +| JIRA | Summary | Priority | Component | +|: |: | :--- |: | +| [HBASE-21559](https://issues.apache.org/jira/browse/HBASE-21559) | The RestoreSnapshotFromClientTestBase related UT are flaky | Major | . | +| [HBASE-21551](https://issues.apache.org/jira/browse/HBASE-21551) | Memory leak when use scan with STREAM at server side | Blocker | regionserver | +| [HBASE-21479](https://issues.apache.org/jira/browse/HBASE-21479) | Individual tests in TestHRegionReplayEvents class are failing | Major | . | +| [HBASE-21518](https://issues.apache.org/jira/browse/HBASE-21518) | TestMasterFailoverWithProcedures is flaky | Major | . | +| [HBASE-21504](https://issues.apache.org/jira/browse/HBASE-21504) | If enable FIFOCompactionPolicy, a compaction may write a "empty" hfile whose maxTimeStamp is long max. This kind of hfile will never be archived. | Critical | Compaction | +| [HBASE-21300](https://issues.apache.org/jira/browse/HBASE-21300) | Fix the wrong reference file path when restoring snapshots for tables with MOB columns | Major | . | +| [HBASE-21492](https://issues.apache.org/jira/browse/HBASE-21492) | CellCodec Written To WAL Before It's Verified | Critical | wal | +| [HBASE-21507](https://issues.apache.org/jira/browse/HBASE-21507) | Compaction failed when execute AbstractMultiFileWriter.beforeShipped() method | Major | regionserver | +| [HBASE-21387](https://issues.apache.org/jira/browse/HBASE-21387) | Race condition surrounding in progress snapshot handling in snapshot cache leads to loss of snapshot files | Major | snapshots | +| [HBASE-21503](https://issues.apache.org/jira/browse/HBASE-21503) | Replication normal source can get stuck due potential race conditions between source wal reader and wal provider initialization threads. | Blocker | Replication | +| [HBASE-21440](https://issues.apache.org/jira/browse/HBASE-21440) | Assign procedure on the crashed server is not properly interrupted | Major | . 
| +| [HBASE-21468](https://issues.apache.org/jira/browse/HBASE-21468) | separate workers for meta table is not working | Major | . | +| [HBASE-21445](https://issues.apache.org/jira/browse/HBASE-21445) | CopyTable by bulkload will write hfile into yarn's HDFS | Major | mapreduce | +| [HBASE-21437](https://issues.apache.org/jira/browse/HBASE-21437) | Bypassed procedure throw IllegalArgumentException when its state is WAITING\_TIMEOUT | Major | . | +| [HBASE-21439](https://issues.apache.org/jira/browse/HBASE-21439) | StochasticLoadBalancer RegionLoads aren't being used in RegionLoad cost functions | Major | Balancer | +| [HBASE-20604](https://issues.apac
hbase git commit: HBASE-21566 Release notes and changes for 2.0.4RC0 and 2.1.1RC0
Repository: hbase Updated Branches: refs/heads/branch-2.0 790e880e0 -> 909782156 HBASE-21566 Release notes and changes for 2.0.4RC0 and 2.1.1RC0 Project: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/90978215 Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/90978215 Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/90978215 Branch: refs/heads/branch-2.0 Commit: 9097821560f630dff0fb9df4b0c589ad2acb8016 Parents: 790e880 Author: stack Authored: Thu Dec 6 21:49:17 2018 -0800 Committer: stack Committed: Thu Dec 6 21:49:17 2018 -0800 -- CHANGES.md | 40 ++- RELEASENOTES.md | 53 +++- 2 files changed, 91 insertions(+), 2 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hbase/blob/90978215/CHANGES.md -- diff --git a/CHANGES.md b/CHANGES.md index 7d9607d..df80826 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -41,7 +41,45 @@ may have to bulk import old-style CHANGES.txt on to the end in a code comment to preserve continuity of the CHANGELOG. --> -## Release 2.0.3 - Unreleased (as of 2018-11-19) +## Release 2.0.4 - Unreleased (as of 2018-12-07) + +### IMPROVEMENTS: + +| JIRA | Summary | Priority | Component | +|: |: | :--- |: | +| [HBASE-21413](https://issues.apache.org/jira/browse/HBASE-21413) | Empty meta log doesn't get split when restart whole cluster | Major | . | +| [HBASE-21146](https://issues.apache.org/jira/browse/HBASE-21146) | (2.0) Add ability for HBase Canary to ignore a configurable number of ZooKeeper down nodes | Minor | canary, Zookeeper | + + +### BUG FIXES: + +| JIRA | Summary | Priority | Component | +|: |: | :--- |: | +| [HBASE-21559](https://issues.apache.org/jira/browse/HBASE-21559) | The RestoreSnapshotFromClientTestBase related UT are flaky | Major | . 
| +| [HBASE-21551](https://issues.apache.org/jira/browse/HBASE-21551) | Memory leak when use scan with STREAM at server side | Blocker | regionserver | +| [HBASE-21544](https://issues.apache.org/jira/browse/HBASE-21544) | Backport HBASE-20734 Colocate recovered edits directory with hbase.wal.dir | Major | wal | +| [HBASE-21518](https://issues.apache.org/jira/browse/HBASE-21518) | TestMasterFailoverWithProcedures is flaky | Major | . | +| [HBASE-21504](https://issues.apache.org/jira/browse/HBASE-21504) | If enable FIFOCompactionPolicy, a compaction may write a "empty" hfile whose maxTimeStamp is long max. This kind of hfile will never be archived. | Critical | Compaction | +| [HBASE-21492](https://issues.apache.org/jira/browse/HBASE-21492) | CellCodec Written To WAL Before It's Verified | Critical | wal | +| [HBASE-21507](https://issues.apache.org/jira/browse/HBASE-21507) | Compaction failed when execute AbstractMultiFileWriter.beforeShipped() method | Major | regionserver | + + +### SUB-TASKS: + +| JIRA | Summary | Priority | Component | +|: |: | :--- |: | +| [HBASE-21557](https://issues.apache.org/jira/browse/HBASE-21557) | Set version to 2.0.4 on branch-2.0 so can cut an RC | Major | release | + + +### OTHER: + +| JIRA | Summary | Priority | Component | +|: |: | :--- |: | +| [HBASE-21495](https://issues.apache.org/jira/browse/HBASE-21495) | Create 2.0.3 Release | Major | . 
| +| [HBASE-21517](https://issues.apache.org/jira/browse/HBASE-21517) | Move the getTableRegionForRow method from HMaster to TestMaster | Major | test | + + +## Release 2.0.3 - Released 2018-12-02 ### INCOMPATIBLE CHANGES: http://git-wip-us.apache.org/repos/asf/hbase/blob/90978215/RELEASENOTES.md -- diff --git a/RELEASENOTES.md b/RELEASENOTES.md index 0809957..1514864 100644 --- a/RELEASENOTES.md +++ b/RELEASENOTES.md @@ -1,4 +1,4 @@ -# HBASE 2.0.3 Release Notes +# HBASE 2.0.4 Release Notes +These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements. + + +--- + +* [HBASE-21551](https://issues.apache.org/jira/browse/HBASE-21551) | *Blocker* | **Memory leak when use scan with STREAM at server side** + + +### Summary +HBase clusters will experience Region Server failures due to out of memory errors due to a leak given any of the following: + +* User initiates Scan operations set to use the STREAM reading type +* User initiates Scan operations set to use the default reading type that read more than 4 * the block size of column families involved in the scan (e.g. by default 4*64KiB) +* Compactions run + +### Root cause + +When there are long running scans the Region Server process attempts to optimize access by using a different API geared towards sequential access. Du
hbase git commit: HBASE-21554 Show replication endpoint classname for replication peer on master web UI
Repository: hbase Updated Branches: refs/heads/master e0e0694fa -> 8d7061a48 HBASE-21554 Show replication endpoint classname for replication peer on master web UI Project: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/8d7061a4 Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/8d7061a4 Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/8d7061a4 Branch: refs/heads/master Commit: 8d7061a487357344f10ee260979cc2c47cd833dd Parents: e0e0694 Author: Guanghao Zhang Authored: Thu Dec 6 10:54:14 2018 +0800 Committer: Guanghao Zhang Committed: Fri Dec 7 13:31:59 2018 +0800 -- .../org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon | 2 ++ 1 file changed, 2 insertions(+) -- http://git-wip-us.apache.org/repos/asf/hbase/blob/8d7061a4/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon -- diff --git a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon index 7bb6c40..da44052 100644 --- a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon +++ b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon @@ -621,6 +621,7 @@ AssignmentManager assignmentManager = master.getAssignmentManager(); Peer Id Cluster Key +Endpoint State IsSerial Remote WAL @@ -641,6 +642,7 @@ AssignmentManager assignmentManager = master.getAssignmentManager(); <% peerId %> <% peerConfig.getClusterKey() %> +<% peerConfig.getReplicationEndpointImpl() %> <% peer.isEnabled() ? "ENABLED" : "DISABLED" %> <% peerConfig.isSerial() %> <% peerConfig.getRemoteWALDir() == null ? "" : peerConfig.getRemoteWALDir() %>
hbase git commit: HBASE-21413 Empty meta log doesn't get split when restart whole cluster Signed-off-by: stack
Repository: hbase Updated Branches: refs/heads/master 1e65bd5cf -> e0e0694fa HBASE-21413 Empty meta log doesn't get split when restart whole cluster Signed-off-by: stack Project: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/e0e0694f Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/e0e0694f Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/e0e0694f Branch: refs/heads/master Commit: e0e0694fa18372e6551420687bcf28ffe0bbc1f8 Parents: 1e65bd5 Author: Allan Yang Authored: Thu Dec 6 21:13:03 2018 -0800 Committer: stack Committed: Thu Dec 6 21:21:41 2018 -0800 -- .../hadoop/hbase/master/MasterWalManager.java | 40 ++ .../master/procedure/ServerCrashProcedure.java | 3 + .../hbase/regionserver/TestCleanupMetaWAL.java | 83 3 files changed, 126 insertions(+) -- http://git-wip-us.apache.org/repos/asf/hbase/blob/e0e0694f/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterWalManager.java -- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterWalManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterWalManager.java index 2b1a81f..5ab1c28 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterWalManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterWalManager.java @@ -35,6 +35,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathFilter; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.hbase.wal.AbstractFSWALProvider; @@ -356,4 +357,43 @@ public class MasterWalManager { } } } + + /** + * For meta region open and closed normally on a server, it may leave some meta + * WAL in the server's wal dir. 
Since meta region is no long on this server, + * The SCP won't split those meta wals, just leaving them there. So deleting + * the wal dir will fail since the dir is not empty. Actually We can safely achive those + * meta log and Archiving the meta log and delete the dir. + * @param serverName the server to archive meta log + */ + public void archiveMetaLog(final ServerName serverName) { +try { + Path logDir = new Path(this.rootDir, + AbstractFSWALProvider.getWALDirectoryName(serverName.toString())); + Path splitDir = logDir.suffix(AbstractFSWALProvider.SPLITTING_EXT); + if (fs.exists(splitDir)) { +FileStatus[] logfiles = FSUtils.listStatus(fs, splitDir, META_FILTER); +if (logfiles != null) { + for (FileStatus status : logfiles) { +if (!status.isDir()) { + Path newPath = AbstractFSWAL.getWALArchivePath(this.oldLogDir, + status.getPath()); + if (!FSUtils.renameAndSetModifyTime(fs, status.getPath(), newPath)) { +LOG.warn("Unable to move " + status.getPath() + " to " + newPath); + } else { +LOG.debug("Archived meta log " + status.getPath() + " to " + newPath); + } +} + } +} +if (!fs.delete(splitDir, false)) { + LOG.warn("Unable to delete log dir. Ignoring. " + splitDir); +} + } +} catch (IOException ie) { + LOG.warn("Failed archiving meta log for server " + serverName, ie); +} + } + + } http://git-wip-us.apache.org/repos/asf/hbase/blob/e0e0694f/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java -- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java index 048bca8..b93f8fa 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java @@ -221,6 +221,9 @@ public class ServerCrashProcedure // PROBLEM!!! WE BLOCK HERE. 
am.getRegionStates().logSplitting(this.serverName); mwm.splitLog(this.serverName); +if (!carryingMeta) { + mwm.archiveMetaLog(this.serverName); +} am.getRegionStates().logSplit(this.serverName); LOG.debug("Done splitting WALs {}", this); } http://git-wip-us.apache.org/repos/asf/hbase/blob/e0e0694f/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCleanupMetaWAL.java --
[2/2] hbase git commit: HBASE-21413 Empty meta log doesn't get split when restart whole cluster Signed-off-by: stack
HBASE-21413 Empty meta log doesn't get split when restart whole cluster Signed-off-by: stack Project: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/f86d311f Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/f86d311f Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/f86d311f Branch: refs/heads/branch-2 Commit: f86d311f760241e0411ce91c094c277ed2e6628e Parents: 2ebf5ab Author: Allan Yang Authored: Thu Dec 6 21:13:03 2018 -0800 Committer: stack Committed: Thu Dec 6 21:20:55 2018 -0800 -- .../hadoop/hbase/master/MasterWalManager.java | 40 ++ .../master/procedure/ServerCrashProcedure.java | 3 + .../hbase/regionserver/TestCleanupMetaWAL.java | 83 3 files changed, 126 insertions(+) -- http://git-wip-us.apache.org/repos/asf/hbase/blob/f86d311f/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterWalManager.java -- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterWalManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterWalManager.java index 2b1a81f..5ab1c28 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterWalManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterWalManager.java @@ -35,6 +35,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathFilter; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.hbase.wal.AbstractFSWALProvider; @@ -356,4 +357,43 @@ public class MasterWalManager { } } } + + /** + * For meta region open and closed normally on a server, it may leave some meta + * WAL in the server's wal dir. Since meta region is no long on this server, + * The SCP won't split those meta wals, just leaving them there. 
So deleting + * the wal dir will fail since the dir is not empty. Actually We can safely achive those + * meta log and Archiving the meta log and delete the dir. + * @param serverName the server to archive meta log + */ + public void archiveMetaLog(final ServerName serverName) { +try { + Path logDir = new Path(this.rootDir, + AbstractFSWALProvider.getWALDirectoryName(serverName.toString())); + Path splitDir = logDir.suffix(AbstractFSWALProvider.SPLITTING_EXT); + if (fs.exists(splitDir)) { +FileStatus[] logfiles = FSUtils.listStatus(fs, splitDir, META_FILTER); +if (logfiles != null) { + for (FileStatus status : logfiles) { +if (!status.isDir()) { + Path newPath = AbstractFSWAL.getWALArchivePath(this.oldLogDir, + status.getPath()); + if (!FSUtils.renameAndSetModifyTime(fs, status.getPath(), newPath)) { +LOG.warn("Unable to move " + status.getPath() + " to " + newPath); + } else { +LOG.debug("Archived meta log " + status.getPath() + " to " + newPath); + } +} + } +} +if (!fs.delete(splitDir, false)) { + LOG.warn("Unable to delete log dir. Ignoring. " + splitDir); +} + } +} catch (IOException ie) { + LOG.warn("Failed archiving meta log for server " + serverName, ie); +} + } + + } http://git-wip-us.apache.org/repos/asf/hbase/blob/f86d311f/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java -- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java index 048bca8..b93f8fa 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java @@ -221,6 +221,9 @@ public class ServerCrashProcedure // PROBLEM!!! WE BLOCK HERE. 
am.getRegionStates().logSplitting(this.serverName); mwm.splitLog(this.serverName); +if (!carryingMeta) { + mwm.archiveMetaLog(this.serverName); +} am.getRegionStates().logSplit(this.serverName); LOG.debug("Done splitting WALs {}", this); } http://git-wip-us.apache.org/repos/asf/hbase/blob/f86d311f/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCleanupMetaWAL.java -- diff --git a/hbase-server/src/test/java/o
[1/2] hbase git commit: Backport "HBASE-21126 Add ability for HBase Canary to ignore a configurable number of ZooKeeper down nodes" to branch-2.0
Repository: hbase Updated Branches: refs/heads/branch-2 5cb8c3e9c -> f86d311f7 Backport "HBASE-21126 Add ability for HBase Canary to ignore a configurable number of ZooKeeper down nodes" to branch-2.0 Signed-off-by: Michael Stack Project: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/2ebf5ab0 Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/2ebf5ab0 Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/2ebf5ab0 Branch: refs/heads/branch-2 Commit: 2ebf5ab02fda95fde61341db37fede89b0416130 Parents: 5cb8c3e Author: David Manning Authored: Wed Aug 29 12:06:59 2018 -0700 Committer: stack Committed: Thu Dec 6 21:20:51 2018 -0800 -- .../main/java/org/apache/hadoop/hbase/tool/Canary.java | 11 +-- 1 file changed, 5 insertions(+), 6 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hbase/blob/2ebf5ab0/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java -- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java index 40f4aa6..71af23e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java @@ -599,7 +599,6 @@ public final class Canary implements Tool { * True if we are to run in zookeeper 'mode'. 
*/ private boolean zookeeperMode = false; - private long permittedFailures = 0; private boolean regionServerAllRegions = false; private boolean writeSniffing = false; @@ -892,6 +891,8 @@ public final class Canary implements Tool { "random one."); System.err.println(" -zookeeper set 'zookeeper mode'; grab zookeeper.znode.parent on " + "each ensemble member"); +System.err.println(" -permittedZookeeperFailures Ignore first N failures when attempting to " + +"connect to individual zookeeper nodes in the ensemble"); System.err.println(" -daemon continuous check at defined intervals."); System.err.println(" -intervalinterval between checks in seconds"); System.err.println(" -e consider table/regionserver argument as regular " + @@ -957,8 +958,7 @@ public final class Canary implements Tool { monitor = new ZookeeperMonitor(connection, monitorTargets, this.useRegExp, getSink(connection.getConfiguration(), ZookeeperStdOutSink.class), - this.executor, this.treatFailureAsError, - this.permittedFailures); + this.executor, this.treatFailureAsError, this.permittedFailures); } else { monitor = new RegionMonitor(connection, monitorTargets, this.useRegExp, @@ -1078,10 +1078,9 @@ public final class Canary implements Tool { public RegionMonitor(Connection connection, String[] monitorTargets, boolean useRegExp, Sink sink, ExecutorService executor, boolean writeSniffing, TableName writeTableName, boolean treatFailureAsError, HashMap configuredReadTableTimeouts, -long configuredWriteTableTimeout, -long allowedFailures) { +long configuredWriteTableTimeout, long allowedFailures) { super(connection, monitorTargets, useRegExp, sink, executor, treatFailureAsError, - allowedFailures); + allowedFailures); Configuration conf = connection.getConfiguration(); this.writeSniffing = writeSniffing; this.writeTableName = writeTableName;
hbase git commit: HBASE-21413 Empty meta log doesn't get split when restart whole cluster Signed-off-by: stack
Repository: hbase Updated Branches: refs/heads/branch-2.0 fc8a50df1 -> 790e880e0 HBASE-21413 Empty meta log doesn't get split when restart whole cluster Signed-off-by: stack Project: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/790e880e Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/790e880e Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/790e880e Branch: refs/heads/branch-2.0 Commit: 790e880e01351b45d58299f6a128ce1a34fe48f8 Parents: fc8a50d Author: stack Authored: Thu Dec 6 21:17:28 2018 -0800 Committer: stack Committed: Thu Dec 6 21:17:28 2018 -0800 -- .../hadoop/hbase/master/MasterWalManager.java | 40 ++ .../master/procedure/ServerCrashProcedure.java | 3 + .../hbase/regionserver/TestCleanupMetaWAL.java | 83 3 files changed, 126 insertions(+) -- http://git-wip-us.apache.org/repos/asf/hbase/blob/790e880e/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterWalManager.java -- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterWalManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterWalManager.java index 21112a1..7ba5709 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterWalManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterWalManager.java @@ -35,6 +35,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathFilter; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.hbase.wal.AbstractFSWALProvider; @@ -408,4 +409,43 @@ public class MasterWalManager { } } } + + /** + * For meta region open and closed normally on a server, it may leave some meta + * WAL in the server's wal dir. 
Since meta region is no long on this server, + * The SCP won't split those meta wals, just leaving them there. So deleting + * the wal dir will fail since the dir is not empty. Actually We can safely achive those + * meta log and Archiving the meta log and delete the dir. + * @param serverName the server to archive meta log + */ + public void archiveMetaLog(final ServerName serverName) { +try { + Path logDir = new Path(this.rootDir, + AbstractFSWALProvider.getWALDirectoryName(serverName.toString())); + Path splitDir = logDir.suffix(AbstractFSWALProvider.SPLITTING_EXT); + if (fs.exists(splitDir)) { +FileStatus[] logfiles = FSUtils.listStatus(fs, splitDir, META_FILTER); +if (logfiles != null) { + for (FileStatus status : logfiles) { +if (!status.isDir()) { + Path newPath = AbstractFSWAL.getWALArchivePath(this.oldLogDir, + status.getPath()); + if (!FSUtils.renameAndSetModifyTime(fs, status.getPath(), newPath)) { +LOG.warn("Unable to move " + status.getPath() + " to " + newPath); + } else { +LOG.debug("Archived meta log " + status.getPath() + " to " + newPath); + } +} + } +} +if (!fs.delete(splitDir, false)) { + LOG.warn("Unable to delete log dir. Ignoring. " + splitDir); +} + } +} catch (IOException ie) { + LOG.warn("Failed archiving meta log for server " + serverName, ie); +} + } + + } http://git-wip-us.apache.org/repos/asf/hbase/blob/790e880e/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java -- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java index 9fe5545..c929028 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java @@ -251,6 +251,9 @@ public class ServerCrashProcedure // PROBLEM!!! WE BLOCK HERE. 
am.getRegionStates().logSplitting(this.serverName); mwm.splitLog(this.serverName); +if (!carryingMeta) { + mwm.archiveMetaLog(this.serverName); +} am.getRegionStates().logSplit(this.serverName); LOG.debug("Done splitting WALs {}", this); } http://git-wip-us.apache.org/repos/asf/hbase/blob/790e880e/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCleanupMetaWAL.java ---
hbase git commit: HBASE-21413 Empty meta log doesn't get split when restart whole cluster Signed-off-by: stack
Repository: hbase Updated Branches: refs/heads/branch-2.1 45324b6bb -> c9e85773d HBASE-21413 Empty meta log doesn't get split when restart whole cluster Signed-off-by: stack Project: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/c9e85773 Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/c9e85773 Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/c9e85773 Branch: refs/heads/branch-2.1 Commit: c9e85773d9226caa48e327b17bd6e52344f83001 Parents: 45324b6 Author: Allan Yang Authored: Thu Dec 6 21:13:03 2018 -0800 Committer: stack Committed: Thu Dec 6 21:15:30 2018 -0800 -- .../hadoop/hbase/master/MasterWalManager.java | 40 ++ .../master/procedure/ServerCrashProcedure.java | 3 + .../hbase/regionserver/TestCleanupMetaWAL.java | 83 3 files changed, 126 insertions(+) -- http://git-wip-us.apache.org/repos/asf/hbase/blob/c9e85773/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterWalManager.java -- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterWalManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterWalManager.java index 21112a1..7ba5709 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterWalManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterWalManager.java @@ -35,6 +35,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathFilter; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.hbase.wal.AbstractFSWALProvider; @@ -408,4 +409,43 @@ public class MasterWalManager { } } } + + /** + * For meta region open and closed normally on a server, it may leave some meta + * WAL in the server's wal dir. 
Since meta region is no long on this server, + * The SCP won't split those meta wals, just leaving them there. So deleting + * the wal dir will fail since the dir is not empty. Actually We can safely achive those + * meta log and Archiving the meta log and delete the dir. + * @param serverName the server to archive meta log + */ + public void archiveMetaLog(final ServerName serverName) { +try { + Path logDir = new Path(this.rootDir, + AbstractFSWALProvider.getWALDirectoryName(serverName.toString())); + Path splitDir = logDir.suffix(AbstractFSWALProvider.SPLITTING_EXT); + if (fs.exists(splitDir)) { +FileStatus[] logfiles = FSUtils.listStatus(fs, splitDir, META_FILTER); +if (logfiles != null) { + for (FileStatus status : logfiles) { +if (!status.isDir()) { + Path newPath = AbstractFSWAL.getWALArchivePath(this.oldLogDir, + status.getPath()); + if (!FSUtils.renameAndSetModifyTime(fs, status.getPath(), newPath)) { +LOG.warn("Unable to move " + status.getPath() + " to " + newPath); + } else { +LOG.debug("Archived meta log " + status.getPath() + " to " + newPath); + } +} + } +} +if (!fs.delete(splitDir, false)) { + LOG.warn("Unable to delete log dir. Ignoring. " + splitDir); +} + } +} catch (IOException ie) { + LOG.warn("Failed archiving meta log for server " + serverName, ie); +} + } + + } http://git-wip-us.apache.org/repos/asf/hbase/blob/c9e85773/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java -- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java index 9fe5545..c929028 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java @@ -251,6 +251,9 @@ public class ServerCrashProcedure // PROBLEM!!! WE BLOCK HERE. 
am.getRegionStates().logSplitting(this.serverName); mwm.splitLog(this.serverName); +if (!carryingMeta) { + mwm.archiveMetaLog(this.serverName); +} am.getRegionStates().logSplit(this.serverName); LOG.debug("Done splitting WALs {}", this); } http://git-wip-us.apache.org/repos/asf/hbase/blob/c9e85773/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCleanupMetaWAL.java --
hbase git commit: HBASE-21549 Add shell command for serial replication peer
Repository: hbase Updated Branches: refs/heads/master dfb9ae8e0 -> 1e65bd5cf HBASE-21549 Add shell command for serial replication peer Project: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/1e65bd5c Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/1e65bd5c Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/1e65bd5c Branch: refs/heads/master Commit: 1e65bd5cf96cf5affd446596ef10b1034e2e0a88 Parents: dfb9ae8 Author: Guanghao Zhang Authored: Wed Dec 5 18:05:03 2018 +0800 Committer: Guanghao Zhang Committed: Fri Dec 7 10:10:13 2018 +0800 -- .../src/main/ruby/hbase/replication_admin.rb| 5 + hbase-shell/src/main/ruby/hbase_constants.rb| 1 + .../src/main/ruby/shell/commands/add_peer.rb| 4 .../main/ruby/shell/commands/set_peer_serial.rb | 4 ++-- .../test/ruby/hbase/replication_admin_test.rb | 22 +++ src/main/asciidoc/_chapters/ops_mgt.adoc| 23 +++- 6 files changed, 56 insertions(+), 3 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hbase/blob/1e65bd5c/hbase-shell/src/main/ruby/hbase/replication_admin.rb -- diff --git a/hbase-shell/src/main/ruby/hbase/replication_admin.rb b/hbase-shell/src/main/ruby/hbase/replication_admin.rb index 5f86365..c01b6ea 100644 --- a/hbase-shell/src/main/ruby/hbase/replication_admin.rb +++ b/hbase-shell/src/main/ruby/hbase/replication_admin.rb @@ -66,6 +66,7 @@ module Hbase namespaces = args.fetch(NAMESPACES, nil) peer_state = args.fetch(STATE, nil) remote_wal_dir = args.fetch(REMOTE_WAL_DIR, nil) +serial = args.fetch(SERIAL, nil) # Create and populate a ReplicationPeerConfig builder = ReplicationPeerConfig.newBuilder() @@ -79,6 +80,10 @@ module Hbase builder.setRemoteWALDir(remote_wal_dir) end +unless serial.nil? + builder.setSerial(serial) +end + unless config.nil? 
builder.putAllConfiguration(config) end http://git-wip-us.apache.org/repos/asf/hbase/blob/1e65bd5c/hbase-shell/src/main/ruby/hbase_constants.rb -- diff --git a/hbase-shell/src/main/ruby/hbase_constants.rb b/hbase-shell/src/main/ruby/hbase_constants.rb index 2870dfb..4c1ad22 100644 --- a/hbase-shell/src/main/ruby/hbase_constants.rb +++ b/hbase-shell/src/main/ruby/hbase_constants.rb @@ -78,6 +78,7 @@ module HBaseConstants ENDPOINT_CLASSNAME = 'ENDPOINT_CLASSNAME'.freeze CLUSTER_KEY = 'CLUSTER_KEY'.freeze REMOTE_WAL_DIR = 'REMOTE_WAL_DIR'.freeze + SERIAL = 'SERIAL'.freeze TABLE_CFS = 'TABLE_CFS'.freeze NAMESPACES = 'NAMESPACES'.freeze STATE = 'STATE'.freeze http://git-wip-us.apache.org/repos/asf/hbase/blob/1e65bd5c/hbase-shell/src/main/ruby/shell/commands/add_peer.rb -- diff --git a/hbase-shell/src/main/ruby/shell/commands/add_peer.rb b/hbase-shell/src/main/ruby/shell/commands/add_peer.rb index 4b6f294..9be42ac 100644 --- a/hbase-shell/src/main/ruby/shell/commands/add_peer.rb +++ b/hbase-shell/src/main/ruby/shell/commands/add_peer.rb @@ -34,6 +34,8 @@ An optional parameter for namespaces identifies which namespace's tables will be to the peer cluster. An optional parameter for table column families identifies which tables and/or column families will be replicated to the peer cluster. +An optional parameter for serial flag identifies whether or not the replication peer is a serial +replication peer. The default serial flag is false. Note: Set a namespace in the peer config means that all tables in this namespace will be replicated to the peer cluster. 
So if you already have set a namespace in peer config, @@ -50,6 +52,8 @@ Examples: NAMESPACES => ["ns1", "ns2", "ns3"] hbase> add_peer '2', CLUSTER_KEY => "zk1,zk2,zk3:2182:/hbase-prod", NAMESPACES => ["ns1", "ns2"], TABLE_CFS => { "ns3:table1" => [], "ns3:table2" => ["cf1"] } + hbase> add_peer '3', CLUSTER_KEY => "zk1,zk2,zk3:2182:/hbase-prod", +NAMESPACES => ["ns1", "ns2", "ns3"], SERIAL => true For a custom replication endpoint, the ENDPOINT_CLASSNAME can be provided. Two optional arguments are DATA and CONFIG which can be specified to set different either the peer_data or configuration http://git-wip-us.apache.org/repos/asf/hbase/blob/1e65bd5c/hbase-shell/src/main/ruby/shell/commands/set_peer_serial.rb -- diff --git a/hbase-shell/src/main/ruby/shell/commands/set_peer_serial.rb b/hbase-shell/src/main/ruby/shell/commands/set_peer_serial.rb index d556077..a6484cd 100644 --- a/hbase-shell/src/main/ruby/shell/commands/set_peer_
hbase git commit: HBASE-21559 The RestoreSnapshotFromClientTestBase related UT are flaky
Repository: hbase Updated Branches: refs/heads/branch-2.1 ec39dc8c1 -> 45324b6bb HBASE-21559 The RestoreSnapshotFromClientTestBase related UT are flaky Signed-off-by: zhangduo Project: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/45324b6b Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/45324b6b Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/45324b6b Branch: refs/heads/branch-2.1 Commit: 45324b6bb2fc8eee21630edfb40ddcfd11940aa7 Parents: ec39dc8 Author: huzheng Authored: Thu Dec 6 20:35:30 2018 +0800 Committer: zhangduo Committed: Fri Dec 7 08:26:02 2018 +0800 -- .../apache/hadoop/hbase/master/snapshot/SnapshotManager.java| 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hbase/blob/45324b6b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java -- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java index a486418..6bfa594 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java @@ -27,6 +27,7 @@ import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.locks.ReadWriteLock; import java.util.concurrent.locks.ReentrantReadWriteLock; @@ -150,7 +151,7 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable // The map is always accessed and modified under the object lock using synchronized. // snapshotTable() will insert an Handler in the table. // isSnapshotDone() will remove the handler requested if the operation is finished. 
- private Map snapshotHandlers = new HashMap<>(); + private Map snapshotHandlers = new ConcurrentHashMap<>(); // Restore map, with table name as key, procedure ID as value. // The map is always accessed and modified under the object lock using synchronized. @@ -419,7 +420,7 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable * @param tableName name of the table being snapshotted. * @return true if there is a snapshot in progress on the specified table. */ - public synchronized boolean isTakingSnapshot(final TableName tableName) { + public boolean isTakingSnapshot(final TableName tableName) { SnapshotSentinel handler = this.snapshotHandlers.get(tableName); return handler != null && !handler.isFinished(); }
hbase git commit: HBASE-21559 The RestoreSnapshotFromClientTestBase related UT are flaky
Repository: hbase Updated Branches: refs/heads/branch-2.0 14f0f72ac -> fc8a50df1 HBASE-21559 The RestoreSnapshotFromClientTestBase related UT are flaky Signed-off-by: zhangduo Project: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/fc8a50df Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/fc8a50df Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/fc8a50df Branch: refs/heads/branch-2.0 Commit: fc8a50df17087d3d7ae68773b777a7ed260348bf Parents: 14f0f72 Author: huzheng Authored: Thu Dec 6 20:35:30 2018 +0800 Committer: zhangduo Committed: Fri Dec 7 08:26:24 2018 +0800 -- .../apache/hadoop/hbase/master/snapshot/SnapshotManager.java| 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hbase/blob/fc8a50df/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java -- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java index a486418..6bfa594 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java @@ -27,6 +27,7 @@ import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.locks.ReadWriteLock; import java.util.concurrent.locks.ReentrantReadWriteLock; @@ -150,7 +151,7 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable // The map is always accessed and modified under the object lock using synchronized. // snapshotTable() will insert an Handler in the table. // isSnapshotDone() will remove the handler requested if the operation is finished. 
- private Map snapshotHandlers = new HashMap<>(); + private Map snapshotHandlers = new ConcurrentHashMap<>(); // Restore map, with table name as key, procedure ID as value. // The map is always accessed and modified under the object lock using synchronized. @@ -419,7 +420,7 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable * @param tableName name of the table being snapshotted. * @return true if there is a snapshot in progress on the specified table. */ - public synchronized boolean isTakingSnapshot(final TableName tableName) { + public boolean isTakingSnapshot(final TableName tableName) { SnapshotSentinel handler = this.snapshotHandlers.get(tableName); return handler != null && !handler.isFinished(); }
hbase git commit: HBASE-21559 The RestoreSnapshotFromClientTestBase related UT are flaky
Repository: hbase Updated Branches: refs/heads/branch-2 1a1a65b56 -> 5cb8c3e9c HBASE-21559 The RestoreSnapshotFromClientTestBase related UT are flaky Signed-off-by: zhangduo Project: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/5cb8c3e9 Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/5cb8c3e9 Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/5cb8c3e9 Branch: refs/heads/branch-2 Commit: 5cb8c3e9c75acf2d9be2f8429b4494eee67049c5 Parents: 1a1a65b Author: huzheng Authored: Thu Dec 6 20:35:30 2018 +0800 Committer: zhangduo Committed: Fri Dec 7 08:25:49 2018 +0800 -- .../apache/hadoop/hbase/master/snapshot/SnapshotManager.java| 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hbase/blob/5cb8c3e9/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java -- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java index ae9b6fb..2b963b2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java @@ -27,6 +27,7 @@ import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.locks.ReadWriteLock; import java.util.concurrent.locks.ReentrantReadWriteLock; @@ -150,7 +151,7 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable // The map is always accessed and modified under the object lock using synchronized. // snapshotTable() will insert an Handler in the table. // isSnapshotDone() will remove the handler requested if the operation is finished. 
- private Map snapshotHandlers = new HashMap<>(); + private Map snapshotHandlers = new ConcurrentHashMap<>(); // Restore map, with table name as key, procedure ID as value. // The map is always accessed and modified under the object lock using synchronized. @@ -419,7 +420,7 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable * @param tableName name of the table being snapshotted. * @return true if there is a snapshot in progress on the specified table. */ - public synchronized boolean isTakingSnapshot(final TableName tableName) { + public boolean isTakingSnapshot(final TableName tableName) { SnapshotSentinel handler = this.snapshotHandlers.get(tableName); return handler != null && !handler.isFinished(); }
hbase git commit: HBASE-21559 The RestoreSnapshotFromClientTestBase related UT are flaky
Repository: hbase Updated Branches: refs/heads/master 170df27b8 -> dfb9ae8e0 HBASE-21559 The RestoreSnapshotFromClientTestBase related UT are flaky Signed-off-by: zhangduo Project: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/dfb9ae8e Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/dfb9ae8e Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/dfb9ae8e Branch: refs/heads/master Commit: dfb9ae8e0e69ad84962b8768190f6891827767fa Parents: 170df27 Author: huzheng Authored: Thu Dec 6 20:35:30 2018 +0800 Committer: zhangduo Committed: Fri Dec 7 08:22:22 2018 +0800 -- .../apache/hadoop/hbase/master/snapshot/SnapshotManager.java| 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hbase/blob/dfb9ae8e/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java -- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java index ae9b6fb..2b963b2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java @@ -27,6 +27,7 @@ import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.locks.ReadWriteLock; import java.util.concurrent.locks.ReentrantReadWriteLock; @@ -150,7 +151,7 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable // The map is always accessed and modified under the object lock using synchronized. // snapshotTable() will insert an Handler in the table. // isSnapshotDone() will remove the handler requested if the operation is finished. 
- private Map snapshotHandlers = new HashMap<>(); + private Map snapshotHandlers = new ConcurrentHashMap<>(); // Restore map, with table name as key, procedure ID as value. // The map is always accessed and modified under the object lock using synchronized. @@ -419,7 +420,7 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable * @param tableName name of the table being snapshotted. * @return true if there is a snapshot in progress on the specified table. */ - public synchronized boolean isTakingSnapshot(final TableName tableName) { + public boolean isTakingSnapshot(final TableName tableName) { SnapshotSentinel handler = this.snapshotHandlers.get(tableName); return handler != null && !handler.isFinished(); }
hbase git commit: HBASE-21414 : StoreFileSize growth rate metric
Repository: hbase Updated Branches: refs/heads/master 12e75a8a6 -> 170df27b8 HBASE-21414 : StoreFileSize growth rate metric Signed-off-by: Sergey Shelukhin Project: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/170df27b Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/170df27b Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/170df27b Branch: refs/heads/master Commit: 170df27b8858c93ea22a38f28f653c347cb8ce7f Parents: 12e75a8 Author: Tommy Li Authored: Thu Dec 6 13:43:13 2018 -0800 Committer: Sergey Shelukhin Committed: Thu Dec 6 14:43:17 2018 -0800 -- .../regionserver/MetricsRegionServerSource.java | 3 +++ .../regionserver/MetricsRegionServerWrapper.java | 5 + .../MetricsRegionServerSourceImpl.java | 2 ++ .../MetricsRegionServerWrapperImpl.java | 19 ++- .../MetricsRegionServerWrapperStub.java | 7 ++- .../regionserver/TestMetricsRegionServer.java| 1 + 6 files changed, 31 insertions(+), 6 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hbase/blob/170df27b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java -- diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java index b94ee2d..6d9ce54 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java @@ -234,6 +234,7 @@ public interface MetricsRegionServerSource extends BaseSource, JvmPauseMonitorSo String MEMSTORE_SIZE = "memStoreSize"; String MEMSTORE_SIZE_DESC = "Size of the memstore"; String STOREFILE_SIZE = "storeFileSize"; + String STOREFILE_SIZE_GROWTH_RATE = "storeFileSizeGrowthRate"; String MAX_STORE_FILE_AGE = "maxStoreFileAge"; String MIN_STORE_FILE_AGE = 
"minStoreFileAge"; String AVG_STORE_FILE_AGE = "avgStoreFileAge"; @@ -243,6 +244,8 @@ public interface MetricsRegionServerSource extends BaseSource, JvmPauseMonitorSo String AVG_STORE_FILE_AGE_DESC = "Average age of store files hosted on this RegionServer"; String NUM_REFERENCE_FILES_DESC = "Number of reference file on this RegionServer"; String STOREFILE_SIZE_DESC = "Size of storefiles being served."; + String STOREFILE_SIZE_GROWTH_RATE_DESC = +"Bytes per second by which the size of storefiles being served grows."; String TOTAL_REQUEST_COUNT = "totalRequestCount"; String TOTAL_REQUEST_COUNT_DESC = "Total number of requests this RegionServer has answered; increments the count once for " + http://git-wip-us.apache.org/repos/asf/hbase/blob/170df27b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapper.java -- diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapper.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapper.java index b3a8dac..03ebc4c 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapper.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapper.java @@ -101,6 +101,11 @@ public interface MetricsRegionServerWrapper { long getStoreFileSize(); /** + * Get the growth rate of the store files this region server is serving from. 
+ */ + double getStoreFileSizeGrowthRate(); + + /** * @return Max age of store files hosted on this region server */ long getMaxStoreFileAge(); http://git-wip-us.apache.org/repos/asf/hbase/blob/170df27b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java -- diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java index 44dbc79..58c42a5 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java @@ -482,6 +482,8 @@ public class MetricsRegionServerSourceImpl rsWrap.getNumStoreFiles()) .addGauge(Interns.info(MEMSTORE_SIZE, MEMSTORE_SIZE_DES
[07/34] hbase-site git commit: Published site at 12e75a8a635785b279900b6905c86a1617526c72.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8d58bba5/testdevapidocs/src-html/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.CPMasterObserver.html -- diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.CPMasterObserver.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.CPMasterObserver.html index 26de44b..ec473e3 100644 --- a/testdevapidocs/src-html/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.CPMasterObserver.html +++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.CPMasterObserver.html @@ -102,1611 +102,1621 @@ 094 095 public static class CPMasterObserver implements MasterCoprocessor, MasterObserver { 096 -097private boolean preCreateTableCalled; -098private boolean postCreateTableCalled; -099private boolean preDeleteTableCalled; -100private boolean postDeleteTableCalled; -101private boolean preTruncateTableCalled; -102private boolean postTruncateTableCalled; -103private boolean preModifyTableCalled; -104private boolean postModifyTableCalled; -105private boolean preCreateNamespaceCalled; -106private boolean postCreateNamespaceCalled; -107private boolean preDeleteNamespaceCalled; -108private boolean postDeleteNamespaceCalled; -109private boolean preModifyNamespaceCalled; -110private boolean postModifyNamespaceCalled; -111private boolean preGetNamespaceDescriptorCalled; -112private boolean postGetNamespaceDescriptorCalled; -113private boolean preListNamespaceDescriptorsCalled; -114private boolean postListNamespaceDescriptorsCalled; -115private boolean preAddColumnCalled; -116private boolean postAddColumnCalled; -117private boolean preModifyColumnCalled; -118private boolean postModifyColumnCalled; -119private boolean preDeleteColumnCalled; -120private boolean postDeleteColumnCalled; -121private boolean preEnableTableCalled; -122private boolean postEnableTableCalled; -123private boolean preDisableTableCalled; -124private boolean 
postDisableTableCalled; -125private boolean preAbortProcedureCalled; -126private boolean postAbortProcedureCalled; -127private boolean preGetProceduresCalled; -128private boolean postGetProceduresCalled; -129private boolean preGetLocksCalled; -130private boolean postGetLocksCalled; -131private boolean preMoveCalled; -132private boolean postMoveCalled; -133private boolean preAssignCalled; -134private boolean postAssignCalled; -135private boolean preUnassignCalled; -136private boolean postUnassignCalled; -137private boolean preRegionOfflineCalled; -138private boolean postRegionOfflineCalled; -139private boolean preBalanceCalled; -140private boolean postBalanceCalled; -141private boolean preBalanceSwitchCalled; -142private boolean postBalanceSwitchCalled; -143private boolean preShutdownCalled; -144private boolean preStopMasterCalled; -145private boolean preMasterInitializationCalled; -146private boolean postStartMasterCalled; -147private boolean startCalled; -148private boolean stopCalled; -149private boolean preSnapshotCalled; -150private boolean postSnapshotCalled; -151private boolean preListSnapshotCalled; -152private boolean postListSnapshotCalled; -153private boolean preCloneSnapshotCalled; -154private boolean postCloneSnapshotCalled; -155private boolean preRestoreSnapshotCalled; -156private boolean postRestoreSnapshotCalled; -157private boolean preDeleteSnapshotCalled; -158private boolean postDeleteSnapshotCalled; -159private boolean preCreateTableActionCalled; -160private boolean postCompletedCreateTableActionCalled; -161private boolean preDeleteTableActionCalled; -162private boolean postCompletedDeleteTableActionCalled; -163private boolean preTruncateTableActionCalled; -164private boolean postCompletedTruncateTableActionCalled; -165private boolean preAddColumnFamilyActionCalled; -166private boolean postCompletedAddColumnFamilyActionCalled; -167private boolean preModifyColumnFamilyActionCalled; -168private boolean postCompletedModifyColumnFamilyActionCalled; 
-169private boolean preDeleteColumnFamilyActionCalled; -170private boolean postCompletedDeleteColumnFamilyActionCalled; -171private boolean preEnableTableActionCalled; -172private boolean postCompletedEnableTableActionCalled; -173private boolean preDisableTableActionCalled; -174private boolean postCompletedDisableTableActionCalled; -175private boolean preModifyTableActionCalled; -176private boolean postCompletedModifyTableActionCalled; -177private boolean preGetTableDescriptorsCalled; -178private boolean postGetTableDescriptorsCa
[22/34] hbase-site git commit: Published site at 12e75a8a635785b279900b6905c86a1617526c72.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8d58bba5/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/MasterObserver.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/MasterObserver.html b/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/MasterObserver.html index b253dfa..c4b2628 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/MasterObserver.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/MasterObserver.html @@ -78,206 +78,206 @@ 070@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC) 071@InterfaceStability.Evolving 072public interface MasterObserver { -073 /** -074 * Called before a new table is created by -075 * {@link org.apache.hadoop.hbase.master.HMaster}. Called as part of create -076 * table RPC call. +073 +074 /** +075 * Called before we create the region infos for this table. Called as part of create table RPC +076 * call. 077 * @param ctx the environment to interact with the framework and master 078 * @param desc the TableDescriptor for the table -079 * @param regions the initial regions created for the table -080 */ -081 default void preCreateTable(final ObserverContextctx, -082 TableDescriptor desc, RegionInfo[] regions) throws IOException {} -083 -084 /** -085 * Called after the createTable operation has been requested. Called as part -086 * of create table RPC call. -087 * @param ctx the environment to interact with the framework and master -088 * @param desc the TableDescriptor for the table -089 * @param regions the initial regions created for the table -090 */ -091 default void postCreateTable(final ObserverContext ctx, -092 TableDescriptor desc, RegionInfo[] regions) throws IOException {} -093 -094 /** -095 * Called before a new table is created by -096 * {@link org.apache.hadoop.hbase.master.HMaster}. Called as part of create -097 * table procedure and it is async to the create RPC call. 
-098 * -099 * @param ctx the environment to interact with the framework and master -100 * @param desc the TableDescriptor for the table -101 * @param regions the initial regions created for the table -102 */ -103 default void preCreateTableAction( -104 final ObserverContext ctx, -105 final TableDescriptor desc, -106 final RegionInfo[] regions) throws IOException {} -107 -108 /** -109 * Called after the createTable operation has been requested. Called as part -110 * of create table RPC call. Called as part of create table procedure and -111 * it is async to the create RPC call. -112 * -113 * @param ctx the environment to interact with the framework and master -114 * @param desc the TableDescriptor for the table -115 * @param regions the initial regions created for the table -116 */ -117 default void postCompletedCreateTableAction( -118 final ObserverContext ctx, -119 final TableDescriptor desc, -120 final RegionInfo[] regions) throws IOException {} -121 -122 /** -123 * Called before {@link org.apache.hadoop.hbase.master.HMaster} deletes a -124 * table. Called as part of delete table RPC call. -125 * @param ctx the environment to interact with the framework and master -126 * @param tableName the name of the table -127 */ -128 default void preDeleteTable(final ObserverContext ctx, -129 TableName tableName) throws IOException {} -130 -131 /** -132 * Called after the deleteTable operation has been requested. Called as part -133 * of delete table RPC call. -134 * @param ctx the environment to interact with the framework and master -135 * @param tableName the name of the table -136 */ -137 default void postDeleteTable(final ObserverContext ctx, -138 TableName tableName) throws IOException {} -139 -140 /** -141 * Called before {@link org.apache.hadoop.hbase.master.HMaster} deletes a -142 * table. Called as part of delete table procedure and -143 * it is async to the delete RPC call. 
-144 * -145 * @param ctx the environment to interact with the framework and master -146 * @param tableName the name of the table -147 */ -148 default void preDeleteTableAction( -149 final ObserverContext ctx, final TableName tableName) -150 throws IOException {} -151 -152 /** -153 * Called after {@link org.apache.hadoop.hbase.master.HMaster} deletes a -154 * table. Called as part of delete table procedure and it is async to the -155 * delete RPC call. -156 * -157 * @param ctx the environment to interact with the framework and master -158 * @param tableName the name of the table -159 */ -160 default void
[30/34] hbase-site git commit: Published site at 12e75a8a635785b279900b6905c86a1617526c72.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8d58bba5/devapidocs/org/apache/hadoop/hbase/coprocessor/example/ExampleMasterObserverWithMetrics.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/coprocessor/example/ExampleMasterObserverWithMetrics.html b/devapidocs/org/apache/hadoop/hbase/coprocessor/example/ExampleMasterObserverWithMetrics.html index 291d5be..3a98266 100644 --- a/devapidocs/org/apache/hadoop/hbase/coprocessor/example/ExampleMasterObserverWithMetrics.html +++ b/devapidocs/org/apache/hadoop/hbase/coprocessor/example/ExampleMasterObserverWithMetrics.html @@ -288,7 +288,7 @@ implements MasterObserver -postAbortProcedure, postAddReplicationPeer, postAddRSGroup, postAssign, postBalance, postBalanceRSGroup, postBalanceSwitch, postClearDeadServers, postCloneSnapshot, postCompletedCreateTableAction, postCompletedDeleteTableAction, postCompletedDisableTableAction, postCompletedEnableTableAction, postCompletedMergeRegionsAction, postCompletedModifyTableAction, postCompletedModifyTableAction, postCompletedSplitRegionAction, postCompletedTruncateTableAction, postCreateNamespace, postDecommissionRegionServers, postDeleteNamespace, postDeleteSnapshot, postDeleteTable, postDisableReplicationPeer, postDisableTable, postEnableReplicationPeer, postEnableTable, postGetClusterMetrics, postGetLocks, postGetNamespaceDescriptor, postGetProcedures, postGetReplicationPeerConfig, postGetRSGroupInfo, postGetRSGroupInfoOfServer, postGetRSGroupInfoOfTable, postGetTableDescriptors, postGetTableNames, postListDecommissionedRegionServers, postListNamespaceDescriptors, postListReplicationPeers, postListRSGroups, postListSnapshot, postLockHeartbeat, postMergeRegions, postMergeRegionsCommitAction, postModifyNamespace, postModifyNamespace, postModifyTable, postModifyTable, postMove, postMoveServers, postMoveServersAndTables, postMoveTables, postRecommissionRegionServer, postRegionOffline, postRemoveReplicationPeer, postRemoveRSGroup, postRemoveServers, postRequestLock, 
postRestoreSnapshot, postRollBackMergeRegionsAction, postRollBackSplitRegionAction, postSetNamespaceQuota, postSetSplitOrMergeEnabled, pos tSetTableQuota, postSetUserQuota, postSetUserQuota, postSetUserQuota, postSnapshot, postStartMaster, postTableFlush, postTransitReplicationPeerSyncReplicationState, postTruncateTable, postUnassign, postUpdateReplicationPeerConfig, preAbortProcedure, preAddReplicationPeer, preAddRSGroup, preAssign, preBalance, preBalanceRSGroup, preBalanceSwitch, preClearDeadServers, preCloneSnapshot, preCreateNamespace, preCreateTableAction, preDecommissionRegionServers, preDeleteNamespace, preDeleteSnapshot, preDeleteTable, preDeleteTableAction, preDisableReplicationPeer, preDisableTableAction, preEnableReplicationPeer, preEnableTable, preEnabl eTableAction, preGetClusterMetrics, preGetLocks, preGetNamespaceDescriptor, preGetProcedures, preGetReplicationPeerConfig, preGetRSGroupInfo, preGetRSGroupInfoOfServer, preGetRSGroupInfoOfTable, preGetTableDescriptors, preGetTableNames, preListDecommissionedRegionServers, preListNamespaceDescriptors, preListReplicationPeers, preListRSGroups, preListSnapshot, preLockHeartbeat, preMasterInitialization, preMergeRegions, preMergeRegionsAction, preMergeRegionsCommitAction, preModifyNamespace, preModifyNamespace, preModifyTable, preModifyTable, preModifyTableAction, preModifyTableAction, preMove, preMoveServers, preMoveServersAndTables, preMoveTables, preRecommissionR egionServer, preRegionOffline, preRemoveReplicationPeer, preRemoveRSGroup, preRemoveServers, preRequestLock, preRestoreSnapshot, preSetNamespaceQuota, preSetSplitOrMergeEnabled, preSetTableQuota, preSetUserQuota, preSetUserQuota, preSetUserQuota, preShutdown, preSnapshot, preSplitRegion, preSplitRegionAction, preSplitRegionAfterMETAA ction, preSplitRegionBeforeMETAAction, preStopMaster, preTableFlush, preTransitReplicationPeerSyncReplicationState, preTruncateTable, preTruncateTableAction, preUnassign, preUpdateReplicationPeerConfig 
+postAbortProcedure, postAddReplicationPeer, postAddRSGroup, postAssign, postBalance, postBalanceRSGroup, postBalanceSwitch, postClearDeadServers, postCloneSnapshot, postCompletedCreateTableAction, postCompletedDeleteTableAction, postCompletedDisableTableAction, postCompletedEnableTableAction, postCompletedMergeRegionsAction, postCompletedModifyTableAction, postCompletedModifyTableAction, postCompletedSplitRegionAction, postCompletedTruncateTableAction, postCreateNamespace, postDecommissionRegionServers, postDeleteNamespace, postDeleteSnapshot, postDeleteTable, postDisableReplicationPeer, postDisableTable, postEnableReplicationPeer, postEnableTable, postGetClusterMetrics, postGetLocks, postGetNamespaceDescriptor, postGetProcedures, postGetReplicationPeerConfig,
[18/34] hbase-site git commit: Published site at 12e75a8a635785b279900b6905c86a1617526c72.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8d58bba5/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterCoprocessorHost.MasterObserverOperation.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterCoprocessorHost.MasterObserverOperation.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterCoprocessorHost.MasterObserverOperation.html index 8e27b8f..0c3f93a 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterCoprocessorHost.MasterObserverOperation.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterCoprocessorHost.MasterObserverOperation.html @@ -325,1447 +325,1461 @@ 317 318 /* Implementation of hooks for invoking MasterObservers */ 319 -320 public void preCreateTable(final TableDescriptor htd, final RegionInfo[] regions) -321 throws IOException { -322 execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() { -323 @Override -324 public void call(MasterObserver observer) throws IOException { -325observer.preCreateTable(this, htd, regions); -326 } -327}); -328 } -329 -330 public void postCreateTable(final TableDescriptor htd, final RegionInfo[] regions) -331 throws IOException { -332 execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() { -333 @Override -334 public void call(MasterObserver observer) throws IOException { -335observer.postCreateTable(this, htd, regions); -336 } -337}); -338 } -339 -340 public void preCreateTableAction(final TableDescriptor htd, final RegionInfo[] regions, -341 final User user) throws IOException { -342 execOperation(coprocEnvironments.isEmpty() ? 
null : new MasterObserverOperation(user) { -343 @Override -344 public void call(MasterObserver observer) throws IOException { -345 observer.preCreateTableAction(this, htd, regions); -346 } -347}); -348 } -349 -350 public void postCompletedCreateTableAction( -351 final TableDescriptor htd, final RegionInfo[] regions, final User user) throws IOException { -352 execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation(user) { -353 @Override -354 public void call(MasterObserver observer) throws IOException { -355 observer.postCompletedCreateTableAction(this, htd, regions); -356 } -357}); -358 } -359 -360 public void preDeleteTable(final TableName tableName) throws IOException { -361 execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() { -362 @Override -363 public void call(MasterObserver observer) throws IOException { -364observer.preDeleteTable(this, tableName); -365 } -366}); -367 } -368 -369 public void postDeleteTable(final TableName tableName) throws IOException { -370 execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() { -371 @Override -372 public void call(MasterObserver observer) throws IOException { -373observer.postDeleteTable(this, tableName); -374 } -375}); -376 } -377 -378 public void preDeleteTableAction(final TableName tableName, final User user) throws IOException { -379 execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation(user) { -380 @Override -381 public void call(MasterObserver observer) throws IOException { -382 observer.preDeleteTableAction(this, tableName); -383 } -384}); -385 } -386 -387 public void postCompletedDeleteTableAction(final TableName tableName, final User user) -388 throws IOException { -389 execOperation(coprocEnvironments.isEmpty() ? 
null : new MasterObserverOperation(user) { -390 @Override -391 public void call(MasterObserver observer) throws IOException { -392 observer.postCompletedDeleteTableAction(this, tableName); -393 } -394}); -395 } -396 -397 public void preTruncateTable(final TableName tableName) throws IOException { -398 execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() { -399 @Override -400 public void call(MasterObserver observer) throws IOException { -401observer.preTruncateTable(this, tableName); -402 } -403}); -404 } -405 -406 public void postTruncateTable(final TableName tableName) throws IOException { -407 execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() { -408 @Override -409 public void call(MasterObserver observer) throws IOException { -410observer.postTruncateTable(this, tableName); -411 } -412}); -413 } -414 -415 public void preTruncateTableAction(final TableName tableName, final User user) -416 throws IOException { -417 execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation(
[26/34] hbase-site git commit: Published site at 12e75a8a635785b279900b6905c86a1617526c72.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8d58bba5/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/StoreFileReader.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/StoreFileReader.html b/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/StoreFileReader.html index 455997d..4cb87d7 100644 --- a/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/StoreFileReader.html +++ b/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/StoreFileReader.html @@ -236,7 +236,7 @@ -private https://docs.oracle.com/javase/8/docs/api/java/util/Set.html?is-external=true"; title="class or interface in java.util">Set+(package private) https://docs.oracle.com/javase/8/docs/api/java/util/Set.html?is-external=true"; title="class or interface in java.util">Set HStoreFile.streamReaders http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8d58bba5/devapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html b/devapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html index 39721a5..ead8bd1 100644 --- a/devapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html +++ b/devapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html @@ -716,20 +716,20 @@ java.lang.https://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true"; title="class or interface in java.lang">Enum (implements java.lang.https://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true"; title="class or interface in java.lang">Comparable , java.io.https://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true"; title="class or interface in java.io">Serializable) -org.apache.hadoop.hbase.regionserver.ScannerContext.LimitScope -org.apache.hadoop.hbase.regionserver.MemStoreCompactionStrategy.Action -org.apache.hadoop.hbase.regionserver.HRegion.FlushResult.Result 
+org.apache.hadoop.hbase.regionserver.CompactingMemStore.IndexType +org.apache.hadoop.hbase.regionserver.FlushType org.apache.hadoop.hbase.regionserver.DefaultHeapMemoryTuner.StepDirection -org.apache.hadoop.hbase.regionserver.SplitLogWorker.TaskExecutor.Status -org.apache.hadoop.hbase.regionserver.BloomType +org.apache.hadoop.hbase.regionserver.TimeRangeTracker.Type +org.apache.hadoop.hbase.regionserver.Region.Operation +org.apache.hadoop.hbase.regionserver.HRegion.FlushResult.Result org.apache.hadoop.hbase.regionserver.ChunkCreator.ChunkType +org.apache.hadoop.hbase.regionserver.SplitLogWorker.TaskExecutor.Status org.apache.hadoop.hbase.regionserver.MetricsRegionServerSourceFactoryImpl.FactoryStorage -org.apache.hadoop.hbase.regionserver.ScannerContext.NextState -org.apache.hadoop.hbase.regionserver.CompactingMemStore.IndexType +org.apache.hadoop.hbase.regionserver.MemStoreCompactionStrategy.Action +org.apache.hadoop.hbase.regionserver.BloomType org.apache.hadoop.hbase.regionserver.ScanType -org.apache.hadoop.hbase.regionserver.Region.Operation -org.apache.hadoop.hbase.regionserver.FlushType -org.apache.hadoop.hbase.regionserver.TimeRangeTracker.Type +org.apache.hadoop.hbase.regionserver.ScannerContext.NextState +org.apache.hadoop.hbase.regionserver.ScannerContext.LimitScope http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8d58bba5/devapidocs/org/apache/hadoop/hbase/regionserver/querymatcher/package-tree.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/querymatcher/package-tree.html b/devapidocs/org/apache/hadoop/hbase/regionserver/querymatcher/package-tree.html index 3bd22b5..23060c2 100644 --- a/devapidocs/org/apache/hadoop/hbase/regionserver/querymatcher/package-tree.html +++ b/devapidocs/org/apache/hadoop/hbase/regionserver/querymatcher/package-tree.html @@ -130,9 +130,9 @@ java.lang.https://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true"; title="class or interface in java.lang">Enum (implements 
java.lang.https://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true"; title="class or interface in java.lang">Comparable , java.io.https://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true"; title="class or interface in java.io">Serializable) -org.apache.hadoop.hbase.regionserver.querymatcher.ScanQueryMatcher.MatchCode org.apache.hadoop.hbase.regionserver.querymatcher.StripeCompactionScanQueryMatcher.DropDeletesInOutput org.apache.hadoop.hbase.regionserver.querymatcher.DeleteTracker.DeleteResult +org.apache.hadoop.hbase.regionserver.querymatcher.ScanQueryMatcher.MatchCode http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8d58bba5/devapidocs/org/apache/hadoop/hbase/regionserv
[12/34] hbase-site git commit: Published site at 12e75a8a635785b279900b6905c86a1617526c72.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8d58bba5/testdevapidocs/org/apache/hadoop/hbase/client/TestEnableTable.MasterSyncObserver.html -- diff --git a/testdevapidocs/org/apache/hadoop/hbase/client/TestEnableTable.MasterSyncObserver.html b/testdevapidocs/org/apache/hadoop/hbase/client/TestEnableTable.MasterSyncObserver.html index ac717c1..2267be2 100644 --- a/testdevapidocs/org/apache/hadoop/hbase/client/TestEnableTable.MasterSyncObserver.html +++ b/testdevapidocs/org/apache/hadoop/hbase/client/TestEnableTable.MasterSyncObserver.html @@ -235,7 +235,7 @@ implements org.apache.hadoop.hbase.coprocessor.MasterCoprocessor, org.apache.had Methods inherited from interface org.apache.hadoop.hbase.coprocessor.MasterObserver -postAbortProcedure, postAddReplicationPeer, postAddRSGroup, postAssign, postBalance, postBalanceRSGroup, postBalanceSwitch, postClearDeadServers, postCloneSnapshot, postCompletedDisableTableAction, postCompletedEnableTableAction, postCompletedMergeRegionsAction, postCompletedModifyTableAction, postCompletedModifyTableAction, postCompletedSplitRegionAction, postCompletedTruncateTableAction, postCreateNamespace, postCreateTable, postDecommissionRegionServers, postDeleteNamespace, postDeleteSnapshot, postDeleteTable, postDisableReplicationPeer, postDisableTable, postEnableReplicationPeer, postEnableTable, postGetClusterMetrics, postGetLocks, postGetNamespaceDescriptor, postGetProcedures, postGetReplicationPeerConfig, postGetRSGroupInfo, postGetRSGroupInfoOfServer, postGetRSGroupInfoOfTable, postGetTableDescriptors, postGetTableNames, postListDecommissionedRegionServers, postListNamespaceDescriptors, postListReplicationPeers, postListRSGroups, postListSnapshot, postLockHeartbeat, postMergeRegions, postMergeRegionsCommitAction, postModifyNamespace, postModifyNamespace, postModifyTable, postModifyTable, postMove, postMoveServers, postMoveServersAndTables, postMoveTables, postRecommissionRegionServer, postRegionOffline, postRemoveReplicationPeer, 
postRemoveRSGroup, postRemoveServers, postRequestLock, postRestoreSnapshot, postRollBackMergeRegionsAction, postRollBackSplitRegionAction, postSetNamespaceQuota, postSetSplitOrMergeEnabled, postSetTableQuota, postSetUserQuota, postSetUserQuota, postSetUserQuota, postSnapshot, postStartMaster, postTableFlush, postTransitReplicationPeerSyncReplicationState, postTruncateTable, postUnassign, postUpdateReplicationPeerConfig, preAbortProcedure, preAddReplicationPeer, preAddRSGroup, preAssign, preBalance, preBalanceRSGroup, preBalanceSwitch, preClearDeadServers, preCloneSnapshot, preCreateNamespace, preCreateTable, preCreateTableAction, preDecommissionRegionServers, preDeleteNamespace, preDeleteSnapshot, preDeleteTable, preDe leteTableAction, preDisableReplicationPeer, preDisableTable, preDisableTableAction, preEnableReplicationPeer, preEnableTable, preEnableTableAction, preGetClusterMetrics, preGetLocks, preGetNamespaceDescriptor, preGetProcedures, preGetReplicationPeerConfig, preGetRSGroupInfo, preGetRSGroupInfoOfServer, preGetRSGroupInfoOfTable, preGetTableDescriptors, preGetTableNames, preListDecommissionedRegionServers, preListNamespaceDescriptors, preListReplicationPeers, preListRSGroups, preListSnapshot, preLockHeartbeat, preMasterInitialization, preMergeRegions, preMergeRegionsAction, preMergeRegionsCommitAction, preModifyNamespace, preModifyNamespace, preModifyTable, preModifyTable, preModifyTableAction, preModifyTableAction, preMove, preMoveServers, preMoveServersAndTables, preMoveTables, preRecommissionRegionServer, preRegionOffline, preRemoveReplicationPeer, preRemoveRSGroup, preRemoveServers, preRequestLock, preRestoreSnapshot, preSetNamespaceQuota, preSetSplitOrMergeEnabled, preSetTableQuot a, preSetUserQuota, preSetUserQuota, preSetUserQuota, preShutdown, preSnapshot, preSplitRegion, preSplitRegionAction, preSplitRegionAfterMETAAction, preSplitRegionBeforeMETAAction, preStopMaster, preTableFlush, preTransitReplicationPeerSyncReplicationState, 
preTruncateTable, preTruncateTableAction, preUnassign, preUpdateReplicationPeerConfig +postAbortProcedure, postAddReplicationPeer, postAddRSGroup, postAssign, postBalance, postBalanceRSGroup, postBalanceSwitch, postClearDeadServers, postCloneSnapshot, postCompletedDisableTableAction, postCompletedEnableTableAction, postCompletedMergeRegionsAction, postCompletedModifyTableAction, postCompletedModifyTableAction, postCompletedSplitRegionAction, postCompletedTruncateTableAction, postCreateNamespace, postCreateTable, postDecommissionRegionServers, postDeleteNamespace, postDeleteSnapshot, postDeleteTable, postDisableReplicationPeer, postDisableTable, postEnableReplicationPeer, postEnableTable, postGetClusterMetrics, postGetLocks, postGetNamespaceDescriptor, postGetProcedures, postGetReplicationPeerConfig, postGetRSGroupInfo, postGetRSGroupInfoOfServer, postGetRSGrou
[34/34] hbase-site git commit: Published site at 12e75a8a635785b279900b6905c86a1617526c72.
Published site at 12e75a8a635785b279900b6905c86a1617526c72. Project: http://git-wip-us.apache.org/repos/asf/hbase-site/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase-site/commit/8d58bba5 Tree: http://git-wip-us.apache.org/repos/asf/hbase-site/tree/8d58bba5 Diff: http://git-wip-us.apache.org/repos/asf/hbase-site/diff/8d58bba5 Branch: refs/heads/asf-site Commit: 8d58bba524e38da142168460086cf498688e75a5 Parents: 0f7d611 Author: jenkins Authored: Thu Dec 6 14:52:25 2018 + Committer: jenkins Committed: Thu Dec 6 14:52:25 2018 + -- acid-semantics.html |4 +- apache_hbase_reference_guide.pdf|4 +- book.html |2 +- bulk-loads.html |4 +- checkstyle-aggregate.html | 184 +- checkstyle.rss |4 +- coc.html|4 +- dependencies.html |4 +- dependency-convergence.html |4 +- dependency-info.html|4 +- dependency-management.html |4 +- devapidocs/constant-values.html |4 +- devapidocs/index-all.html |6 + .../hadoop/hbase/backup/package-tree.html |4 +- .../hbase/client/class-use/TableDescriptor.html | 51 +- .../hadoop/hbase/client/package-tree.html | 22 +- .../hbase/coprocessor/MasterObserver.html | 474 +-- .../class-use/MasterCoprocessorEnvironment.html | 139 +- .../coprocessor/class-use/ObserverContext.html | 211 +- .../ExampleMasterObserverWithMetrics.html |2 +- .../hadoop/hbase/coprocessor/package-tree.html |2 +- .../hadoop/hbase/executor/package-tree.html |2 +- .../hadoop/hbase/filter/package-tree.html | 10 +- .../hadoop/hbase/io/hfile/package-tree.html |6 +- .../apache/hadoop/hbase/ipc/package-tree.html |2 +- .../hadoop/hbase/mapreduce/package-tree.html|4 +- .../hbase/master/MasterCoprocessorHost.html | 424 +-- .../hadoop/hbase/master/package-tree.html |6 +- .../hbase/master/procedure/package-tree.html|6 +- .../hadoop/hbase/monitoring/package-tree.html |2 +- .../org/apache/hadoop/hbase/package-tree.html | 14 +- .../hadoop/hbase/procedure2/package-tree.html |6 +- .../hbase/quotas/MasterQuotasObserver.html |2 +- .../hadoop/hbase/quotas/package-tree.html |6 +- 
.../hadoop/hbase/regionserver/HStoreFile.html | 110 +- .../regionserver/StoreFileReader.Listener.html |4 +- .../hbase/regionserver/StoreFileReader.html | 92 +- .../regionserver/class-use/StoreFileReader.html |2 +- .../hadoop/hbase/regionserver/package-tree.html | 20 +- .../regionserver/querymatcher/package-tree.html |2 +- .../hbase/regionserver/wal/package-tree.html|4 +- .../hadoop/hbase/replication/package-tree.html |2 +- .../replication/regionserver/package-tree.html |2 +- .../hadoop/hbase/rest/model/package-tree.html |2 +- .../hbase/rsgroup/RSGroupAdminEndpoint.html |2 +- .../hbase/security/access/AccessController.html |2 +- .../CoprocessorWhitelistMasterObserver.html |2 +- .../hbase/security/access/package-tree.html |4 +- .../hadoop/hbase/security/package-tree.html |2 +- .../visibility/VisibilityController.html|2 +- .../apache/hadoop/hbase/util/package-tree.html | 12 +- .../org/apache/hadoop/hbase/Version.html|4 +- .../hbase/coprocessor/MasterObserver.html | 2779 - .../master/HMaster.InitializationMonitor.html | 64 +- .../master/HMaster.MasterStoppedException.html | 64 +- .../hbase/master/HMaster.RedirectServlet.html | 64 +- .../master/HMaster.TableDescriptorGetter.html | 64 +- .../org/apache/hadoop/hbase/master/HMaster.html | 64 +- ...MasterCoprocessorHost.MasterEnvironment.html | 2844 - ...st.MasterEnvironmentForCoreCoprocessors.html | 2844 - ...CoprocessorHost.MasterObserverOperation.html | 2844 - .../hbase/master/MasterCoprocessorHost.html | 2844 - .../hadoop/hbase/regionserver/HStoreFile.html | 1001 +++--- .../regionserver/StoreFileReader.Listener.html | 1155 +++ .../hbase/regionserver/StoreFileReader.html | 1155 +++ downloads.html |6 +- export_control.html |4 +- index.html |4 +- integration.html|4 +- issue-tracking.html |4 +- license.html|4 +- mail-lists.html |4 +- metrics.html
[27/34] hbase-site git commit: Published site at 12e75a8a635785b279900b6905c86a1617526c72.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8d58bba5/devapidocs/org/apache/hadoop/hbase/quotas/package-tree.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/quotas/package-tree.html b/devapidocs/org/apache/hadoop/hbase/quotas/package-tree.html index 1815591..5557f8f 100644 --- a/devapidocs/org/apache/hadoop/hbase/quotas/package-tree.html +++ b/devapidocs/org/apache/hadoop/hbase/quotas/package-tree.html @@ -229,13 +229,13 @@ java.lang.https://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true"; title="class or interface in java.lang">Enum(implements java.lang.https://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true"; title="class or interface in java.lang">Comparable , java.io.https://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true"; title="class or interface in java.io">Serializable) +org.apache.hadoop.hbase.quotas.RpcThrottlingException.Type org.apache.hadoop.hbase.quotas.QuotaType -org.apache.hadoop.hbase.quotas.ThrottlingException.Type +org.apache.hadoop.hbase.quotas.ThrottleType org.apache.hadoop.hbase.quotas.OperationQuota.OperationType -org.apache.hadoop.hbase.quotas.RpcThrottlingException.Type org.apache.hadoop.hbase.quotas.QuotaScope +org.apache.hadoop.hbase.quotas.ThrottlingException.Type org.apache.hadoop.hbase.quotas.SpaceViolationPolicy -org.apache.hadoop.hbase.quotas.ThrottleType http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8d58bba5/devapidocs/org/apache/hadoop/hbase/regionserver/HStoreFile.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/HStoreFile.html b/devapidocs/org/apache/hadoop/hbase/regionserver/HStoreFile.html index 9f0e3d8..c9f93f1 100644 --- a/devapidocs/org/apache/hadoop/hbase/regionserver/HStoreFile.html +++ b/devapidocs/org/apache/hadoop/hbase/regionserver/HStoreFile.html @@ -301,7 +301,7 @@ implements STORE_FILE_READER_NO_READAHEAD -private 
https://docs.oracle.com/javase/8/docs/api/java/util/Set.html?is-external=true"; title="class or interface in java.util">Set +(package private) https://docs.oracle.com/javase/8/docs/api/java/util/Set.html?is-external=true"; title="class or interface in java.util">Set streamReaders @@ -782,7 +782,7 @@ implements streamReaders -private final https://docs.oracle.com/javase/8/docs/api/java/util/Set.html?is-external=true"; title="class or interface in java.util">Set streamReaders +final https://docs.oracle.com/javase/8/docs/api/java/util/Set.html?is-external=true"; title="class or interface in java.util">Set streamReaders @@ -791,7 +791,7 @@ implements noReadahead -private final boolean noReadahead +private final boolean noReadahead @@ -800,7 +800,7 @@ implements primaryReplica -private final boolean primaryReplica +private final boolean primaryReplica @@ -809,7 +809,7 @@ implements compactedAway -private volatile boolean compactedAway +private volatile boolean compactedAway @@ -818,7 +818,7 @@ implements sequenceid -private long sequenceid +private long sequenceid @@ -827,7 +827,7 @@ implements maxMemstoreTS -private long maxMemstoreTS +private long maxMemstoreTS @@ -836,7 +836,7 @@ implements firstKey -private https://docs.oracle.com/javase/8/docs/api/java/util/Optional.html?is-external=true"; title="class or interface in java.util">Optional firstKey +private https://docs.oracle.com/javase/8/docs/api/java/util/Optional.html?is-external=true"; title="class or interface in java.util">Optional | firstKey @@ -845,7 +845,7 @@ implements lastKey -private https://docs.oracle.com/javase/8/docs/api/java/util/Optional.html?is-external=true"; title="class or interface in java.util">Optional | lastKey +private https://docs.oracle.com/javase/8/docs/api/java/util/Optional.html?is-external=true"; title="class or interface in java.util">Optional | lastKey @@ -854,7 +854,7 @@ implements comparator -private CellComparator comparator +private CellComparator comparator @@ -863,7 +863,7 @@ 
implements majorCompaction -private https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/AtomicBoolean.html?is-external=true"; title="class or interface in java.util.concurrent.atomic">AtomicBoolean majorCompaction +private https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/AtomicBoolean.html?is-external=true"; title="class or interface in java.util.concurrent.atomic">AtomicBoolean majorCompaction @@ -872,7 +872,7 @@ implements excludeFromMinorCompaction -private boolean excludeFromMinorCompaction +private boolean excludeFromMinorCompaction @@ -881,7 +881,7 @@ implements metadataMap -private https://docs.oracle.com/javase/8/docs/api/java/util/Ma |
[24/34] hbase-site git commit: Published site at 12e75a8a635785b279900b6905c86a1617526c72.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8d58bba5/devapidocs/org/apache/hadoop/hbase/security/access/CoprocessorWhitelistMasterObserver.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/security/access/CoprocessorWhitelistMasterObserver.html b/devapidocs/org/apache/hadoop/hbase/security/access/CoprocessorWhitelistMasterObserver.html index ce837e5..37ab88a 100644 --- a/devapidocs/org/apache/hadoop/hbase/security/access/CoprocessorWhitelistMasterObserver.html +++ b/devapidocs/org/apache/hadoop/hbase/security/access/CoprocessorWhitelistMasterObserver.html @@ -255,7 +255,7 @@ implements MasterObserver -postAbortProcedure, postAddReplicationPeer, postAddRSGroup, postAssign, postBalance, postBalanceRSGroup, postBalanceSwitch, postClearDeadServers, postCloneSnapshot, postCompletedCreateTableAction, postCompletedDeleteTableAction, postCompletedDisableTableAction, postCompletedEnableTableAction, postCompletedMergeRegionsAction, postCompletedModifyTableAction, postCompletedModifyTableAction, postCompletedSplitRegionAction, postCompletedTruncateTableAction, postCreateNamespace, postCreateTable, postDecommissionRegionServers, postDeleteNamespace, postDeleteSnapshot, postDeleteTable, postDisableReplicationPeer, postDisableTable, postEnableReplicationPeer, postEnableTable, postGetClusterMetrics, postGetLocks, postGetNamespaceDescriptor, postGetProcedures, postGetReplicationPeerConfig, postGetRSGroupInfo, postGetRSGroupInfoOfServer, postGetRSGroupInfoOfTable, postGetTableDescriptors, postGetTableNames, postListDecommissionedRegionServers, postListNamespaceDescriptors, postListReplicationPeers, postListRSGroups, postListSnapshot, postLockHeartbeat, postMergeRegions, postMergeRegionsCommitAction, postModifyNamespace, postModifyNamespace, postModifyTable, postModifyTable, postMove, postMoveServers, postMoveServersAndTables, postMoveTables, postRecommissionRegionServer, postRegionOffline, postRemoveReplicationPeer, postRemoveRSGroup, postRemoveServers, 
postRequestLock, postRestoreSnapshot, postRollBackMergeRegionsAction, postRollBackSplitRegionAction, postSetNamespaceQuota, postSetSpl itOrMergeEnabled, postSetTableQuota, postSetUserQuota, postSetUserQuota, postSetUserQuota, postSnapshot, postStartMaster, postTableFlush, postTransitReplicationPeerSyncReplicationState, postTruncateTable, postUnassign, postUpdateReplicationPeerConfig, preAbortProcedure, preAddReplicationPeer, preAddRSGroup, preAssign, preBalance, preBalanceRSGroup, preBalanceSwitch, preClearDeadServers, preCloneSnapshot, preCreateNamespace, preCreateTableAction, preDecommissionRegionServers, preDeleteNamespace, preDeleteSnapshot, preDeleteTable, preDeleteTableAction, preDisableReplicationPeer, preDisableTable, preDisableTableAction, preEnableReplicationPeer, preEnableTable, preEnableTableAction, preGetClusterMetrics, preGetLocks, preGetNamespaceDescriptor, preGetProcedures, preGetReplicationPeerConfig, preGetRSGroupInfo, preGetRSGroupInfoOfServer, preGetRSGroupInfoOfTable, preGetTableDescriptors, preGetTableNames, preListDecommissionedRegionServers, preListNamespaceDescriptors, preListReplicationPeers, preListRSGroups, preListSnapshot, preLockHeartbeat, preMasterInitialization, preMergeRegions, preMergeRegionsAction, preMergeRegionsCommitAction, preModifyNamespace, preModifyNamespace, preModifyTable, preModifyTableAction, preModifyTableAction, preMove, preMoveServers, preMoveServersAndTables, preMoveTables, preRecommissionRegionServer, preRegionOffline, preRemoveReplicationPeer, preRemoveRSGroup, preRemoveServers, href="../../../../../../org/apache/hadoop/hbase/coprocessor/MasterObserver.html#preRequestLock-org.apache.hadoop.hbase.coprocessor.ObserverContext-java.lang.String-org.apache.hadoop.hbase.TableName-org.apache.hadoop.hbase.client.RegionInfo:A-java.lang.String-">preRequestLock, > 
href="../../../../../../org/apache/hadoop/hbase/coprocessor/MasterObserver.html#preRestoreSnapshot-org.apache.hadoop.hbase.coprocessor.ObserverContext-org.apache.hadoop.hbase.client.SnapshotDescription-org.apache.hadoop.hbase.client.TableDescriptor-">preRestoreSnapshot, > href="../../../../../../org/apache/hadoop/hbase/coprocessor/MasterObserver.html#preSetNamespaceQuota-org.apache.hadoop.hbase.coprocessor.ObserverContext-java.lang.String-org.apache.hadoop.hbase.quotas.GlobalQuotaSettings-">preSetNamespaceQuota, > href="../../../../../../org/apache/hadoop/hbase/coprocessor/MasterObserver.html#preSetSplitOrMergeEnabled-org.apache.hadoop.hbase.coprocessor.ObserverContext-boolean-org.apache. hadoop.hbase.client.MasterSwitchType-">preSetSplitOrMergeEnabled, preSetTableQuota, preSetUserQuota, preSetUserQuota, preSetUserQuota, preShutdown, preSnapshot, preSplitRegion, preSplitRegionAction, preSplitRegionAfterMETAAction, preSplitRegionBeforeMETAAction, preSt
[23/34] hbase-site git commit: Published site at 12e75a8a635785b279900b6905c86a1617526c72.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8d58bba5/devapidocs/org/apache/hadoop/hbase/security/visibility/VisibilityController.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/security/visibility/VisibilityController.html b/devapidocs/org/apache/hadoop/hbase/security/visibility/VisibilityController.html index 6b14298..a0769bf 100644 --- a/devapidocs/org/apache/hadoop/hbase/security/visibility/VisibilityController.html +++ b/devapidocs/org/apache/hadoop/hbase/security/visibility/VisibilityController.html @@ -540,7 +540,7 @@ implements MasterObserver -postAbortProcedure, postAddReplicationPeer, postAddRSGroup, postAssign, postBalance, postBalanceRSGroup, postBalanceSwitch, postClearDeadServers, postCloneSnapshot, postCompletedCreateTableAction, postCompletedDeleteTableAction, postCompletedDisableTableAction, postCompletedEnableTableAction, postCompletedMergeRegionsAction, postCompletedModifyTableAction, postCompletedModifyTableAction, postCompletedSplitRegionAction, postCompletedTruncateTableAction, postCreateNamespace, postCreateTable, postDecommissionRegionServers, postDeleteNamespace, postDeleteSnapshot, postDeleteTable, postDisableReplicationPeer, postDisableTable, postEnableReplicationPeer, postEnableTable, postGetClusterMetrics, postGetLocks, postGetNamespaceDescriptor, postGetProcedures, postGetReplicationPeerConfig, postGetRSGroupInfo, postGetRSGroupInfoOfServer, postGetRSGroupInfoOfTable, postGetTableDescriptors, postGetTableNames, postListDecommissionedRegionServers, postListNamespaceDescriptors, postListReplicationPeers, postListRSGroups, postListSnapshot, postLockHeartbeat, postMergeRegions, postMergeRegionsCommitAction, postModifyNamespace, postModifyNamespace, postModifyTable, postModifyTable, postMove, postMoveServers, postMoveServersAndTables, postMoveTables, postRecommissionRegionServer, postRegionOffline, postRemoveReplicationPeer, postRemoveRSGroup, postRemoveServers, postRequestLock, postRestoreSnapshot, 
postRollBackMergeRegionsAction, postRollBackSplitRegionAction, postSetNamespaceQuota, postSetTableQuota, postSetUserQuota, postSetUserQuota, postSetUserQuota, postSnapshot, postTableFlush, postTransitReplicationPeerSyncReplicationState, postTruncateTable, postUnassign, postUpdateReplicationPeerConfig, preAbortProcedure, preAddReplicationPeer, preAddRSGroup , preAssign, preBalance, preBalanceRSGroup, preBalanceSwitch, preClearDeadServers, preCloneSnapshot, preCreateNamespace, preCreateTable, preCreateTableAction, preDecommissionRegionServers, preDeleteNamespace, preDeleteSnapshot, preDeleteTable, preDeleteTableAction, preDisableReplicationPeer, preDisableTableAction, preEnableReplicationPeer, preEnableTable, preEnableTableAction, preGetClusterMetrics, preGetLocks, preGetNamespaceDescriptor, preGetProcedures, preGetReplicationPeerConfig, preGetRSGroupInfo, preGetRSGroupInfoOfServer, preGetRSGroupInfoOfTable, preGetTableDescriptors, preGetTableNames, preListDecommissionedRegionServers, preListNamespaceDescriptors, preListReplicationPeers, preListRSGroups, preListSnapshot, preLockHeartbeat, preMasterInitialization, preMergeRegions, preMergeRegionsAction, preMergeRegionsCommitAction, preModifyNamespace, preModifyNamespace, preModifyTable, preModifyTableAction, preModifyTableAction, preMove, preMoveServers, preMoveServersAndTables, preMoveTables, preRecommissionRegionServer, preRegionOffline, preRemoveReplicationPeer, preRemoveRSGroup, preRemoveServers, preRequestLock, preRestoreSnapshot, preSetNamespaceQuota, preSetSplitOrMergeEnabled, preSetTableQuota, preSetUserQuota, preSetUserQuota, preSetUserQuota, preShutdown, preSnapshot, preSplitRegion, preSplitRegionAction, preSplitRegionAfterMETAAction, preSplitRegionBeforeMETAAction, preStopMaster, preTableFlush, preTransitReplicationPeerSyncReplicationState, preTruncateTable, preTruncateTableAction, preUnassign, preUpdateReplicationPeerConfig +postAbortProcedure, postAddReplicationPeer, postAddRSGroup, postAssign, 
postBalance, postBalanceRSGroup, postBalanceSwitch, postClearDeadServers, postCloneSnapshot, postCompletedCreateTableAction, postCompletedDeleteTableAction, postCompletedDisableTableAction, postCompletedEnableTableAction, postCompletedMergeRegionsAction, postCompletedModifyTableAction, postCompletedModifyTableAction, postCompletedSplitRegionAction, postCompletedTruncateTableAction, postCreateNamespace, postCreateTable, postDecommissionRegionServers, postDeleteNamespace, postDeleteSnapshot, postDeleteTable, postDisableReplicationPeer, postDisableTable, postEnableReplicationPeer, postEnableTable, postGetClusterMetrics, postGetLocks, postGetNamespaceDescriptor, postGetProcedures, postGetReplicationPeerConfig, postGetRSGroupInfo, postGetRSGroupInfoOfServer, postGetRSGroupInfoOfTab
[14/34] hbase-site git commit: Published site at 12e75a8a635785b279900b6905c86a1617526c72.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8d58bba5/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/StoreFileReader.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/StoreFileReader.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/StoreFileReader.html index 397c3a6..97cba93 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/StoreFileReader.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/StoreFileReader.html @@ -194,588 +194,591 @@ 186if (!shared) { 187 try { 188reader.close(false); -189 } catch (IOException e) { -190LOG.warn("failed to close stream reader", e); -191 } -192} -193 } -194 -195 /** -196 * @deprecated Do not write further code which depends on this call. Instead -197 * use getStoreFileScanner() which uses the StoreFileScanner class/interface -198 * which is the preferred way to scan a store with higher level concepts. -199 * -200 * @param cacheBlocks should we cache the blocks? -201 * @param pread use pread (for concurrent small readers) -202 * @return the underlying HFileScanner -203 */ -204 @Deprecated -205 public HFileScanner getScanner(boolean cacheBlocks, boolean pread) { -206return getScanner(cacheBlocks, pread, false); -207 } -208 -209 /** -210 * @deprecated Do not write further code which depends on this call. Instead -211 * use getStoreFileScanner() which uses the StoreFileScanner class/interface -212 * which is the preferred way to scan a store with higher level concepts. -213 * -214 * @param cacheBlocks -215 * should we cache the blocks? -216 * @param pread -217 * use pread (for concurrent small readers) -218 * @param isCompaction -219 * is scanner being used for compaction? 
-220 * @return the underlying HFileScanner -221 */ -222 @Deprecated -223 public HFileScanner getScanner(boolean cacheBlocks, boolean pread, -224 boolean isCompaction) { -225return reader.getScanner(cacheBlocks, pread, isCompaction); -226 } -227 -228 public void close(boolean evictOnClose) throws IOException { -229synchronized (this) { -230 if (closed) { -231return; -232 } -233 reader.close(evictOnClose); -234 closed = true; -235} -236if (listener != null) { -237 listener.storeFileReaderClosed(this); +189if (this.listener != null) { +190 this.listener.storeFileReaderClosed(this); +191} +192 } catch (IOException e) { +193LOG.warn("failed to close stream reader", e); +194 } +195} +196 } +197 +198 /** +199 * @deprecated Do not write further code which depends on this call. Instead +200 * use getStoreFileScanner() which uses the StoreFileScanner class/interface +201 * which is the preferred way to scan a store with higher level concepts. +202 * +203 * @param cacheBlocks should we cache the blocks? +204 * @param pread use pread (for concurrent small readers) +205 * @return the underlying HFileScanner +206 */ +207 @Deprecated +208 public HFileScanner getScanner(boolean cacheBlocks, boolean pread) { +209return getScanner(cacheBlocks, pread, false); +210 } +211 +212 /** +213 * @deprecated Do not write further code which depends on this call. Instead +214 * use getStoreFileScanner() which uses the StoreFileScanner class/interface +215 * which is the preferred way to scan a store with higher level concepts. +216 * +217 * @param cacheBlocks +218 * should we cache the blocks? +219 * @param pread +220 * use pread (for concurrent small readers) +221 * @param isCompaction +222 * is scanner being used for compaction? 
+223 * @return the underlying HFileScanner +224 */ +225 @Deprecated +226 public HFileScanner getScanner(boolean cacheBlocks, boolean pread, +227 boolean isCompaction) { +228return reader.getScanner(cacheBlocks, pread, isCompaction); +229 } +230 +231 public void close(boolean evictOnClose) throws IOException { +232synchronized (this) { +233 if (closed) { +234return; +235 } +236 reader.close(evictOnClose); +237 closed = true; 238} -239 } -240 -241 /** -242 * Check if this storeFile may contain keys within the TimeRange that -243 * have not expired (i.e. not older than oldestUnexpiredTS). -244 * @param tr the timeRange to restrict -245 * @param oldestUnexpiredTS the oldest timestamp that is not expired, as -246 * determined by the column family's TTL -247 * @return false if queried keys definitely don't exist in this StoreFile -248 */ -249 boolean passesTimerangeFilter(TimeRange tr, long oldestUnexpiredTS) { -250return this.timeRange == null? true: -251 this.timeRange.includesTimeRange(tr) && this.timeRang
[04/34] hbase-site git commit: Published site at 12e75a8a635785b279900b6905c86a1617526c72.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8d58bba5/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestSwitchToStreamRead.MatchLastRowCellNextRowFilter.html -- diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestSwitchToStreamRead.MatchLastRowCellNextRowFilter.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestSwitchToStreamRead.MatchLastRowCellNextRowFilter.html index 6f38b3f..d1b8ba8 100644 --- a/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestSwitchToStreamRead.MatchLastRowCellNextRowFilter.html +++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestSwitchToStreamRead.MatchLastRowCellNextRowFilter.html @@ -31,243 +31,293 @@ 023 024import java.io.IOException; 025import java.util.ArrayList; -026import java.util.List; -027import java.util.concurrent.ThreadLocalRandom; -028import org.apache.hadoop.hbase.Cell; -029import org.apache.hadoop.hbase.HBaseClassTestRule; -030import org.apache.hadoop.hbase.HBaseTestingUtility; -031import org.apache.hadoop.hbase.TableName; -032import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; -033import org.apache.hadoop.hbase.client.Put; -034import org.apache.hadoop.hbase.client.Result; -035import org.apache.hadoop.hbase.client.Scan; -036import org.apache.hadoop.hbase.client.TableDescriptorBuilder; -037import org.apache.hadoop.hbase.filter.Filter; -038import org.apache.hadoop.hbase.filter.FilterBase; -039import org.apache.hadoop.hbase.regionserver.HRegion.RegionScannerImpl; -040import org.apache.hadoop.hbase.regionserver.ScannerContext.LimitScope; -041import org.apache.hadoop.hbase.testclassification.MediumTests; -042import org.apache.hadoop.hbase.testclassification.RegionServerTests; -043import org.apache.hadoop.hbase.util.Bytes; -044import org.junit.AfterClass; -045import org.junit.BeforeClass; -046import org.junit.ClassRule; -047import org.junit.Ignore; -048import org.junit.Test; -049import 
org.junit.experimental.categories.Category; -050 -051@Category({ RegionServerTests.class, MediumTests.class }) -052public class TestSwitchToStreamRead { -053 -054 @ClassRule -055 public static final HBaseClassTestRule CLASS_RULE = -056 HBaseClassTestRule.forClass(TestSwitchToStreamRead.class); +026import java.util.Collection; +027import java.util.Collections; +028import java.util.List; +029import java.util.Set; +030import java.util.concurrent.ThreadLocalRandom; +031import java.util.stream.Collectors; +032 +033import org.apache.hadoop.hbase.Cell; +034import org.apache.hadoop.hbase.HBaseClassTestRule; +035import org.apache.hadoop.hbase.HBaseTestingUtility; +036import org.apache.hadoop.hbase.TableName; +037import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; +038import org.apache.hadoop.hbase.client.Put; +039import org.apache.hadoop.hbase.client.Result; +040import org.apache.hadoop.hbase.client.Scan; +041import org.apache.hadoop.hbase.client.Scan.ReadType; +042import org.apache.hadoop.hbase.client.TableDescriptorBuilder; +043import org.apache.hadoop.hbase.filter.Filter; +044import org.apache.hadoop.hbase.filter.FilterBase; +045import org.apache.hadoop.hbase.regionserver.HRegion.RegionScannerImpl; +046import org.apache.hadoop.hbase.regionserver.ScannerContext.LimitScope; +047import org.apache.hadoop.hbase.testclassification.MediumTests; +048import org.apache.hadoop.hbase.testclassification.RegionServerTests; +049import org.apache.hadoop.hbase.util.Bytes; +050import org.junit.After; +051import org.junit.Assert; +052import org.junit.Before; +053import org.junit.ClassRule; +054import org.junit.Ignore; +055import org.junit.Test; +056import org.junit.experimental.categories.Category; 057 -058 private static final HBaseTestingUtility UTIL = new HBaseTestingUtility(); -059 -060 private static TableName TABLE_NAME = TableName.valueOf("stream"); -061 -062 private static byte[] FAMILY = Bytes.toBytes("cf"); -063 -064 private static byte[] QUAL = 
Bytes.toBytes("cq"); -065 -066 private static String VALUE_PREFIX; -067 -068 private static HRegion REGION; -069 -070 @BeforeClass -071 public static void setUp() throws IOException { -072 UTIL.getConfiguration().setLong(StoreScanner.STORESCANNER_PREAD_MAX_BYTES, 2048); -073StringBuilder sb = new StringBuilder(256); -074for (int i = 0; i < 255; i++) { -075 sb.append((char) ThreadLocalRandom.current().nextInt('A', 'z' + 1)); -076} -077VALUE_PREFIX = sb.append("-").toString(); -078REGION = UTIL.createLocalHRegion( -079 TableDescriptorBuilder.newBuilder(TABLE_NAME) -080.setColumnFamily( -081 ColumnFamilyDescriptorBuilder.newBuilder(FAMILY).setBlocksize(1024).build()) -082.build(), -083 null, null); -084for (int i = 0; i < 900; i++) { -085 REGION -086.put(new Put(Bytes.toBytes(i)).addColumn(FAMILY, QUAL, Bytes.toBy
[10/34] hbase-site git commit: Published site at 12e75a8a635785b279900b6905c86a1617526c72.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8d58bba5/testdevapidocs/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.html -- diff --git a/testdevapidocs/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.html b/testdevapidocs/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.html index 27857f8..c8e960f 100644 --- a/testdevapidocs/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.html +++ b/testdevapidocs/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.html @@ -349,7 +349,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html UTIL -private static HBaseTestingUtility UTIL +private static HBaseTestingUtility UTIL @@ -358,7 +358,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html TEST_SNAPSHOT -private static byte[] TEST_SNAPSHOT +private static byte[] TEST_SNAPSHOT @@ -367,7 +367,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html TEST_CLONE -private static org.apache.hadoop.hbase.TableName TEST_CLONE +private static org.apache.hadoop.hbase.TableName TEST_CLONE @@ -376,7 +376,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html TEST_FAMILY -private static byte[] TEST_FAMILY +private static byte[] TEST_FAMILY @@ -385,7 +385,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html TEST_FAMILY2 -private static byte[] TEST_FAMILY2 +private static byte[] TEST_FAMILY2 @@ -394,7 +394,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html name -public org.junit.rules.TestName name +public org.junit.rules.TestName name @@ -428,7 +428,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html setupBeforeClass -public static void setupBeforeClass() +public static void setupBeforeClass() throws https://docs.oracle.com/javase/8/docs/api/java/lang/Exception.html?is-external=true"; title="class or interface in java.lang">Exception Throws: @@ -442,7 +442,7 @@ extends 
https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html tearDownAfterClass -public static void tearDownAfterClass() +public static void tearDownAfterClass() throws https://docs.oracle.com/javase/8/docs/api/java/lang/Exception.html?is-external=true"; title="class or interface in java.lang">Exception Throws: @@ -456,7 +456,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html testStarted -public void testStarted() +public void testStarted() throws https://docs.oracle.com/javase/8/docs/api/java/lang/Exception.html?is-external=true"; title="class or interface in java.lang">Exception Throws: @@ -470,7 +470,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html testTableOperations -public void testTableOperations() +public void testTableOperations() throws https://docs.oracle.com/javase/8/docs/api/java/lang/Exception.html?is-external=true"; title="class or interface in java.lang">Exception Throws: @@ -484,7 +484,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html testSnapshotOperations -public void testSnapshotOperations() +public void testSnapshotOperations() throws https://docs.oracle.com/javase/8/docs/api/java/lang/Exception.html?is-external=true"; title="class or interface in java.lang">Exception Throws: @@ -498,7 +498,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html testNamespaceOperations -public void testNamespaceOperations() +public void testNamespaceOperations() throws https://docs.oracle.com/javase/8/docs/api/java/lang/Exception.html?is-external=true"; title="class or interface in java.lang">Exception Throws: @@ -512,7 +512,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html modifyTableSync -private void modifyTableSync(org.apache.hadoop.hbase.client.Admin admin, +private void modifyTableSync(org.apache.hadoop.hbase.client.Admin admin, org.apache.hadoop.hbase.TableName tableName, org.apache.hadoop.hbase.HTableDescriptor htd) throws 
https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true"; title="class or interface in java.io">IOException @@ -528,7 +528,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html testRegionTransitionOperations -public void testRegionTransitionOperations() +public void testRegionTransitionOperations() throws https://docs.oracle.com/javase/8/docs/api/java/lang/Exception.html?is-external=true"; title="class or interface in java.lang">Exception Throws: @@ -542,7 +542,7 @@ extends https:/
[02/34] hbase-site git commit: Published site at 12e75a8a635785b279900b6905c86a1617526c72.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8d58bba5/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestSwitchToStreamRead.MatchLastRowKeyFilter.html -- diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestSwitchToStreamRead.MatchLastRowKeyFilter.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestSwitchToStreamRead.MatchLastRowKeyFilter.html index 6f38b3f..d1b8ba8 100644 --- a/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestSwitchToStreamRead.MatchLastRowKeyFilter.html +++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestSwitchToStreamRead.MatchLastRowKeyFilter.html @@ -31,243 +31,293 @@ 023 024import java.io.IOException; 025import java.util.ArrayList; -026import java.util.List; -027import java.util.concurrent.ThreadLocalRandom; -028import org.apache.hadoop.hbase.Cell; -029import org.apache.hadoop.hbase.HBaseClassTestRule; -030import org.apache.hadoop.hbase.HBaseTestingUtility; -031import org.apache.hadoop.hbase.TableName; -032import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; -033import org.apache.hadoop.hbase.client.Put; -034import org.apache.hadoop.hbase.client.Result; -035import org.apache.hadoop.hbase.client.Scan; -036import org.apache.hadoop.hbase.client.TableDescriptorBuilder; -037import org.apache.hadoop.hbase.filter.Filter; -038import org.apache.hadoop.hbase.filter.FilterBase; -039import org.apache.hadoop.hbase.regionserver.HRegion.RegionScannerImpl; -040import org.apache.hadoop.hbase.regionserver.ScannerContext.LimitScope; -041import org.apache.hadoop.hbase.testclassification.MediumTests; -042import org.apache.hadoop.hbase.testclassification.RegionServerTests; -043import org.apache.hadoop.hbase.util.Bytes; -044import org.junit.AfterClass; -045import org.junit.BeforeClass; -046import org.junit.ClassRule; -047import org.junit.Ignore; -048import org.junit.Test; -049import org.junit.experimental.categories.Category; -050 -051@Category({ 
RegionServerTests.class, MediumTests.class }) -052public class TestSwitchToStreamRead { -053 -054 @ClassRule -055 public static final HBaseClassTestRule CLASS_RULE = -056 HBaseClassTestRule.forClass(TestSwitchToStreamRead.class); +026import java.util.Collection; +027import java.util.Collections; +028import java.util.List; +029import java.util.Set; +030import java.util.concurrent.ThreadLocalRandom; +031import java.util.stream.Collectors; +032 +033import org.apache.hadoop.hbase.Cell; +034import org.apache.hadoop.hbase.HBaseClassTestRule; +035import org.apache.hadoop.hbase.HBaseTestingUtility; +036import org.apache.hadoop.hbase.TableName; +037import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; +038import org.apache.hadoop.hbase.client.Put; +039import org.apache.hadoop.hbase.client.Result; +040import org.apache.hadoop.hbase.client.Scan; +041import org.apache.hadoop.hbase.client.Scan.ReadType; +042import org.apache.hadoop.hbase.client.TableDescriptorBuilder; +043import org.apache.hadoop.hbase.filter.Filter; +044import org.apache.hadoop.hbase.filter.FilterBase; +045import org.apache.hadoop.hbase.regionserver.HRegion.RegionScannerImpl; +046import org.apache.hadoop.hbase.regionserver.ScannerContext.LimitScope; +047import org.apache.hadoop.hbase.testclassification.MediumTests; +048import org.apache.hadoop.hbase.testclassification.RegionServerTests; +049import org.apache.hadoop.hbase.util.Bytes; +050import org.junit.After; +051import org.junit.Assert; +052import org.junit.Before; +053import org.junit.ClassRule; +054import org.junit.Ignore; +055import org.junit.Test; +056import org.junit.experimental.categories.Category; 057 -058 private static final HBaseTestingUtility UTIL = new HBaseTestingUtility(); -059 -060 private static TableName TABLE_NAME = TableName.valueOf("stream"); -061 -062 private static byte[] FAMILY = Bytes.toBytes("cf"); -063 -064 private static byte[] QUAL = Bytes.toBytes("cq"); -065 -066 private static String VALUE_PREFIX; -067 -068 
private static HRegion REGION; -069 -070 @BeforeClass -071 public static void setUp() throws IOException { -072 UTIL.getConfiguration().setLong(StoreScanner.STORESCANNER_PREAD_MAX_BYTES, 2048); -073StringBuilder sb = new StringBuilder(256); -074for (int i = 0; i < 255; i++) { -075 sb.append((char) ThreadLocalRandom.current().nextInt('A', 'z' + 1)); -076} -077VALUE_PREFIX = sb.append("-").toString(); -078REGION = UTIL.createLocalHRegion( -079 TableDescriptorBuilder.newBuilder(TABLE_NAME) -080.setColumnFamily( -081 ColumnFamilyDescriptorBuilder.newBuilder(FAMILY).setBlocksize(1024).build()) -082.build(), -083 null, null); -084for (int i = 0; i < 900; i++) { -085 REGION -086.put(new Put(Bytes.toBytes(i)).addColumn(FAMILY, QUAL, Bytes.toBytes(VALUE_PREFIX + i))); -087} -088
[17/34] hbase-site git commit: Published site at 12e75a8a635785b279900b6905c86a1617526c72.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8d58bba5/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterCoprocessorHost.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterCoprocessorHost.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterCoprocessorHost.html index 8e27b8f..0c3f93a 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterCoprocessorHost.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterCoprocessorHost.html @@ -325,1447 +325,1461 @@ 317 318 /* Implementation of hooks for invoking MasterObservers */ 319 -320 public void preCreateTable(final TableDescriptor htd, final RegionInfo[] regions) -321 throws IOException { -322 execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() { -323 @Override -324 public void call(MasterObserver observer) throws IOException { -325observer.preCreateTable(this, htd, regions); -326 } -327}); -328 } -329 -330 public void postCreateTable(final TableDescriptor htd, final RegionInfo[] regions) -331 throws IOException { -332 execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() { -333 @Override -334 public void call(MasterObserver observer) throws IOException { -335observer.postCreateTable(this, htd, regions); -336 } -337}); -338 } -339 -340 public void preCreateTableAction(final TableDescriptor htd, final RegionInfo[] regions, -341 final User user) throws IOException { -342 execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation(user) { -343 @Override -344 public void call(MasterObserver observer) throws IOException { -345 observer.preCreateTableAction(this, htd, regions); -346 } -347}); -348 } -349 -350 public void postCompletedCreateTableAction( -351 final TableDescriptor htd, final RegionInfo[] regions, final User user) throws IOException { -352 execOperation(coprocEnvironments.isEmpty() ? 
null : new MasterObserverOperation(user) { -353 @Override -354 public void call(MasterObserver observer) throws IOException { -355 observer.postCompletedCreateTableAction(this, htd, regions); -356 } -357}); -358 } -359 -360 public void preDeleteTable(final TableName tableName) throws IOException { -361 execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() { -362 @Override -363 public void call(MasterObserver observer) throws IOException { -364observer.preDeleteTable(this, tableName); -365 } -366}); -367 } -368 -369 public void postDeleteTable(final TableName tableName) throws IOException { -370 execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() { -371 @Override -372 public void call(MasterObserver observer) throws IOException { -373observer.postDeleteTable(this, tableName); -374 } -375}); -376 } -377 -378 public void preDeleteTableAction(final TableName tableName, final User user) throws IOException { -379 execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation(user) { -380 @Override -381 public void call(MasterObserver observer) throws IOException { -382 observer.preDeleteTableAction(this, tableName); -383 } -384}); -385 } -386 -387 public void postCompletedDeleteTableAction(final TableName tableName, final User user) -388 throws IOException { -389 execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation(user) { -390 @Override -391 public void call(MasterObserver observer) throws IOException { -392 observer.postCompletedDeleteTableAction(this, tableName); -393 } -394}); -395 } -396 -397 public void preTruncateTable(final TableName tableName) throws IOException { -398 execOperation(coprocEnvironments.isEmpty() ? 
null : new MasterObserverOperation() { -399 @Override -400 public void call(MasterObserver observer) throws IOException { -401observer.preTruncateTable(this, tableName); -402 } -403}); -404 } -405 -406 public void postTruncateTable(final TableName tableName) throws IOException { -407 execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() { -408 @Override -409 public void call(MasterObserver observer) throws IOException { -410observer.postTruncateTable(this, tableName); -411 } -412}); -413 } -414 -415 public void preTruncateTableAction(final TableName tableName, final User user) -416 throws IOException { -417 execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation(user) { -418 @Override -419 public void call(MasterObserver observer) throws IOException { -420 observ
[06/34] hbase-site git commit: Published site at 12e75a8a635785b279900b6905c86a1617526c72.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8d58bba5/testdevapidocs/src-html/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.html -- diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.html index 26de44b..ec473e3 100644 --- a/testdevapidocs/src-html/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.html +++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.html @@ -102,1611 +102,1621 @@ 094 095 public static class CPMasterObserver implements MasterCoprocessor, MasterObserver { 096 -097private boolean preCreateTableCalled; -098private boolean postCreateTableCalled; -099private boolean preDeleteTableCalled; -100private boolean postDeleteTableCalled; -101private boolean preTruncateTableCalled; -102private boolean postTruncateTableCalled; -103private boolean preModifyTableCalled; -104private boolean postModifyTableCalled; -105private boolean preCreateNamespaceCalled; -106private boolean postCreateNamespaceCalled; -107private boolean preDeleteNamespaceCalled; -108private boolean postDeleteNamespaceCalled; -109private boolean preModifyNamespaceCalled; -110private boolean postModifyNamespaceCalled; -111private boolean preGetNamespaceDescriptorCalled; -112private boolean postGetNamespaceDescriptorCalled; -113private boolean preListNamespaceDescriptorsCalled; -114private boolean postListNamespaceDescriptorsCalled; -115private boolean preAddColumnCalled; -116private boolean postAddColumnCalled; -117private boolean preModifyColumnCalled; -118private boolean postModifyColumnCalled; -119private boolean preDeleteColumnCalled; -120private boolean postDeleteColumnCalled; -121private boolean preEnableTableCalled; -122private boolean postEnableTableCalled; -123private boolean preDisableTableCalled; -124private boolean postDisableTableCalled; -125private boolean preAbortProcedureCalled; -126private boolean 
postAbortProcedureCalled; -127private boolean preGetProceduresCalled; -128private boolean postGetProceduresCalled; -129private boolean preGetLocksCalled; -130private boolean postGetLocksCalled; -131private boolean preMoveCalled; -132private boolean postMoveCalled; -133private boolean preAssignCalled; -134private boolean postAssignCalled; -135private boolean preUnassignCalled; -136private boolean postUnassignCalled; -137private boolean preRegionOfflineCalled; -138private boolean postRegionOfflineCalled; -139private boolean preBalanceCalled; -140private boolean postBalanceCalled; -141private boolean preBalanceSwitchCalled; -142private boolean postBalanceSwitchCalled; -143private boolean preShutdownCalled; -144private boolean preStopMasterCalled; -145private boolean preMasterInitializationCalled; -146private boolean postStartMasterCalled; -147private boolean startCalled; -148private boolean stopCalled; -149private boolean preSnapshotCalled; -150private boolean postSnapshotCalled; -151private boolean preListSnapshotCalled; -152private boolean postListSnapshotCalled; -153private boolean preCloneSnapshotCalled; -154private boolean postCloneSnapshotCalled; -155private boolean preRestoreSnapshotCalled; -156private boolean postRestoreSnapshotCalled; -157private boolean preDeleteSnapshotCalled; -158private boolean postDeleteSnapshotCalled; -159private boolean preCreateTableActionCalled; -160private boolean postCompletedCreateTableActionCalled; -161private boolean preDeleteTableActionCalled; -162private boolean postCompletedDeleteTableActionCalled; -163private boolean preTruncateTableActionCalled; -164private boolean postCompletedTruncateTableActionCalled; -165private boolean preAddColumnFamilyActionCalled; -166private boolean postCompletedAddColumnFamilyActionCalled; -167private boolean preModifyColumnFamilyActionCalled; -168private boolean postCompletedModifyColumnFamilyActionCalled; -169private boolean preDeleteColumnFamilyActionCalled; -170private boolean 
postCompletedDeleteColumnFamilyActionCalled; -171private boolean preEnableTableActionCalled; -172private boolean postCompletedEnableTableActionCalled; -173private boolean preDisableTableActionCalled; -174private boolean postCompletedDisableTableActionCalled; -175private boolean preModifyTableActionCalled; -176private boolean postCompletedModifyTableActionCalled; -177private boolean preGetTableDescriptorsCalled; -178private boolean postGetTableDescriptorsCalled; -179private boolean postGetTableNamesCalled; -180private boolean preG
[20/34] hbase-site git commit: Published site at 12e75a8a635785b279900b6905c86a1617526c72.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8d58bba5/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterCoprocessorHost.MasterEnvironment.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterCoprocessorHost.MasterEnvironment.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterCoprocessorHost.MasterEnvironment.html index 8e27b8f..0c3f93a 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterCoprocessorHost.MasterEnvironment.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterCoprocessorHost.MasterEnvironment.html @@ -325,1447 +325,1461 @@ 317 318 /* Implementation of hooks for invoking MasterObservers */ 319 -320 public void preCreateTable(final TableDescriptor htd, final RegionInfo[] regions) -321 throws IOException { -322 execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() { -323 @Override -324 public void call(MasterObserver observer) throws IOException { -325observer.preCreateTable(this, htd, regions); -326 } -327}); -328 } -329 -330 public void postCreateTable(final TableDescriptor htd, final RegionInfo[] regions) -331 throws IOException { -332 execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() { -333 @Override -334 public void call(MasterObserver observer) throws IOException { -335observer.postCreateTable(this, htd, regions); -336 } -337}); -338 } -339 -340 public void preCreateTableAction(final TableDescriptor htd, final RegionInfo[] regions, -341 final User user) throws IOException { -342 execOperation(coprocEnvironments.isEmpty() ? 
null : new MasterObserverOperation(user) { -343 @Override -344 public void call(MasterObserver observer) throws IOException { -345 observer.preCreateTableAction(this, htd, regions); -346 } -347}); -348 } -349 -350 public void postCompletedCreateTableAction( -351 final TableDescriptor htd, final RegionInfo[] regions, final User user) throws IOException { -352 execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation(user) { -353 @Override -354 public void call(MasterObserver observer) throws IOException { -355 observer.postCompletedCreateTableAction(this, htd, regions); -356 } -357}); -358 } -359 -360 public void preDeleteTable(final TableName tableName) throws IOException { -361 execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() { -362 @Override -363 public void call(MasterObserver observer) throws IOException { -364observer.preDeleteTable(this, tableName); -365 } -366}); -367 } -368 -369 public void postDeleteTable(final TableName tableName) throws IOException { -370 execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() { -371 @Override -372 public void call(MasterObserver observer) throws IOException { -373observer.postDeleteTable(this, tableName); -374 } -375}); -376 } -377 -378 public void preDeleteTableAction(final TableName tableName, final User user) throws IOException { -379 execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation(user) { -380 @Override -381 public void call(MasterObserver observer) throws IOException { -382 observer.preDeleteTableAction(this, tableName); -383 } -384}); -385 } -386 -387 public void postCompletedDeleteTableAction(final TableName tableName, final User user) -388 throws IOException { -389 execOperation(coprocEnvironments.isEmpty() ? 
null : new MasterObserverOperation(user) { -390 @Override -391 public void call(MasterObserver observer) throws IOException { -392 observer.postCompletedDeleteTableAction(this, tableName); -393 } -394}); -395 } -396 -397 public void preTruncateTable(final TableName tableName) throws IOException { -398 execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() { -399 @Override -400 public void call(MasterObserver observer) throws IOException { -401observer.preTruncateTable(this, tableName); -402 } -403}); -404 } -405 -406 public void postTruncateTable(final TableName tableName) throws IOException { -407 execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() { -408 @Override -409 public void call(MasterObserver observer) throws IOException { -410observer.postTruncateTable(this, tableName); -411 } -412}); -413 } -414 -415 public void preTruncateTableAction(final TableName tableName, final User user) -416 throws IOException { -417 execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation(user) { -418 @Override -4
[33/34] hbase-site git commit: Published site at 12e75a8a635785b279900b6905c86a1617526c72.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8d58bba5/devapidocs/org/apache/hadoop/hbase/coprocessor/MasterObserver.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/coprocessor/MasterObserver.html b/devapidocs/org/apache/hadoop/hbase/coprocessor/MasterObserver.html index 759cc79..2bec15b 100644 --- a/devapidocs/org/apache/hadoop/hbase/coprocessor/MasterObserver.html +++ b/devapidocs/org/apache/hadoop/hbase/coprocessor/MasterObserver.html @@ -18,7 +18,7 @@ catch(err) { } //--> -var methods = {"i0":18,"i1":18,"i2":18,"i3":18,"i4":18,"i5":18,"i6":18,"i7":18,"i8":18,"i9":18,"i10":18,"i11":18,"i12":18,"i13":18,"i14":50,"i15":18,"i16":18,"i17":18,"i18":18,"i19":18,"i20":18,"i21":18,"i22":18,"i23":18,"i24":18,"i25":18,"i26":18,"i27":18,"i28":18,"i29":18,"i30":18,"i31":18,"i32":18,"i33":18,"i34":18,"i35":18,"i36":18,"i37":18,"i38":18,"i39":18,"i40":18,"i41":18,"i42":18,"i43":18,"i44":18,"i45":18,"i46":50,"i47":18,"i48":50,"i49":18,"i50":18,"i51":18,"i52":18,"i53":18,"i54":18,"i55":18,"i56":18,"i57":18,"i58":18,"i59":18,"i60":18,"i61":18,"i62":18,"i63":18,"i64":18,"i65":18,"i66":18,"i67":18,"i68":18,"i69":18,"i70":18,"i71":18,"i72":18,"i73":18,"i74":18,"i75":18,"i76":18,"i77":18,"i78":18,"i79":18,"i80":18,"i81":18,"i82":18,"i83":18,"i84":18,"i85":18,"i86":18,"i87":18,"i88":18,"i89":18,"i90":18,"i91":18,"i92":18,"i93":18,"i94":18,"i95":18,"i96":18,"i97":18,"i98":18,"i99":18,"i100":18,"i101":18,"i102":18,"i103":18,"i104":18,"i105":18,"i106":18,"i107":18,"i108":18,"i 109":18,"i110":18,"i111":18,"i112":18,"i113":18,"i114":18,"i115":18,"i116":18,"i117":18,"i118":18,"i119":50,"i120":18,"i121":50,"i122":18,"i123":50,"i124":18,"i125":18,"i126":18,"i127":18,"i128":18,"i129":18,"i130":18,"i131":18,"i132":18,"i133":18,"i134":18,"i135":18,"i136":18,"i137":18,"i138":18,"i139":18,"i140":18,"i141":18,"i142":18,"i143":18,"i144":18,"i145":18,"i146":18,"i147":18,"i148":18,"i149":18,"i150":18,"i151":18,"i152":18,"i153":18,"i154":18}; +var methods = 
{"i0":18,"i1":18,"i2":18,"i3":18,"i4":18,"i5":18,"i6":18,"i7":18,"i8":18,"i9":18,"i10":18,"i11":18,"i12":18,"i13":18,"i14":50,"i15":18,"i16":18,"i17":18,"i18":18,"i19":18,"i20":18,"i21":18,"i22":18,"i23":18,"i24":18,"i25":18,"i26":18,"i27":18,"i28":18,"i29":18,"i30":18,"i31":18,"i32":18,"i33":18,"i34":18,"i35":18,"i36":18,"i37":18,"i38":18,"i39":18,"i40":18,"i41":18,"i42":18,"i43":18,"i44":18,"i45":18,"i46":50,"i47":18,"i48":50,"i49":18,"i50":18,"i51":18,"i52":18,"i53":18,"i54":18,"i55":18,"i56":18,"i57":18,"i58":18,"i59":18,"i60":18,"i61":18,"i62":18,"i63":18,"i64":18,"i65":18,"i66":18,"i67":18,"i68":18,"i69":18,"i70":18,"i71":18,"i72":18,"i73":18,"i74":18,"i75":18,"i76":18,"i77":18,"i78":18,"i79":18,"i80":18,"i81":18,"i82":18,"i83":18,"i84":18,"i85":18,"i86":18,"i87":18,"i88":18,"i89":18,"i90":18,"i91":18,"i92":18,"i93":18,"i94":18,"i95":18,"i96":18,"i97":18,"i98":18,"i99":18,"i100":18,"i101":18,"i102":18,"i103":18,"i104":18,"i105":18,"i106":18,"i107":18,"i108":18,"i 109":18,"i110":18,"i111":18,"i112":18,"i113":18,"i114":18,"i115":18,"i116":18,"i117":18,"i118":18,"i119":18,"i120":50,"i121":18,"i122":50,"i123":18,"i124":50,"i125":18,"i126":18,"i127":18,"i128":18,"i129":18,"i130":18,"i131":18,"i132":18,"i133":18,"i134":18,"i135":18,"i136":18,"i137":18,"i138":18,"i139":18,"i140":18,"i141":18,"i142":18,"i143":18,"i144":18,"i145":18,"i146":18,"i147":18,"i148":18,"i149":18,"i150":18,"i151":18,"i152":18,"i153":18,"i154":18,"i155":18}; var tabs = {65535:["t0","All Methods"],2:["t2","Instance Methods"],16:["t5","Default Methods"],32:["t6","Deprecated Methods"]}; var altColor = "altColor"; var rowColor = "rowColor"; @@ -819,6 +819,13 @@ public interface +default TableDescriptor +preCreateTableRegionsInfos(ObserverContextctx, + TableDescriptor desc) +Called before we create the region infos for this table. 
+ + + default void preDecommissionRegionServers(ObserverContext ctx, https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true"; title="class or interface in java.util">List servers, @@ -826,7 +833,7 @@ public interface Called before decommission region servers. - + default void preDeleteNamespace(ObserverContext ctx, https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true"; title="class or interface in java.lang">String namespace) @@ -834,14 +841,14 @@ public interface + default void preDeleteSnapshot(ObserverContext ctx, SnapshotDescription snapshot) Called before a snapshot is deleted. - + default void preDeleteTable(ObserverContext ctx, TableName tableName) @@ -849,7 +856,7 @@ public interface + default void preDeleteTableAction(ObserverContext
[25/34] hbase-site git commit: Published site at 12e75a8a635785b279900b6905c86a1617526c72.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8d58bba5/devapidocs/org/apache/hadoop/hbase/security/access/AccessController.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/security/access/AccessController.html b/devapidocs/org/apache/hadoop/hbase/security/access/AccessController.html index 5d7328c..0a5dfe1 100644 --- a/devapidocs/org/apache/hadoop/hbase/security/access/AccessController.html +++ b/devapidocs/org/apache/hadoop/hbase/security/access/AccessController.html @@ -1386,7 +1386,7 @@ implements MasterObserver -postAddReplicationPeer, postAddRSGroup, postAssign, postBalance, postBalanceRSGroup, postBalanceSwitch, postClearDeadServers, postCloneSnapshot, postCompletedDeleteTableAction, postCompletedDisableTableAction, postCompletedEnableTableAction, postCompletedMergeRegionsAction, postCompletedModifyTableAction, postCompletedModifyTableAction, postCompletedSplitRegionAction, postCompletedTruncateTableAction, postCreateNamespace, postCreateTable, postDecommissionRegionServers, postDeleteSnapshot, postDisableReplicationPeer, postDisableTable, postEnableReplicationPeer, postEnableTable, postGetClusterMetrics, postGetLocks, postGetNamespaceDescriptor, postGetProcedures, postGetReplicationPeerConfig, postGetRSGroupInfo, postGetRSGroupInfoOfServer, postGetRSGroupInfoOfTable, postListDecommissionedRegionServers, postListReplicationPeers, postListRSGroups, postListSnapshot, postLockHeartbeat, postMergeRegions, postMergeRegionsCommitAction, postModifyNamespace, postModifyNamespace, postModifyTable, postMove, postMoveServers, postMoveServersAndTables, postMoveTables, postRecommissionRegionServer, postRegionOffline, postRemoveReplicationPeer, postRemoveRSGroup, postRemoveServers, postRequestLock, postRestoreSnapshot, postRollBackMergeRegionsAction, postRollBackSplitRegionAction, postSetNamespaceQuota, postSetSplitOrMergeEnabled, postSetTableQuota, postSetUserQuota, postSetUserQuota, postSetUserQuota, postSnapshot, postTableFlush, 
postTransitReplicationPeerSyncReplicationState, postUnassign, postUpdateReplicationPeerConfig, preAddRSGroup, preBalanceRSGroup, preCreateTableAction, preDeleteTableAction, preDisableTableAction, preEnableTableAction, preGetClusterMetrics, preGetRSGroupInfo, preGetRSGroupInfoOfServer, preGetRSGroupInfoOfTable, preGetTableNames, preListNamespaceDescriptors, preListRSGroups, preMasterInitialization, preMergeRegionsAction, preMergeRegionsCommitAction, preModifyNamespace, preModifyTable, preModifyTableAction, preModifyTableAction, preMoveServers, preMoveServersAndTables, preMoveTables, preRemoveRSGroup, preRemoveServers, preSplitRegionAction, preSplitRegionAfterMETAAction, preSplitRegionBeforeMETAAction, preTruncateTableAction +postAddReplicationPeer, postAddRSGroup, postAssign, postBalance, postBalanceRSGroup, postBalanceSwitch, postClearDeadServers, postCloneSnapshot, postCompletedDeleteTableAction, postCompletedDisableTableAction, postCompletedEnableTableAction, postCompletedMergeRegionsAction, postCompletedModifyTableAction, postCompletedModifyTableAction, postCompletedSplitRegionAction, postCompletedTruncateTableAction, postCreateNamespace, postCreateTable, postDecommissionRegionServers, postDeleteSnapshot, postDisableReplicationPeer, postDisableTable, postEnableReplicationPeer, postEnableTable, postGetClusterMetrics, postGetLocks, postGetNamespaceDescriptor, postGetProcedures, postGetReplicationPeerConfig, postGetRSGroupInfo, postGetRSGroupInfoOfServer, postGetRSGroupInfoOfTable, postListDecommissionedRegionServers, postListReplicationPeers, postListRSGroups, postListSnapshot, postLockHeartbeat, postMergeRegions, postMergeRegionsCommitAction, postModifyNamespace, postModifyNamespace, postModifyTable, postMove, postMoveServers, postMoveServersAndTables, postMoveTables, postRecommissionRegionServer, postRegionOffline, postRemoveReplicationPeer, postRemoveRSGroup, postRemoveServers, postRequestLock, postRestoreSnapshot, postRollBackMergeRegionsAction, 
postRollBackSplitRegionAction, postSetNamespaceQuota, postSetSplitOrMergeEnabled, postSetTableQuota, postSetUserQuota, postSetUserQuota, postSetUserQuota, postSnapshot, postTableFlush, postTransitReplicationPeerSyncReplicationState, postUnassign, postUpdateReplicationPeerConfig, preAddRSGroup, preBalanceRSGroup, preCreateTableAction, preCreateTableRegionsInfos, preDeleteTableAction, preDisableTableAction, preEnableTableAction, preGetClusterMetrics, preGetRSGroupInfo, preGetRSGroupInfoOfServer, preGetRSGroupInfoOfTable, preGetTableNames, preListNamespaceDescriptors, preListRSGroups, preMasterInitialization, preMergeRegionsAction, preMergeRegionsCommitAction, preModifyNamespace, preModifyTable, preModifyTableAction, preModifyTableAction, preMoveServers, preMoveServersAndTables, preMoveTa
[32/34] hbase-site git commit: Published site at 12e75a8a635785b279900b6905c86a1617526c72.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8d58bba5/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/MasterCoprocessorEnvironment.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/MasterCoprocessorEnvironment.html b/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/MasterCoprocessorEnvironment.html index 0bbdf30..49cbc86 100644 --- a/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/MasterCoprocessorEnvironment.html +++ b/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/MasterCoprocessorEnvironment.html @@ -797,6 +797,13 @@ +default TableDescriptor +MasterObserver.preCreateTableRegionsInfos(ObserverContextctx, + TableDescriptor desc) +Called before we create the region infos for this table. + + + default void MasterObserver.preDecommissionRegionServers(ObserverContext ctx, https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true"; title="class or interface in java.util">List servers, @@ -804,7 +811,7 @@ Called before decommission region servers. - + default void MasterObserver.preDeleteNamespace(ObserverContext ctx, https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true"; title="class or interface in java.lang">String namespace) @@ -812,14 +819,14 @@ namespace - + default void MasterObserver.preDeleteSnapshot(ObserverContext ctx, SnapshotDescription snapshot) Called before a snapshot is deleted. - + default void MasterObserver.preDeleteTable(ObserverContext ctx, TableName tableName) @@ -827,7 +834,7 @@ table. - + default void MasterObserver.preDeleteTableAction(ObserverContext ctx, TableName tableName) @@ -835,102 +842,102 @@ table. 
- + default void MasterObserver.preDisableReplicationPeer(ObserverContext ctx, https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true"; title="class or interface in java.lang">String peerId) Called before disable a replication peer - + default void MasterObserver.preDisableTable(ObserverContext ctx, TableName tableName) Called prior to disabling a table. - + default void MasterObserver.preDisableTableAction(ObserverContext ctx, TableName tableName) Called prior to disabling a table. - + default void MasterObserver.preEnableReplicationPeer(ObserverContext ctx, https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true"; title="class or interface in java.lang">String peerId) Called before enable a replication peer - + default void MasterObserver.preEnableTable(ObserverContext ctx, TableName tableName) Called prior to enabling a table. - + default void MasterObserver.preEnableTableAction(ObserverContext ctx, TableName tableName) Called prior to enabling a table. - + default void MasterObserver.preGetClusterMetrics(ObserverContext ctx) Called before get cluster status. - + default void MasterObserver.preGetLocks(ObserverContext ctx) Called before a getLocks request has been processed. - + default void MasterObserver.preGetNamespaceDescriptor(ObserverContext ctx, https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true"; title="class or interface in java.lang">String namespace) Called before a getNamespaceDescriptor request has been processed. - + default void MasterObserver.preGetProcedures(ObserverContext ctx) Called before a getProcedures request has been processed. 
- + default void MasterObserver.preGetReplicationPeerConfig(ObserverContext ctx, https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true"; title="class or interface in java.lang">String peerId) Called before get the configured ReplicationPeerConfig for the specified peer - + default void MasterObserver.preGetRSGroupInfo(ObserverContext ctx, https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true"; title="class or interface in java.lang">String groupName) Called before getting region server group info of the passed gro
[16/34] hbase-site git commit: Published site at 12e75a8a635785b279900b6905c86a1617526c72.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8d58bba5/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HStoreFile.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HStoreFile.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HStoreFile.html index 2e803fd..0e6cac8 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HStoreFile.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HStoreFile.html @@ -134,506 +134,507 @@ 126 private final AtomicInteger refCount = new AtomicInteger(0); 127 128 // Set implementation must be of concurrent type -129 private final SetstreamReaders; -130 -131 private final boolean noReadahead; -132 -133 private final boolean primaryReplica; -134 -135 // Indicates if the file got compacted -136 private volatile boolean compactedAway = false; -137 -138 // Keys for metadata stored in backing HFile. -139 // Set when we obtain a Reader. -140 private long sequenceid = -1; -141 -142 // max of the MemstoreTS in the KV's in this store -143 // Set when we obtain a Reader. -144 private long maxMemstoreTS = -1; -145 -146 // firstKey, lastkey and cellComparator will be set when openReader. -147 private Optional firstKey; -148 -149 private Optional | lastKey; -150 -151 private CellComparator comparator; -152 -153 public CacheConfig getCacheConf() { -154return cacheConf; -155 } -156 -157 @Override -158 public Optional | getFirstKey() { -159return firstKey; -160 } -161 -162 @Override -163 public Optional | getLastKey() { -164return lastKey; -165 } -166 -167 @Override -168 public CellComparator getComparator() { -169return comparator; -170 } -171 -172 @Override -173 public long getMaxMemStoreTS() { -174return maxMemstoreTS; -175 } -176 -177 // If true, this file was product of a major compaction. Its then set -178 // whenever you get a Reader. 
-179 private AtomicBoolean majorCompaction = null; -180 -181 // If true, this file should not be included in minor compactions. -182 // It's set whenever you get a Reader. -183 private boolean excludeFromMinorCompaction = false; -184 -185 /** -186 * Map of the metadata entries in the corresponding HFile. Populated when Reader is opened -187 * after which it is not modified again. -188 */ -189 private Map | metadataMap; -190 -191 // StoreFile.Reader -192 private volatile StoreFileReader reader; -193 -194 /** -195 * Bloom filter type specified in column family configuration. Does not -196 * necessarily correspond to the Bloom filter type present in the HFile. -197 */ -198 private final BloomType cfBloomType; -199 -200 /** -201 * Constructor, loads a reader and it's indices, etc. May allocate a substantial amount of ram -202 * depending on the underlying files (10-20MB?). -203 * @param fs The current file system to use. -204 * @param p The path of the file. -205 * @param conf The current configuration. -206 * @param cacheConf The cache configuration and block cache reference. -207 * @param cfBloomType The bloom type to use for this store file as specified by column family -208 * configuration. This may or may not be the same as the Bloom filter type actually -209 * present in the HFile, because column family configuration might change. If this is -210 * {@link BloomType#NONE}, the existing Bloom filter is ignored. -211 * @param primaryReplica true if this is a store file for primary replica, otherwise false. -212 * @throws IOException -213 */ -214 public HStoreFile(FileSystem fs, Path p, Configuration conf, CacheConfig cacheConf, -215 BloomType cfBloomType, boolean primaryReplica) throws IOException { -216this(fs, new StoreFileInfo(conf, fs, p), conf, cacheConf, cfBloomType, primaryReplica); -217 } -218 -219 /** -220 * Constructor, loads a reader and it's indices, etc. May allocate a substantial amount of ram -221 * depending on the underlying files (10-20MB?). 
-222 * @param fs fs The current file system to use. -223 * @param fileInfo The store file information. -224 * @param conf The current configuration. -225 * @param cacheConf The cache configuration and block cache reference. -226 * @param cfBloomType The bloom type to use for this store file as specified by column -227 * family configuration. This may or may not be the same as the Bloom filter type -228 * actually present in the HFile, because column family configuration might change. If -229 * this is {@link BloomType#NONE}, the existing Bloom filter is ignored. -230 * @param primaryReplica true if this is a store file for primary replica, otherwise false. -231 */ -232 public HStoreFile(FileSystem fs, StoreFileInfo fileInfo, Configuration conf, CacheConfig c
[28/34] hbase-site git commit: Published site at 12e75a8a635785b279900b6905c86a1617526c72.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8d58bba5/devapidocs/org/apache/hadoop/hbase/master/package-tree.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/master/package-tree.html b/devapidocs/org/apache/hadoop/hbase/master/package-tree.html index 9b187b9..f21ad9b 100644 --- a/devapidocs/org/apache/hadoop/hbase/master/package-tree.html +++ b/devapidocs/org/apache/hadoop/hbase/master/package-tree.html @@ -347,10 +347,10 @@ java.lang.https://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true"; title="class or interface in java.lang">Enum(implements java.lang.https://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true"; title="class or interface in java.lang">Comparable , java.io.https://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true"; title="class or interface in java.io">Serializable) -org.apache.hadoop.hbase.master.SplitLogManager.TerminationStatus -org.apache.hadoop.hbase.master.SplitLogManager.ResubmitDirective -org.apache.hadoop.hbase.master.RegionState.State org.apache.hadoop.hbase.master.MetricsMasterSourceFactoryImpl.FactoryStorage +org.apache.hadoop.hbase.master.RegionState.State +org.apache.hadoop.hbase.master.SplitLogManager.ResubmitDirective +org.apache.hadoop.hbase.master.SplitLogManager.TerminationStatus org.apache.hadoop.hbase.master.MasterRpcServices.BalanceSwitchMode http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8d58bba5/devapidocs/org/apache/hadoop/hbase/master/procedure/package-tree.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/master/procedure/package-tree.html b/devapidocs/org/apache/hadoop/hbase/master/procedure/package-tree.html index 86fe2b3..f76d995 100644 --- a/devapidocs/org/apache/hadoop/hbase/master/procedure/package-tree.html +++ b/devapidocs/org/apache/hadoop/hbase/master/procedure/package-tree.html @@ -215,10 +215,10 @@ java.lang.https://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true"; title="class 
or interface in java.lang">Enum (implements java.lang.https://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true"; title="class or interface in java.lang">Comparable , java.io.https://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true"; title="class or interface in java.io">Serializable) -org.apache.hadoop.hbase.master.procedure.PeerProcedureInterface.PeerOperationType -org.apache.hadoop.hbase.master.procedure.MetaProcedureInterface.MetaOperationType -org.apache.hadoop.hbase.master.procedure.ServerProcedureInterface.ServerOperationType org.apache.hadoop.hbase.master.procedure.TableProcedureInterface.TableOperationType +org.apache.hadoop.hbase.master.procedure.ServerProcedureInterface.ServerOperationType +org.apache.hadoop.hbase.master.procedure.MetaProcedureInterface.MetaOperationType +org.apache.hadoop.hbase.master.procedure.PeerProcedureInterface.PeerOperationType http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8d58bba5/devapidocs/org/apache/hadoop/hbase/monitoring/package-tree.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/monitoring/package-tree.html b/devapidocs/org/apache/hadoop/hbase/monitoring/package-tree.html index 7b435d2..4b549da 100644 --- a/devapidocs/org/apache/hadoop/hbase/monitoring/package-tree.html +++ b/devapidocs/org/apache/hadoop/hbase/monitoring/package-tree.html @@ -127,8 +127,8 @@ java.lang.https://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true"; title="class or interface in java.lang">Enum (implements java.lang.https://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true"; title="class or interface in java.lang">Comparable , java.io.https://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true"; title="class or interface in java.io">Serializable) -org.apache.hadoop.hbase.monitoring.MonitoredTask.State org.apache.hadoop.hbase.monitoring.TaskMonitor.TaskFilter.TaskType 
+org.apache.hadoop.hbase.monitoring.MonitoredTask.State http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8d58bba5/devapidocs/org/apache/hadoop/hbase/package-tree.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/package-tree.html b/devapidocs/org/apache/hadoop/hbase/package-tree.html index 0fa5060..a4a3612 100644 --- a/devapidocs/org/apache/hadoop/hbase/package-tree.html +++ b/devapidocs/org/apache/hadoop/hbase/package-tree.html @@ -432,19 +432,19 @@ java.lang.https://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true"; title="class or interface in java.lang">Enum (implements java.lang.https://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.htm
[19/34] hbase-site git commit: Published site at 12e75a8a635785b279900b6905c86a1617526c72.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8d58bba5/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterCoprocessorHost.MasterEnvironmentForCoreCoprocessors.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterCoprocessorHost.MasterEnvironmentForCoreCoprocessors.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterCoprocessorHost.MasterEnvironmentForCoreCoprocessors.html index 8e27b8f..0c3f93a 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterCoprocessorHost.MasterEnvironmentForCoreCoprocessors.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterCoprocessorHost.MasterEnvironmentForCoreCoprocessors.html @@ -325,1447 +325,1461 @@ 317 318 /* Implementation of hooks for invoking MasterObservers */ 319 -320 public void preCreateTable(final TableDescriptor htd, final RegionInfo[] regions) -321 throws IOException { -322 execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() { -323 @Override -324 public void call(MasterObserver observer) throws IOException { -325observer.preCreateTable(this, htd, regions); -326 } -327}); -328 } -329 -330 public void postCreateTable(final TableDescriptor htd, final RegionInfo[] regions) -331 throws IOException { -332 execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() { -333 @Override -334 public void call(MasterObserver observer) throws IOException { -335observer.postCreateTable(this, htd, regions); -336 } -337}); -338 } -339 -340 public void preCreateTableAction(final TableDescriptor htd, final RegionInfo[] regions, -341 final User user) throws IOException { -342 execOperation(coprocEnvironments.isEmpty() ? 
null : new MasterObserverOperation(user) { -343 @Override -344 public void call(MasterObserver observer) throws IOException { -345 observer.preCreateTableAction(this, htd, regions); -346 } -347}); -348 } -349 -350 public void postCompletedCreateTableAction( -351 final TableDescriptor htd, final RegionInfo[] regions, final User user) throws IOException { -352 execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation(user) { -353 @Override -354 public void call(MasterObserver observer) throws IOException { -355 observer.postCompletedCreateTableAction(this, htd, regions); -356 } -357}); -358 } -359 -360 public void preDeleteTable(final TableName tableName) throws IOException { -361 execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() { -362 @Override -363 public void call(MasterObserver observer) throws IOException { -364observer.preDeleteTable(this, tableName); -365 } -366}); -367 } -368 -369 public void postDeleteTable(final TableName tableName) throws IOException { -370 execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() { -371 @Override -372 public void call(MasterObserver observer) throws IOException { -373observer.postDeleteTable(this, tableName); -374 } -375}); -376 } -377 -378 public void preDeleteTableAction(final TableName tableName, final User user) throws IOException { -379 execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation(user) { -380 @Override -381 public void call(MasterObserver observer) throws IOException { -382 observer.preDeleteTableAction(this, tableName); -383 } -384}); -385 } -386 -387 public void postCompletedDeleteTableAction(final TableName tableName, final User user) -388 throws IOException { -389 execOperation(coprocEnvironments.isEmpty() ? 
null : new MasterObserverOperation(user) { -390 @Override -391 public void call(MasterObserver observer) throws IOException { -392 observer.postCompletedDeleteTableAction(this, tableName); -393 } -394}); -395 } -396 -397 public void preTruncateTable(final TableName tableName) throws IOException { -398 execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() { -399 @Override -400 public void call(MasterObserver observer) throws IOException { -401observer.preTruncateTable(this, tableName); -402 } -403}); -404 } -405 -406 public void postTruncateTable(final TableName tableName) throws IOException { -407 execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() { -408 @Override -409 public void call(MasterObserver observer) throws IOException { -410observer.postTruncateTable(this, tableName); -411 } -412}); -413 } -414 -415 public void preTruncateTableAction(final TableName tableName, final User user) -416 throws IOException { -417 execOperation(co
[29/34] hbase-site git commit: Published site at 12e75a8a635785b279900b6905c86a1617526c72.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8d58bba5/devapidocs/org/apache/hadoop/hbase/master/MasterCoprocessorHost.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/master/MasterCoprocessorHost.html b/devapidocs/org/apache/hadoop/hbase/master/MasterCoprocessorHost.html index 4f26d3d..8eb25f0 100644 --- a/devapidocs/org/apache/hadoop/hbase/master/MasterCoprocessorHost.html +++ b/devapidocs/org/apache/hadoop/hbase/master/MasterCoprocessorHost.html @@ -18,7 +18,7 @@ catch(err) { } //--> -var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":10,"i65":10,"i66":10,"i67":10,"i68":10,"i69":10,"i70":10,"i71":10,"i72":10,"i73":10,"i74":10,"i75":10,"i76":10,"i77":10,"i78":10,"i79":10,"i80":10,"i81":10,"i82":10,"i83":10,"i84":10,"i85":10,"i86":10,"i87":10,"i88":10,"i89":10,"i90":10,"i91":10,"i92":10,"i93":10,"i94":10,"i95":10,"i96":10,"i97":10,"i98":10,"i99":10,"i100":10,"i101":10,"i102":10,"i103":10,"i104":10,"i105":10,"i106":10,"i107":10,"i108":10,"i 109":10,"i110":10,"i111":10,"i112":10,"i113":10,"i114":10,"i115":10,"i116":10,"i117":10,"i118":10,"i119":10,"i120":10,"i121":10,"i122":10,"i123":10,"i124":10,"i125":10,"i126":10,"i127":10,"i128":10,"i129":10,"i130":10,"i131":10,"i132":10,"i133":10,"i134":10,"i135":10,"i136":10,"i137":10,"i138":10,"i139":10,"i140":10,"i141":10,"i142":10,"i143":10,"i144":10,"i145":10,"i146":10,"i147":10,"i148":10,"i149":10,"i150":10}; +var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":10,"i65":10,"i66":10,"i67":10,"i68":10,"i69":10,"i70":10,"i71":10,"i72":10,"i73":10,"i74":10,"i75":10,"i76":10,"i77":10,"i78":10,"i79":10,"i80":10,"i81":10,"i82":10,"i83":10,"i84":10,"i85":10,"i86":10,"i87":10,"i88":10,"i89":10,"i90":10,"i91":10,"i92":10,"i93":10,"i94":10,"i95":10,"i96":10,"i97":10,"i98":10,"i99":10,"i100":10,"i101":10,"i102":10,"i103":10,"i104":10,"i105":10,"i106":10,"i107":10,"i108":10,"i 109":10,"i110":10,"i111":10,"i112":10,"i113":10,"i114":10,"i115":10,"i116":10,"i117":10,"i118":10,"i119":10,"i120":10,"i121":10,"i122":10,"i123":10,"i124":10,"i125":10,"i126":10,"i127":10,"i128":10,"i129":10,"i130":10,"i131":10,"i132":10,"i133":10,"i134":10,"i135":10,"i136":10,"i137":10,"i138":10,"i139":10,"i140":10,"i141":10,"i142":10,"i143":10,"i144":10,"i145":10,"i146":10,"i147":10,"i148":10,"i149":10,"i150":10,"i151":10}; var tabs = {65535:["t0","All Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]}; var altColor = "altColor"; var rowColor = "rowColor"; @@ -656,137 +656,141 @@ extends User user) +TableDescriptor +preCreateTableRegionsInfos(TableDescriptor desc) + + void preDecommissionRegionServers(https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true"; title="class or interface in java.util">Listservers, boolean offload) - + void preDeleteNamespace(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true"; title="class or 
interface in java.lang">String namespaceName) - + void preDeleteSnapshot(SnapshotDescription snapshot) - + void preDeleteTable(TableName tableName) - + void preDeleteTableAction(TableName tableName, User user) - + void preDisableReplicationPeer(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true"; title="class or interface in java.lang">String peerId) - + void preDisableTable(TableName tableName) - + void preDisableTableAction(TableName tableName, User user) - + void preEnableReplicationPeer(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true"; title="class or interface in java.lang">String peerId) - + void preEnableTable(TableName tableName) - + void preEnableTableAction(TableName tableName, User user) - + void preGetClusterMetric
[21/34] hbase-site git commit: Published site at 12e75a8a635785b279900b6905c86a1617526c72.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8d58bba5/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html index 1606c61..7cdc8d2 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html @@ -2038,45 +2038,45 @@ 2030 } 2031 2032 @Override -2033 public long createTable( -2034 final TableDescriptor tableDescriptor, -2035 final byte [][] splitKeys, -2036 final long nonceGroup, -2037 final long nonce) throws IOException { -2038checkInitialized(); -2039 -2040String namespace = tableDescriptor.getTableName().getNamespaceAsString(); +2033 public long createTable(final TableDescriptor tableDescriptor, final byte[][] splitKeys, +2034 final long nonceGroup, final long nonce) throws IOException { +2035checkInitialized(); +2036TableDescriptor desc = getMasterCoprocessorHost().preCreateTableRegionsInfos(tableDescriptor); +2037if (desc == null) { +2038 throw new IOException("Creation for " + tableDescriptor + " is canceled by CP"); +2039} +2040String namespace = desc.getTableName().getNamespaceAsString(); 2041 this.clusterSchemaService.getNamespace(namespace); 2042 -2043RegionInfo[] newRegions = ModifyRegionUtils.createRegionInfos(tableDescriptor, splitKeys); -2044 sanityCheckTableDescriptor(tableDescriptor); +2043RegionInfo[] newRegions = ModifyRegionUtils.createRegionInfos(desc, splitKeys); +2044sanityCheckTableDescriptor(desc); 2045 -2046return MasterProcedureUtil.submitProcedure( -2047new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) { -2048 @Override -2049 protected void run() throws IOException { -2050 getMaster().getMasterCoprocessorHost().preCreateTable(tableDescriptor, newRegions); 
+2046return MasterProcedureUtil +2047 .submitProcedure(new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) { +2048@Override +2049protected void run() throws IOException { +2050 getMaster().getMasterCoprocessorHost().preCreateTable(desc, newRegions); 2051 -2052 LOG.info(getClientIdAuditPrefix() + " create " + tableDescriptor); +2052 LOG.info(getClientIdAuditPrefix() + " create " + desc); 2053 -2054// TODO: We can handle/merge duplicate requests, and differentiate the case of -2055// TableExistsException by saying if the schema is the same or not. -2056// -2057// We need to wait for the procedure to potentially fail due to "prepare" sanity -2058// checks. This will block only the beginning of the procedure. See HBASE-19953. -2059ProcedurePrepareLatch latch = ProcedurePrepareLatch.createBlockingLatch(); -2060submitProcedure(new CreateTableProcedure( -2061 procedureExecutor.getEnvironment(), tableDescriptor, newRegions, latch)); -2062latch.await(); +2054 // TODO: We can handle/merge duplicate requests, and differentiate the case of +2055 // TableExistsException by saying if the schema is the same or not. +2056 // +2057 // We need to wait for the procedure to potentially fail due to "prepare" sanity +2058 // checks. This will block only the beginning of the procedure. See HBASE-19953. 
+2059 ProcedurePrepareLatch latch = ProcedurePrepareLatch.createBlockingLatch(); +2060 submitProcedure( +2061new CreateTableProcedure(procedureExecutor.getEnvironment(), desc, newRegions, latch)); +2062 latch.await(); 2063 -2064 getMaster().getMasterCoprocessorHost().postCreateTable(tableDescriptor, newRegions); -2065 } +2064 getMaster().getMasterCoprocessorHost().postCreateTable(desc, newRegions); +2065} 2066 -2067 @Override -2068 protected String getDescription() { -2069return "CreateTableProcedure"; -2070 } -2071}); +2067@Override +2068protected String getDescription() { +2069 return "CreateTableProcedure"; +2070} +2071 }); 2072 } 2073 2074 @Override http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8d58bba5/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.MasterStoppedException.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.MasterStoppedException.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.MasterStoppedException.html index 1606c61..7cdc8d2 100644 --- a/devapidocs/src-html/org/
[15/34] hbase-site git commit: Published site at 12e75a8a635785b279900b6905c86a1617526c72.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8d58bba5/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/StoreFileReader.Listener.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/StoreFileReader.Listener.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/StoreFileReader.Listener.html index 397c3a6..97cba93 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/StoreFileReader.Listener.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/StoreFileReader.Listener.html @@ -194,588 +194,591 @@ 186if (!shared) { 187 try { 188reader.close(false); -189 } catch (IOException e) { -190LOG.warn("failed to close stream reader", e); -191 } -192} -193 } -194 -195 /** -196 * @deprecated Do not write further code which depends on this call. Instead -197 * use getStoreFileScanner() which uses the StoreFileScanner class/interface -198 * which is the preferred way to scan a store with higher level concepts. -199 * -200 * @param cacheBlocks should we cache the blocks? -201 * @param pread use pread (for concurrent small readers) -202 * @return the underlying HFileScanner -203 */ -204 @Deprecated -205 public HFileScanner getScanner(boolean cacheBlocks, boolean pread) { -206return getScanner(cacheBlocks, pread, false); -207 } -208 -209 /** -210 * @deprecated Do not write further code which depends on this call. Instead -211 * use getStoreFileScanner() which uses the StoreFileScanner class/interface -212 * which is the preferred way to scan a store with higher level concepts. -213 * -214 * @param cacheBlocks -215 * should we cache the blocks? -216 * @param pread -217 * use pread (for concurrent small readers) -218 * @param isCompaction -219 * is scanner being used for compaction? 
-220 * @return the underlying HFileScanner -221 */ -222 @Deprecated -223 public HFileScanner getScanner(boolean cacheBlocks, boolean pread, -224 boolean isCompaction) { -225return reader.getScanner(cacheBlocks, pread, isCompaction); -226 } -227 -228 public void close(boolean evictOnClose) throws IOException { -229synchronized (this) { -230 if (closed) { -231return; -232 } -233 reader.close(evictOnClose); -234 closed = true; -235} -236if (listener != null) { -237 listener.storeFileReaderClosed(this); +189if (this.listener != null) { +190 this.listener.storeFileReaderClosed(this); +191} +192 } catch (IOException e) { +193LOG.warn("failed to close stream reader", e); +194 } +195} +196 } +197 +198 /** +199 * @deprecated Do not write further code which depends on this call. Instead +200 * use getStoreFileScanner() which uses the StoreFileScanner class/interface +201 * which is the preferred way to scan a store with higher level concepts. +202 * +203 * @param cacheBlocks should we cache the blocks? +204 * @param pread use pread (for concurrent small readers) +205 * @return the underlying HFileScanner +206 */ +207 @Deprecated +208 public HFileScanner getScanner(boolean cacheBlocks, boolean pread) { +209return getScanner(cacheBlocks, pread, false); +210 } +211 +212 /** +213 * @deprecated Do not write further code which depends on this call. Instead +214 * use getStoreFileScanner() which uses the StoreFileScanner class/interface +215 * which is the preferred way to scan a store with higher level concepts. +216 * +217 * @param cacheBlocks +218 * should we cache the blocks? +219 * @param pread +220 * use pread (for concurrent small readers) +221 * @param isCompaction +222 * is scanner being used for compaction? 
+223 * @return the underlying HFileScanner +224 */ +225 @Deprecated +226 public HFileScanner getScanner(boolean cacheBlocks, boolean pread, +227 boolean isCompaction) { +228return reader.getScanner(cacheBlocks, pread, isCompaction); +229 } +230 +231 public void close(boolean evictOnClose) throws IOException { +232synchronized (this) { +233 if (closed) { +234return; +235 } +236 reader.close(evictOnClose); +237 closed = true; 238} -239 } -240 -241 /** -242 * Check if this storeFile may contain keys within the TimeRange that -243 * have not expired (i.e. not older than oldestUnexpiredTS). -244 * @param tr the timeRange to restrict -245 * @param oldestUnexpiredTS the oldest timestamp that is not expired, as -246 * determined by the column family's TTL -247 * @return false if queried keys definitely don't exist in this StoreFile -248 */ -249 boolean passesTimerangeFilter(TimeRange tr, long oldestUnexpiredTS) { -250return this.timeRange == null? true: -251 this.ti
[31/34] hbase-site git commit: Published site at 12e75a8a635785b279900b6905c86a1617526c72.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8d58bba5/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/ObserverContext.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/ObserverContext.html b/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/ObserverContext.html index 8b57e14..82d70a3 100644 --- a/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/ObserverContext.html +++ b/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/ObserverContext.html @@ -1411,6 +1411,13 @@ +default TableDescriptor +MasterObserver.preCreateTableRegionsInfos(ObserverContextctx, + TableDescriptor desc) +Called before we create the region infos for this table. + + + default void MasterObserver.preDecommissionRegionServers(ObserverContext ctx, https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true"; title="class or interface in java.util">List servers, @@ -1418,7 +1425,7 @@ Called before decommission region servers. - + default void RegionObserver.preDelete(ObserverContext c, Delete delete, @@ -1427,14 +1434,14 @@ Called before the client deletes a value. - + void MetaTableMetrics.ExampleRegionObserverMeta.preDelete(ObserverContext e, Delete delete, WALEdit edit, Durability durability) - + default void MasterObserver.preDeleteNamespace(ObserverContext ctx, https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true"; title="class or interface in java.lang">String namespace) @@ -1442,14 +1449,14 @@ namespace - + default void MasterObserver.preDeleteSnapshot(ObserverContext ctx, SnapshotDescription snapshot) Called before a snapshot is deleted. - + default void MasterObserver.preDeleteTable(ObserverContext ctx, TableName tableName) @@ -1457,7 +1464,7 @@ table. - + default void MasterObserver.preDeleteTableAction(ObserverContext ctx, TableName tableName) @@ -1465,49 +1472,49 @@ table. 
- + default void MasterObserver.preDisableReplicationPeer(ObserverContext ctx, https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true"; title="class or interface in java.lang">String peerId) Called before disable a replication peer - + default void MasterObserver.preDisableTable(ObserverContext ctx, TableName tableName) Called prior to disabling a table. - + default void MasterObserver.preDisableTableAction(ObserverContext ctx, TableName tableName) Called prior to disabling a table. - + default void MasterObserver.preEnableReplicationPeer(ObserverContext ctx, https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true"; title="class or interface in java.lang">String peerId) Called before enable a replication peer - + default void MasterObserver.preEnableTable(ObserverContext ctx, TableName tableName) Called prior to enabling a table. - + default void MasterObserver.preEnableTableAction(ObserverContext ctx, TableName tableName) Called prior to enabling a table. - + default com.google.protobuf.Message EndpointObserver.preEndpointInvocation(ObserverContext ctx, com.google.protobuf.Service service, @@ -1516,13 +1523,13 @@ Called before an Endpoint service method is invoked. - + default void RegionServerObserver.preExecuteProcedures(ObserverContext ctx) This will be called before executing procedures - + default boolean RegionObserver.preExists(ObserverContext c, Get get, @@ -1530,14 +1537,14 @@ Called before the client tests for existence using a Get. - + default void RegionObserver.preFlush(ObserverContext c, FlushLifeCycleTracker tracker) Called before the memstore is flushed to disk. - + default InternalScanner RegionObserver.preFlush(ObserverContext c, Store store, @@ -1546,7 +1553,7 @@ Called before a Store's memstore is flushed to disk. - + default void RegionObserver.preFlushScannerOpen(ObserverContext c, Store store, @@ -1555,2
[13/34] hbase-site git commit: Published site at 12e75a8a635785b279900b6905c86a1617526c72.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8d58bba5/downloads.html -- diff --git a/downloads.html b/downloads.html index 74fbea3..9bfc158 100644 --- a/downloads.html +++ b/downloads.html @@ -7,7 +7,7 @@ - + Apache HBase – Apache HBase Downloads @@ -306,6 +306,8 @@ under the License. --> how to verify your mirrored downloads. +NOTE: 2.1.1 and 2.0.3 have a serious memory leak. See HBASE-21551. We are working on replacement releases. + Releases @@ -461,7 +463,7 @@ under the License. --> https://www.apache.org/";>The Apache Software Foundation. All rights reserved. - Last Published: 2018-12-05 + Last Published: 2018-12-06 http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8d58bba5/export_control.html -- diff --git a/export_control.html b/export_control.html index 81b761e..d0728d7 100644 --- a/export_control.html +++ b/export_control.html @@ -7,7 +7,7 @@ - + Apache HBase – Export Control @@ -341,7 +341,7 @@ for more details. https://www.apache.org/";>The Apache Software Foundation. All rights reserved. - Last Published: 2018-12-05 + Last Published: 2018-12-06 http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8d58bba5/index.html -- diff --git a/index.html b/index.html index 615b028..1b6362f 100644 --- a/index.html +++ b/index.html @@ -7,7 +7,7 @@ - + Apache HBase – Apache HBase⢠Home @@ -421,7 +421,7 @@ Apache HBase is an open-source, distributed, versioned, non-relational database https://www.apache.org/";>The Apache Software Foundation. All rights reserved. - Last Published: 2018-12-05 + Last Published: 2018-12-06 http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8d58bba5/integration.html -- diff --git a/integration.html b/integration.html index 8f64b99..d0d4c3b 100644 --- a/integration.html +++ b/integration.html @@ -7,7 +7,7 @@ - + Apache HBase – CI Management @@ -301,7 +301,7 @@ https://www.apache.org/";>The Apache Software Foundation. All rights reserved. 
- Last Published: 2018-12-05 + Last Published: 2018-12-06 http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8d58bba5/issue-tracking.html -- diff --git a/issue-tracking.html b/issue-tracking.html index 4d1f7f5..d231a8a 100644 --- a/issue-tracking.html +++ b/issue-tracking.html @@ -7,7 +7,7 @@ - + Apache HBase – Issue Management @@ -298,7 +298,7 @@ https://www.apache.org/";>The Apache Software Foundation. All rights reserved. - Last Published: 2018-12-05 + Last Published: 2018-12-06 http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8d58bba5/license.html -- diff --git a/license.html b/license.html index f8b9b5e..9157d70 100644 --- a/license.html +++ b/license.html @@ -7,7 +7,7 @@ - + Apache HBase – Project Licenses @@ -501,7 +501,7 @@ https://www.apache.org/";>The Apache Software Foundation. All rights reserved. - Last Published: 2018-12-05 + Last Published: 2018-12-06 http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8d58bba5/mail-lists.html -- diff --git a/mail-lists.html b/mail-lists.html index d2e9b4b..13c3832 100644 --- a/mail-lists.html +++ b/mail-lists.html @@ -7,7 +7,7 @@ - + Apache HBase – Project Mailing Lists @@ -351,7 +351,7 @@ https://www.apache.org/";>The Apache Software Foundation. All rights reserved. - Last Published: 2018-12-05 + Last Published: 2018-12-06 http://git-wip-us.apach
hbase-site git commit: INFRA-10751 Empty commit
Repository: hbase-site Updated Branches: refs/heads/asf-site 8d58bba52 -> bafdf0715 INFRA-10751 Empty commit Project: http://git-wip-us.apache.org/repos/asf/hbase-site/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase-site/commit/bafdf071 Tree: http://git-wip-us.apache.org/repos/asf/hbase-site/tree/bafdf071 Diff: http://git-wip-us.apache.org/repos/asf/hbase-site/diff/bafdf071 Branch: refs/heads/asf-site Commit: bafdf0715464f186c29b99657daade19edcb67b6 Parents: 8d58bba Author: jenkins Authored: Thu Dec 6 14:52:46 2018 + Committer: jenkins Committed: Thu Dec 6 14:52:46 2018 + -- --
[08/34] hbase-site git commit: Published site at 12e75a8a635785b279900b6905c86a1617526c72.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8d58bba5/testdevapidocs/org/apache/hadoop/hbase/wal/package-tree.html -- diff --git a/testdevapidocs/org/apache/hadoop/hbase/wal/package-tree.html b/testdevapidocs/org/apache/hadoop/hbase/wal/package-tree.html index f62b3ca..2981b5a 100644 --- a/testdevapidocs/org/apache/hadoop/hbase/wal/package-tree.html +++ b/testdevapidocs/org/apache/hadoop/hbase/wal/package-tree.html @@ -147,8 +147,8 @@ java.lang.https://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true"; title="class or interface in java.lang">Enum(implements java.lang.https://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true"; title="class or interface in java.lang">Comparable , java.io.https://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true"; title="class or interface in java.io">Serializable) -org.apache.hadoop.hbase.wal.FaultyFSLog.FailureType org.apache.hadoop.hbase.wal.IOTestProvider.AllowedOperations +org.apache.hadoop.hbase.wal.FaultyFSLog.FailureType org.apache.hadoop.hbase.wal.TestWALSplit.Corruptions
[11/34] hbase-site git commit: Published site at 12e75a8a635785b279900b6905c86a1617526c72.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8d58bba5/testdevapidocs/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.CPMasterObserver.html -- diff --git a/testdevapidocs/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.CPMasterObserver.html b/testdevapidocs/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.CPMasterObserver.html index df02083..fe10c2e 100644 --- a/testdevapidocs/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.CPMasterObserver.html +++ b/testdevapidocs/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.CPMasterObserver.html @@ -18,7 +18,7 @@ catch(err) { } //--> -var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":10,"i65":10,"i66":10,"i67":10,"i68":10,"i69":10,"i70":10,"i71":10,"i72":10,"i73":10,"i74":10,"i75":10,"i76":10,"i77":10,"i78":10,"i79":10,"i80":10,"i81":10,"i82":10,"i83":10,"i84":10,"i85":10,"i86":10,"i87":10,"i88":10,"i89":10,"i90":10,"i91":10,"i92":10,"i93":10,"i94":10,"i95":10,"i96":10,"i97":10,"i98":10,"i99":10,"i100":10,"i101":10,"i102":10,"i103":10,"i104":10,"i105":10,"i106":10,"i107":10,"i108":10,"i 
109":10,"i110":10,"i111":10,"i112":10,"i113":10,"i114":10,"i115":10,"i116":10,"i117":10,"i118":10,"i119":10,"i120":10,"i121":10,"i122":10,"i123":10,"i124":10,"i125":10,"i126":10,"i127":10,"i128":10,"i129":10,"i130":10,"i131":10,"i132":10,"i133":10,"i134":10,"i135":10,"i136":10,"i137":10,"i138":10,"i139":10,"i140":10,"i141":10,"i142":10,"i143":10,"i144":10,"i145":10,"i146":10,"i147":10,"i148":10,"i149":10,"i150":10,"i151":10,"i152":10,"i153":10,"i154":10,"i155":10,"i156":10,"i157":10,"i158":10,"i159":10,"i160":10,"i161":10,"i162":10,"i163":10,"i164":10,"i165":10,"i166":10,"i167":10,"i168":10,"i169":10,"i170":10,"i171":10,"i172":10,"i173":10,"i174":10,"i175":10,"i176":10,"i177":10,"i178":10,"i179":10,"i180":10,"i181":10,"i182":10}; +var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":10,"i65":10,"i66":10,"i67":10,"i68":10,"i69":10,"i70":10,"i71":10,"i72":10,"i73":10,"i74":10,"i75":10,"i76":10,"i77":10,"i78":10,"i79":10,"i80":10,"i81":10,"i82":10,"i83":10,"i84":10,"i85":10,"i86":10,"i87":10,"i88":10,"i89":10,"i90":10,"i91":10,"i92":10,"i93":10,"i94":10,"i95":10,"i96":10,"i97":10,"i98":10,"i99":10,"i100":10,"i101":10,"i102":10,"i103":10,"i104":10,"i105":10,"i106":10,"i107":10,"i108":10,"i 
109":10,"i110":10,"i111":10,"i112":10,"i113":10,"i114":10,"i115":10,"i116":10,"i117":10,"i118":10,"i119":10,"i120":10,"i121":10,"i122":10,"i123":10,"i124":10,"i125":10,"i126":10,"i127":10,"i128":10,"i129":10,"i130":10,"i131":10,"i132":10,"i133":10,"i134":10,"i135":10,"i136":10,"i137":10,"i138":10,"i139":10,"i140":10,"i141":10,"i142":10,"i143":10,"i144":10,"i145":10,"i146":10,"i147":10,"i148":10,"i149":10,"i150":10,"i151":10,"i152":10,"i153":10,"i154":10,"i155":10,"i156":10,"i157":10,"i158":10,"i159":10,"i160":10,"i161":10,"i162":10,"i163":10,"i164":10,"i165":10,"i166":10,"i167":10,"i168":10,"i169":10,"i170":10,"i171":10,"i172":10,"i173":10,"i174":10,"i175":10,"i176":10,"i177":10,"i178":10,"i179":10,"i180":10,"i181":10,"i182":10,"i183":10}; var tabs = {65535:["t0","All Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]}; var altColor = "altColor"; var rowColor = "rowColor"; @@ -367,150 +367,154 @@ implements org.apache.hadoop.hbase.coprocessor.MasterCoprocessor, org.apache.had private boolean -preDeleteColumnCalled +preCreateTableRegionInfosCalled private boolean -preDeleteColumnFamilyActionCalled +preDeleteColumnCalled private boolean -preDeleteNamespaceCalled +preDeleteColumnFamilyActionCalled private boolean -preDeleteSnapshotCalled +preDeleteNamespaceCalled private boolean -preDeleteTableActionCalled +preDeleteSnapshotCalled private boolean -preDeleteTableCalled +preDeleteTableActionCalled private boolean -preDisableTableActionCalled +preDeleteTableCall
[09/34] hbase-site git commit: Published site at 12e75a8a635785b279900b6905c86a1617526c72.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8d58bba5/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.FailingSplitMasterObserver.html -- diff --git a/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.FailingSplitMasterObserver.html b/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.FailingSplitMasterObserver.html index c7b3079..fc52301 100644 --- a/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.FailingSplitMasterObserver.html +++ b/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.FailingSplitMasterObserver.html @@ -230,7 +230,7 @@ implements org.apache.hadoop.hbase.coprocessor.MasterCoprocessor, org.apache.had Methods inherited from interface org.apache.hadoop.hbase.coprocessor.MasterObserver -postAbortProcedure, postAddReplicationPeer, postAddRSGroup, postAssign, postBalance, postBalanceRSGroup, postBalanceSwitch, postClearDeadServers, postCloneSnapshot, postCompletedCreateTableAction, postCompletedDeleteTableAction, postCompletedDisableTableAction, postCompletedEnableTableAction, postCompletedMergeRegionsAction, postCompletedModifyTableAction, postCompletedModifyTableAction, postCompletedSplitRegionAction, postCompletedTruncateTableAction, postCreateNamespace, postCreateTable, postDecommissionRegionServers, postDeleteNamespace, postDeleteSnapshot, postDeleteTable, postDisableReplicationPeer, postDisableTable, postEnableReplicationPeer, postEnableTable, postGetClusterMetrics, postGetLocks, postGetNamespaceDescriptor, postGetProcedures, postGetReplicationPeerConfig, postGetRSGroupInfo, postGetRSGroupInfoOfServer, postGetRSGroupInfoOfTable, postGetTableDescriptors, postGetTableNames, postListDecommissionedRegionServers, postListNamespaceDescriptors, postListReplicati onPeers, postListRSGroups, postListSnapshot, postLockHeartbeat, postMergeRegions, postMergeRegionsCommitAction, 
postModifyNamespace, postModifyNamespace, postModifyTable, postModifyTable, postMove, postMoveServers, postMoveServersAndTables, postMoveTables, postRecommissionRegionServer, postRegionOffline, postRemoveReplicationPeer, postRemoveRSGroup, postRemoveServers, postRequestLock, postRestoreSnapshot, postRollBackMergeRegionsAction, postRollBackSplitRegionAction, postSetNamespaceQuota, postSetSplitOrMergeEnabled, postSetTableQuota, postSetUserQuota, postSetUserQuota, postSetUserQuota, postSnapshot, postStartMaster, postTableFlush, postTransitReplicationPeerSyncReplicationState, postTruncateTable, postUnassign, postUpdateReplicationPeerConfig, preAbortProcedure, preAddReplicationPeer, preAddRSGroup, preAssign, preBalance, preBalanceRSGroup, preBalanceSwitch, preClearDeadServers, preCloneSnapshot, preCreateNamespace, preCreateTable, preCreateTableAction, preDecommissionRegionServe rs, preDeleteNamespace, preDeleteSnapshot, preDeleteTable, preDeleteTableAction, preDisableReplicationPeer, preDisableTable, preDisableTableAction, preEnableReplicationPeer, preEnableTable, preEnableTableAction, preGetClusterMetrics, preGetLocks, preGetNamespaceDescriptor, preGetProcedures, preGetReplicationPeerConfig, preGetRSGroupInfo, preGetRSGroupInfoOfServer, preGetRSGroupInfoOfTable, preGetTableDescriptors, preGetTableNames, preListDecommissionedRegionServers, preListNamespaceDescriptors, preListReplicationPeers, preListRSGroups, preListSnapshot, preLockHeartbeat, preMasterInitialization, preMergeRegions, preMergeRegionsAction, preMergeRegionsCommitAction, preModifyNamespace, preModifyNamespace, preModifyTable, preModifyTable, preModifyTableAction, preModifyTableAction, preMove, preMoveServers, preMoveServersAndTables, preMoveTables, preRecommissionRegionServer, preRegionOffline, preRemoveReplicationPeer, preRemoveRSGroup, preRemoveServers, preRequestLock, preRestoreSnapshot, preSetNamespaceQuota, preSetSplitOrMergeEnabled, preSetTableQuota, preSetUserQuota, preSetUserQuota, 
preSetUserQuota, preShutdown, preSnapshot, preSplitRegion, preSplitRegionAction, preSplitRegionAfterMETAAction, preStopMaster, preTableFlush, preTransitReplicationPeerSyncReplicationState, preTruncateTable, preTruncateTableAction, preUnassign, preUpdateReplicationPeerConfig +postAbortProcedure, postAddReplicationPeer, postAddRSGroup, postAssign, postBalance, postBalanceRSGroup, postBalanceSwitch, postClearDeadServers, postCloneSnapshot, postCompletedCreateTableAction, postCompletedDeleteTableAction, postCompletedDisableTableAction, postCompletedEnableTableAction, postCompletedMergeRegionsAction, postCompletedModifyTableAction, postCompletedModifyTableAction, postCompletedSplitRegionAction, postCompletedTruncateTableAction, postCreateNamespace, postCreateTable, postDecommissionRegionServers, postDeleteNamespace, postDeleteSnapshot, postDeleteTable, postDisableReplicationPeer, po
[03/34] hbase-site git commit: Published site at 12e75a8a635785b279900b6905c86a1617526c72.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8d58bba5/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestSwitchToStreamRead.MatchLastRowFilterRowFilter.html -- diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestSwitchToStreamRead.MatchLastRowFilterRowFilter.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestSwitchToStreamRead.MatchLastRowFilterRowFilter.html index 6f38b3f..d1b8ba8 100644 --- a/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestSwitchToStreamRead.MatchLastRowFilterRowFilter.html +++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestSwitchToStreamRead.MatchLastRowFilterRowFilter.html @@ -31,243 +31,293 @@ 023 024import java.io.IOException; 025import java.util.ArrayList; -026import java.util.List; -027import java.util.concurrent.ThreadLocalRandom; -028import org.apache.hadoop.hbase.Cell; -029import org.apache.hadoop.hbase.HBaseClassTestRule; -030import org.apache.hadoop.hbase.HBaseTestingUtility; -031import org.apache.hadoop.hbase.TableName; -032import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; -033import org.apache.hadoop.hbase.client.Put; -034import org.apache.hadoop.hbase.client.Result; -035import org.apache.hadoop.hbase.client.Scan; -036import org.apache.hadoop.hbase.client.TableDescriptorBuilder; -037import org.apache.hadoop.hbase.filter.Filter; -038import org.apache.hadoop.hbase.filter.FilterBase; -039import org.apache.hadoop.hbase.regionserver.HRegion.RegionScannerImpl; -040import org.apache.hadoop.hbase.regionserver.ScannerContext.LimitScope; -041import org.apache.hadoop.hbase.testclassification.MediumTests; -042import org.apache.hadoop.hbase.testclassification.RegionServerTests; -043import org.apache.hadoop.hbase.util.Bytes; -044import org.junit.AfterClass; -045import org.junit.BeforeClass; -046import org.junit.ClassRule; -047import org.junit.Ignore; -048import org.junit.Test; -049import 
org.junit.experimental.categories.Category; -050 -051@Category({ RegionServerTests.class, MediumTests.class }) -052public class TestSwitchToStreamRead { -053 -054 @ClassRule -055 public static final HBaseClassTestRule CLASS_RULE = -056 HBaseClassTestRule.forClass(TestSwitchToStreamRead.class); +026import java.util.Collection; +027import java.util.Collections; +028import java.util.List; +029import java.util.Set; +030import java.util.concurrent.ThreadLocalRandom; +031import java.util.stream.Collectors; +032 +033import org.apache.hadoop.hbase.Cell; +034import org.apache.hadoop.hbase.HBaseClassTestRule; +035import org.apache.hadoop.hbase.HBaseTestingUtility; +036import org.apache.hadoop.hbase.TableName; +037import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; +038import org.apache.hadoop.hbase.client.Put; +039import org.apache.hadoop.hbase.client.Result; +040import org.apache.hadoop.hbase.client.Scan; +041import org.apache.hadoop.hbase.client.Scan.ReadType; +042import org.apache.hadoop.hbase.client.TableDescriptorBuilder; +043import org.apache.hadoop.hbase.filter.Filter; +044import org.apache.hadoop.hbase.filter.FilterBase; +045import org.apache.hadoop.hbase.regionserver.HRegion.RegionScannerImpl; +046import org.apache.hadoop.hbase.regionserver.ScannerContext.LimitScope; +047import org.apache.hadoop.hbase.testclassification.MediumTests; +048import org.apache.hadoop.hbase.testclassification.RegionServerTests; +049import org.apache.hadoop.hbase.util.Bytes; +050import org.junit.After; +051import org.junit.Assert; +052import org.junit.Before; +053import org.junit.ClassRule; +054import org.junit.Ignore; +055import org.junit.Test; +056import org.junit.experimental.categories.Category; 057 -058 private static final HBaseTestingUtility UTIL = new HBaseTestingUtility(); -059 -060 private static TableName TABLE_NAME = TableName.valueOf("stream"); -061 -062 private static byte[] FAMILY = Bytes.toBytes("cf"); -063 -064 private static byte[] QUAL = 
Bytes.toBytes("cq"); -065 -066 private static String VALUE_PREFIX; -067 -068 private static HRegion REGION; -069 -070 @BeforeClass -071 public static void setUp() throws IOException { -072 UTIL.getConfiguration().setLong(StoreScanner.STORESCANNER_PREAD_MAX_BYTES, 2048); -073StringBuilder sb = new StringBuilder(256); -074for (int i = 0; i < 255; i++) { -075 sb.append((char) ThreadLocalRandom.current().nextInt('A', 'z' + 1)); -076} -077VALUE_PREFIX = sb.append("-").toString(); -078REGION = UTIL.createLocalHRegion( -079 TableDescriptorBuilder.newBuilder(TABLE_NAME) -080.setColumnFamily( -081 ColumnFamilyDescriptorBuilder.newBuilder(FAMILY).setBlocksize(1024).build()) -082.build(), -083 null, null); -084for (int i = 0; i < 900; i++) { -085 REGION -086.put(new Put(Bytes.toBytes(i)).addColumn(FAMILY, QUAL, Bytes.toBytes(VALUE_
[01/34] hbase-site git commit: Published site at 12e75a8a635785b279900b6905c86a1617526c72.
Repository: hbase-site Updated Branches: refs/heads/asf-site 0f7d611e9 -> 8d58bba52 http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8d58bba5/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestSwitchToStreamRead.html -- diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestSwitchToStreamRead.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestSwitchToStreamRead.html index 6f38b3f..d1b8ba8 100644 --- a/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestSwitchToStreamRead.html +++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestSwitchToStreamRead.html @@ -31,243 +31,293 @@ 023 024import java.io.IOException; 025import java.util.ArrayList; -026import java.util.List; -027import java.util.concurrent.ThreadLocalRandom; -028import org.apache.hadoop.hbase.Cell; -029import org.apache.hadoop.hbase.HBaseClassTestRule; -030import org.apache.hadoop.hbase.HBaseTestingUtility; -031import org.apache.hadoop.hbase.TableName; -032import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; -033import org.apache.hadoop.hbase.client.Put; -034import org.apache.hadoop.hbase.client.Result; -035import org.apache.hadoop.hbase.client.Scan; -036import org.apache.hadoop.hbase.client.TableDescriptorBuilder; -037import org.apache.hadoop.hbase.filter.Filter; -038import org.apache.hadoop.hbase.filter.FilterBase; -039import org.apache.hadoop.hbase.regionserver.HRegion.RegionScannerImpl; -040import org.apache.hadoop.hbase.regionserver.ScannerContext.LimitScope; -041import org.apache.hadoop.hbase.testclassification.MediumTests; -042import org.apache.hadoop.hbase.testclassification.RegionServerTests; -043import org.apache.hadoop.hbase.util.Bytes; -044import org.junit.AfterClass; -045import org.junit.BeforeClass; -046import org.junit.ClassRule; -047import org.junit.Ignore; -048import org.junit.Test; -049import org.junit.experimental.categories.Category; -050 -051@Category({ RegionServerTests.class, 
MediumTests.class }) -052public class TestSwitchToStreamRead { -053 -054 @ClassRule -055 public static final HBaseClassTestRule CLASS_RULE = -056 HBaseClassTestRule.forClass(TestSwitchToStreamRead.class); +026import java.util.Collection; +027import java.util.Collections; +028import java.util.List; +029import java.util.Set; +030import java.util.concurrent.ThreadLocalRandom; +031import java.util.stream.Collectors; +032 +033import org.apache.hadoop.hbase.Cell; +034import org.apache.hadoop.hbase.HBaseClassTestRule; +035import org.apache.hadoop.hbase.HBaseTestingUtility; +036import org.apache.hadoop.hbase.TableName; +037import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; +038import org.apache.hadoop.hbase.client.Put; +039import org.apache.hadoop.hbase.client.Result; +040import org.apache.hadoop.hbase.client.Scan; +041import org.apache.hadoop.hbase.client.Scan.ReadType; +042import org.apache.hadoop.hbase.client.TableDescriptorBuilder; +043import org.apache.hadoop.hbase.filter.Filter; +044import org.apache.hadoop.hbase.filter.FilterBase; +045import org.apache.hadoop.hbase.regionserver.HRegion.RegionScannerImpl; +046import org.apache.hadoop.hbase.regionserver.ScannerContext.LimitScope; +047import org.apache.hadoop.hbase.testclassification.MediumTests; +048import org.apache.hadoop.hbase.testclassification.RegionServerTests; +049import org.apache.hadoop.hbase.util.Bytes; +050import org.junit.After; +051import org.junit.Assert; +052import org.junit.Before; +053import org.junit.ClassRule; +054import org.junit.Ignore; +055import org.junit.Test; +056import org.junit.experimental.categories.Category; 057 -058 private static final HBaseTestingUtility UTIL = new HBaseTestingUtility(); -059 -060 private static TableName TABLE_NAME = TableName.valueOf("stream"); -061 -062 private static byte[] FAMILY = Bytes.toBytes("cf"); -063 -064 private static byte[] QUAL = Bytes.toBytes("cq"); -065 -066 private static String VALUE_PREFIX; -067 -068 private static HRegion REGION; 
-069 -070 @BeforeClass -071 public static void setUp() throws IOException { -072 UTIL.getConfiguration().setLong(StoreScanner.STORESCANNER_PREAD_MAX_BYTES, 2048); -073StringBuilder sb = new StringBuilder(256); -074for (int i = 0; i < 255; i++) { -075 sb.append((char) ThreadLocalRandom.current().nextInt('A', 'z' + 1)); -076} -077VALUE_PREFIX = sb.append("-").toString(); -078REGION = UTIL.createLocalHRegion( -079 TableDescriptorBuilder.newBuilder(TABLE_NAME) -080.setColumnFamily( -081 ColumnFamilyDescriptorBuilder.newBuilder(FAMILY).setBlocksize(1024).build()) -082.build(), -083 null, null); -084for (int i = 0; i < 900; i++) { -085 REGION -086.put(new Put(Bytes.toBytes(i)).addColumn(FAMILY, QUAL, Bytes.toBytes(VALUE_PREFIX + i))); -087} -088REGION.flush(true)
[05/34] hbase-site git commit: Published site at 12e75a8a635785b279900b6905c86a1617526c72.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8d58bba5/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestSwitchToStreamRead.MatchLastRowCellNextColFilter.html -- diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestSwitchToStreamRead.MatchLastRowCellNextColFilter.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestSwitchToStreamRead.MatchLastRowCellNextColFilter.html index 6f38b3f..d1b8ba8 100644 --- a/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestSwitchToStreamRead.MatchLastRowCellNextColFilter.html +++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestSwitchToStreamRead.MatchLastRowCellNextColFilter.html @@ -31,243 +31,293 @@ 023 024import java.io.IOException; 025import java.util.ArrayList; -026import java.util.List; -027import java.util.concurrent.ThreadLocalRandom; -028import org.apache.hadoop.hbase.Cell; -029import org.apache.hadoop.hbase.HBaseClassTestRule; -030import org.apache.hadoop.hbase.HBaseTestingUtility; -031import org.apache.hadoop.hbase.TableName; -032import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; -033import org.apache.hadoop.hbase.client.Put; -034import org.apache.hadoop.hbase.client.Result; -035import org.apache.hadoop.hbase.client.Scan; -036import org.apache.hadoop.hbase.client.TableDescriptorBuilder; -037import org.apache.hadoop.hbase.filter.Filter; -038import org.apache.hadoop.hbase.filter.FilterBase; -039import org.apache.hadoop.hbase.regionserver.HRegion.RegionScannerImpl; -040import org.apache.hadoop.hbase.regionserver.ScannerContext.LimitScope; -041import org.apache.hadoop.hbase.testclassification.MediumTests; -042import org.apache.hadoop.hbase.testclassification.RegionServerTests; -043import org.apache.hadoop.hbase.util.Bytes; -044import org.junit.AfterClass; -045import org.junit.BeforeClass; -046import org.junit.ClassRule; -047import org.junit.Ignore; -048import org.junit.Test; -049import 
org.junit.experimental.categories.Category; -050 -051@Category({ RegionServerTests.class, MediumTests.class }) -052public class TestSwitchToStreamRead { -053 -054 @ClassRule -055 public static final HBaseClassTestRule CLASS_RULE = -056 HBaseClassTestRule.forClass(TestSwitchToStreamRead.class); +026import java.util.Collection; +027import java.util.Collections; +028import java.util.List; +029import java.util.Set; +030import java.util.concurrent.ThreadLocalRandom; +031import java.util.stream.Collectors; +032 +033import org.apache.hadoop.hbase.Cell; +034import org.apache.hadoop.hbase.HBaseClassTestRule; +035import org.apache.hadoop.hbase.HBaseTestingUtility; +036import org.apache.hadoop.hbase.TableName; +037import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; +038import org.apache.hadoop.hbase.client.Put; +039import org.apache.hadoop.hbase.client.Result; +040import org.apache.hadoop.hbase.client.Scan; +041import org.apache.hadoop.hbase.client.Scan.ReadType; +042import org.apache.hadoop.hbase.client.TableDescriptorBuilder; +043import org.apache.hadoop.hbase.filter.Filter; +044import org.apache.hadoop.hbase.filter.FilterBase; +045import org.apache.hadoop.hbase.regionserver.HRegion.RegionScannerImpl; +046import org.apache.hadoop.hbase.regionserver.ScannerContext.LimitScope; +047import org.apache.hadoop.hbase.testclassification.MediumTests; +048import org.apache.hadoop.hbase.testclassification.RegionServerTests; +049import org.apache.hadoop.hbase.util.Bytes; +050import org.junit.After; +051import org.junit.Assert; +052import org.junit.Before; +053import org.junit.ClassRule; +054import org.junit.Ignore; +055import org.junit.Test; +056import org.junit.experimental.categories.Category; 057 -058 private static final HBaseTestingUtility UTIL = new HBaseTestingUtility(); -059 -060 private static TableName TABLE_NAME = TableName.valueOf("stream"); -061 -062 private static byte[] FAMILY = Bytes.toBytes("cf"); -063 -064 private static byte[] QUAL = 
Bytes.toBytes("cq"); -065 -066 private static String VALUE_PREFIX; -067 -068 private static HRegion REGION; -069 -070 @BeforeClass -071 public static void setUp() throws IOException { -072 UTIL.getConfiguration().setLong(StoreScanner.STORESCANNER_PREAD_MAX_BYTES, 2048); -073StringBuilder sb = new StringBuilder(256); -074for (int i = 0; i < 255; i++) { -075 sb.append((char) ThreadLocalRandom.current().nextInt('A', 'z' + 1)); -076} -077VALUE_PREFIX = sb.append("-").toString(); -078REGION = UTIL.createLocalHRegion( -079 TableDescriptorBuilder.newBuilder(TABLE_NAME) -080.setColumnFamily( -081 ColumnFamilyDescriptorBuilder.newBuilder(FAMILY).setBlocksize(1024).build()) -082.build(), -083 null, null); -084for (int i = 0; i < 900; i++) { -085 REGION -086.put(new Put(Bytes.toBytes(i)).addColumn(FAMILY, QUAL, Bytes.toBy