svn commit: r24174 - in /dev/hbase: hbase-2.0.0-beta-1-RC2/ hbase-2.0.0-beta-1-RC3/
Author: stack Date: Sat Jan 13 04:44:07 2018 New Revision: 24174 Log: Remove 2.0.0-beta-1 RC2 and put in place RC3 Added: dev/hbase/hbase-2.0.0-beta-1-RC3/ dev/hbase/hbase-2.0.0-beta-1-RC3/hbase-2.0.0-beta-1-bin.tar.gz (with props) dev/hbase/hbase-2.0.0-beta-1-RC3/hbase-2.0.0-beta-1-bin.tar.gz.asc dev/hbase/hbase-2.0.0-beta-1-RC3/hbase-2.0.0-beta-1-bin.tar.gz.md5 dev/hbase/hbase-2.0.0-beta-1-RC3/hbase-2.0.0-beta-1-bin.tar.gz.sha dev/hbase/hbase-2.0.0-beta-1-RC3/hbase-2.0.0-beta-1-src.tar.gz (with props) dev/hbase/hbase-2.0.0-beta-1-RC3/hbase-2.0.0-beta-1-src.tar.gz.asc dev/hbase/hbase-2.0.0-beta-1-RC3/hbase-2.0.0-beta-1-src.tar.gz.md5 dev/hbase/hbase-2.0.0-beta-1-RC3/hbase-2.0.0-beta-1-src.tar.gz.sha Removed: dev/hbase/hbase-2.0.0-beta-1-RC2/ Added: dev/hbase/hbase-2.0.0-beta-1-RC3/hbase-2.0.0-beta-1-bin.tar.gz == Binary file - no diff available. Propchange: dev/hbase/hbase-2.0.0-beta-1-RC3/hbase-2.0.0-beta-1-bin.tar.gz -- svn:mime-type = application/octet-stream Added: dev/hbase/hbase-2.0.0-beta-1-RC3/hbase-2.0.0-beta-1-bin.tar.gz.asc == --- dev/hbase/hbase-2.0.0-beta-1-RC3/hbase-2.0.0-beta-1-bin.tar.gz.asc (added) +++ dev/hbase/hbase-2.0.0-beta-1-RC3/hbase-2.0.0-beta-1-bin.tar.gz.asc Sat Jan 13 04:44:07 2018 @@ -0,0 +1,16 @@ +-BEGIN PGP SIGNATURE- + +iQIcBAABCAAGBQJaWYbUAAoJEJgWx/yKzJPSzSIP/2IsC9NyjIKhwQOEK2XIxfbA +BECurBiJjzxvoG6RHWka+1PDi+3Wiq4d9/dyLcHVmlYVjoKRGUOwG1HlBvYrTeIf +8RmCbQTX6TSPLlLow0xmk34aYXro3K1CnORGObwk7djOd9pRj7VqYE9vf4f+Cc9b +/QquNRuoDLDse8+vogVPVwSxKNzyUDtaKo/dTYkL39Yl/5mzbWoEffM3rd8wPsvl +XBNgezFMqKf2hBjJUslQJlsGkh3nJQYYktfxUYwSo6p1ZYrm5afqrmiIUzjMSRGK +BGdlMP9lCXT6OqjureElJahzKddU/G/0ezIItTr4OD7vCFmFSJNQ0cZYgtnK0ZVH +b3Onn1oUDRZgTJwUTnsyWtABDeKKeDj9h6y21mrjCg/9+biiiXYDpvZKhCjNHRZl +ig5DZ0WdEoUHJ8IgGHWY5lNNXNkEcOOCjLr6iK9gc0IbeBk3xZqs7SvS9azyAiTV +zu3EapwYXKXj8XJLnJyytegAdz7qUNp35rSg3Wqc8dnb+YAGVCoMBnVGLSqlXIG1 +qX17lrcFgMRhRaE/HVu1lAMeaxKf9z1Tgc2FpXBb/tZzxmgl99q0qdAsUZKGQMxQ +fsUmNzs9PGivNXYVQcKK2xhHdLOqJWiI1ISsD0gFICzumlXSdayINrEyfLe/DGkX 
+r/aWIiA7BPkUseLNAG2k +=OQgo +-END PGP SIGNATURE- Added: dev/hbase/hbase-2.0.0-beta-1-RC3/hbase-2.0.0-beta-1-bin.tar.gz.md5 == --- dev/hbase/hbase-2.0.0-beta-1-RC3/hbase-2.0.0-beta-1-bin.tar.gz.md5 (added) +++ dev/hbase/hbase-2.0.0-beta-1-RC3/hbase-2.0.0-beta-1-bin.tar.gz.md5 Sat Jan 13 04:44:07 2018 @@ -0,0 +1 @@ +hbase-2.0.0-beta-1-bin.tar.gz: 47 68 61 49 84 2A 82 C6 83 F2 68 64 63 81 36 45 Added: dev/hbase/hbase-2.0.0-beta-1-RC3/hbase-2.0.0-beta-1-bin.tar.gz.sha == --- dev/hbase/hbase-2.0.0-beta-1-RC3/hbase-2.0.0-beta-1-bin.tar.gz.sha (added) +++ dev/hbase/hbase-2.0.0-beta-1-RC3/hbase-2.0.0-beta-1-bin.tar.gz.sha Sat Jan 13 04:44:07 2018 @@ -0,0 +1,4 @@ +hbase-2.0.0-beta-1-bin.tar.gz: D54EBA7D FD3C8DD7 2ACE6CCE 1D73E8C9 96341EC9 + 4C8D1A99 0128BBB2 1B594C81 19068C10 9177353C + D7D7098F DFD7AECE DCEE5595 4574B8DA D620B3D2 + 9EAFFEA5 Added: dev/hbase/hbase-2.0.0-beta-1-RC3/hbase-2.0.0-beta-1-src.tar.gz == Binary file - no diff available. Propchange: dev/hbase/hbase-2.0.0-beta-1-RC3/hbase-2.0.0-beta-1-src.tar.gz -- svn:mime-type = application/octet-stream Added: dev/hbase/hbase-2.0.0-beta-1-RC3/hbase-2.0.0-beta-1-src.tar.gz.asc == --- dev/hbase/hbase-2.0.0-beta-1-RC3/hbase-2.0.0-beta-1-src.tar.gz.asc (added) +++ dev/hbase/hbase-2.0.0-beta-1-RC3/hbase-2.0.0-beta-1-src.tar.gz.asc Sat Jan 13 04:44:07 2018 @@ -0,0 +1,16 @@ +-BEGIN PGP SIGNATURE- + +iQIcBAABCAAGBQJaWYbVAAoJEJgWx/yKzJPSGp8QAJfVaUmLqUWuxLSpVAVP7Pc6 +0xwGXK+CRnvttYOPIfO51wiSyqRmuh+KO9g1Xi1l/S591JOo/51umPSAOs9wUYod +Ov24mNIcGWKo/Ltz3l2BQBt/ToK7yU8/+1Jl4qCPtE3CVZYX5E96kG1tF9RDdNEu +uZg1qw3/IjRJtwUqtBNd26zCUTgljcp0baKuslLnJrnkDk1LmNg6EF9YaJSO4Fge +iqddzvfRG9Gx6rHwvECFfwVzWcoiBvKbaTDHxIGMASSf+FUS5WK84QESEtFEzO7R ++aShfsymXr6yCVWf1ALEB8F+ZSdGcmyvwwfZdii7PZ4DCMF8zJMnG+zcxHdwRuUp +Byrlw7acKg/J7ZBmrPadsX4NiuuHDzuKlb9kgqIuGjziBwsD5Jix1OpUCBtke1sn +EDdXEdgR/jFXVM2AeS0ODp/ra1JPD3vDiFgNOpH5TehyjbtzVTaq895tuhFJO+wo +VrVvDv73tRjMw7h/Utw/afh+jZbhOjV2HJYk18MbHNX9rRvKBKeqGlPtUCGvdL25 
+fpj/8WGNu4NVRWDDEELrh2KWr4f8FwNRuVbT1ahZlqNoaU4WFIX3Hx6/Li9riheD +brB8xIKo/Qn11j4FBmSg032jn/B9cVo7a3JSc+cr1teg8xUNdCTFHVuXT3T23eZr +pgpvd/aH3yZ5br3MPu4E +=pqM6 +-END PGP SIGNATURE- Added:
svn commit: r24172 - /dev/hbase/hbase-1.3.2-SNAPSHOT/
Author: apurtell Date: Sat Jan 13 02:13:01 2018 New Revision: 24172 Log: Stage HBase 1.3.2 SNAPSHOT 20180113T013910Z Added: dev/hbase/hbase-1.3.2-SNAPSHOT/ dev/hbase/hbase-1.3.2-SNAPSHOT/compat-check-report.html dev/hbase/hbase-1.3.2-SNAPSHOT/hbase-1.3.2-SNAPSHOT-bin.tar.gz (with props) dev/hbase/hbase-1.3.2-SNAPSHOT/hbase-1.3.2-SNAPSHOT-bin.tar.gz.asc dev/hbase/hbase-1.3.2-SNAPSHOT/hbase-1.3.2-SNAPSHOT-bin.tar.gz.md5 dev/hbase/hbase-1.3.2-SNAPSHOT/hbase-1.3.2-SNAPSHOT-bin.tar.gz.mds dev/hbase/hbase-1.3.2-SNAPSHOT/hbase-1.3.2-SNAPSHOT-bin.tar.gz.sha dev/hbase/hbase-1.3.2-SNAPSHOT/hbase-1.3.2-SNAPSHOT-src.tar.gz (with props) dev/hbase/hbase-1.3.2-SNAPSHOT/hbase-1.3.2-SNAPSHOT-src.tar.gz.asc dev/hbase/hbase-1.3.2-SNAPSHOT/hbase-1.3.2-SNAPSHOT-src.tar.gz.md5 dev/hbase/hbase-1.3.2-SNAPSHOT/hbase-1.3.2-SNAPSHOT-src.tar.gz.mds dev/hbase/hbase-1.3.2-SNAPSHOT/hbase-1.3.2-SNAPSHOT-src.tar.gz.sha Added: dev/hbase/hbase-1.3.2-SNAPSHOT/compat-check-report.html == --- dev/hbase/hbase-1.3.2-SNAPSHOT/compat-check-report.html (added) +++ dev/hbase/hbase-1.3.2-SNAPSHOT/compat-check-report.html Sat Jan 13 02:13:01 2018 @@ -0,0 +1,1009 @@ + + +http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd;> +http://www.w3.org/1999/xhtml; xml:lang="en" lang="en"> + + + + +hbase: rel/1.3.1 to b6f4f511a6 compatibility report + +body { +font-family:Arial, sans-serif; +background-color:White; +color:Black; +} +hr { +color:Black; +background-color:Black; +height:1px; +border:0; +} +h1 { +margin-bottom:0px; +padding-bottom:0px; +font-size:1.625em; +} +h2 { +margin-bottom:0px; +padding-bottom:0px; +font-size:1.25em; +white-space:nowrap; +} +div.symbols { +color:#003E69; +} +div.symbols i { +color:Brown; +} +span.section { +font-weight:bold; +cursor:pointer; +color:#003E69; +white-space:nowrap; +margin-left:0.3125em; +} +span:hover.section { +color:#336699; +} +span.sect_aff { +cursor:pointer; +padding-left:1.55em; +font-size:0.875em; +color:#cc3300; +} +span.ext { +font-weight:100; +} +span.jar { 
+color:#cc3300; +font-size:0.875em; +font-weight:bold; +} +div.jar_list { +padding-left:0.4em; +font-size:0.94em; +} +span.pkg_t { +color:#408080; +font-size:0.875em; +} +span.pkg { +color:#408080; +font-size:0.875em; +font-weight:bold; +} +span.cname { +color:Green; +font-size:0.875em; +font-weight:bold; +} +span.iname_b { +font-weight:bold; +} +span.iname_a { +color:#33; +font-weight:bold; +font-size:0.94em; +} +span.sym_p { +font-weight:normal; +white-space:normal; +} +span.sym_pd { +white-space:normal; +} +span.sym_p span, span.sym_pd span { +white-space:nowrap; +} +span.attr { +color:Black; +font-weight:100; +} +span.deprecated { +color:Red; +font-weight:bold; +font-family:Monaco, monospace; +} +div.affect { +padding-left:1em; +padding-bottom:10px; +font-size:0.87em; +font-style:italic; +line-height:0.9em; +} +div.affected { +padding-left:2em; +padding-top:10px; +} +table.ptable { +border-collapse:collapse; +border:1px outset black; +margin-left:0.95em; +margin-top:3px; +margin-bottom:3px; +width:56.25em; +} +table.ptable td { +border:1px solid Gray; +padding:3px; +font-size:0.875em; +text-align:left; +vertical-align:top; +max-width:28em; +word-wrap:break-word; +} +table.ptable th { +background-color:#ee; +font-weight:bold; +color:#33; +font-family:Verdana, Arial; +font-size:0.875em; +border:1px solid Gray; +text-align:center; +vertical-align:top; +white-space:nowrap; +padding:3px; +} +table.summary { +border-collapse:collapse; +border:1px outset black; +} +table.summary th { +background-color:#ee; +font-weight:100; +text-align:left; +font-size:0.94em; +white-space:nowrap; +border:1px inset Gray; +padding:3px; +} +table.summary td { +text-align:right; +white-space:nowrap; +border:1px inset Gray; +padding:3px 5px 3px 10px; +} +span.mngl { +padding-left:1em; +font-size:0.875em; +cursor:text; +color:#44; +font-weight:bold; +} +span.pleft { +padding-left:2.5em; +} +span.color_p { +font-style:italic; +color:Brown; +} +span.param { +font-style:italic; +} 
+span.focus_p { +font-style:italic; +background-color:#DCDCDC; +} +span.ttype { +font-weight:100; +} +span.nowrap { +white-space:nowrap; +} +span.value { +white-space:nowrap; +font-weight:bold; +} +.passed { +background-color:#CCFFCC; +font-weight:100; +} +.warning { +background-color:#F4F4AF; +font-weight:100; +} +.failed { +background-color:#FF; +font-weight:100; +} +.new { +
hbase git commit: HBASE-19790 Fix compatibility break in 1.3.2-SNAPSHOT
Repository: hbase Updated Branches: refs/heads/branch-1.3 bccc706d6 -> b6f4f511a HBASE-19790 Fix compatibility break in 1.3.2-SNAPSHOT Project: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/b6f4f511 Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/b6f4f511 Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/b6f4f511 Branch: refs/heads/branch-1.3 Commit: b6f4f511a6ab4491df526efd70ef7736812eb924 Parents: bccc706 Author: Andrew PurtellAuthored: Fri Jan 12 17:02:43 2018 -0800 Committer: Andrew Purtell Committed: Fri Jan 12 17:29:59 2018 -0800 -- .../java/org/apache/hadoop/hbase/regionserver/HRegion.java| 1 - .../java/org/apache/hadoop/hbase/regionserver/Region.java | 7 --- .../regionserver/snapshot/FlushSnapshotSubprocedure.java | 2 +- 3 files changed, 1 insertion(+), 9 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hbase/blob/b6f4f511/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java -- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java index 9c75ef8..02ef800 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java @@ -8439,7 +8439,6 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi closeRegionOperation(Operation.ANY); } - @Override public void closeRegionOperation(Operation operation) throws IOException { if (operation == Operation.SNAPSHOT) { for (Store store: stores.values()) { http://git-wip-us.apache.org/repos/asf/hbase/blob/b6f4f511/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java -- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java index 
59c1999..672d6ec 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java @@ -251,13 +251,6 @@ public interface Region extends ConfigurationObserver { */ void closeRegionOperation() throws IOException; - /** - * Closes the region operation lock. This needs to be called in the finally block corresponding - * to the try block of {@link #startRegionOperation(Operation)} - * @throws IOException - */ - void closeRegionOperation(Operation op) throws IOException; - // Row write locks /** http://git-wip-us.apache.org/repos/asf/hbase/blob/b6f4f511/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/FlushSnapshotSubprocedure.java -- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/FlushSnapshotSubprocedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/FlushSnapshotSubprocedure.java index c56204d..4a4bb4f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/FlushSnapshotSubprocedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/FlushSnapshotSubprocedure.java @@ -143,7 +143,7 @@ public class FlushSnapshotSubprocedure extends Subprocedure { } } finally { LOG.debug("Closing snapshot operation on " + region); -region.closeRegionOperation(Operation.SNAPSHOT); +((HRegion)region).closeRegionOperation(Operation.SNAPSHOT); } return null; }
svn commit: r24171 - /dev/hbase/hbase-1.4.1-SNAPSHOT/
Author: apurtell Date: Sat Jan 13 00:42:58 2018 New Revision: 24171 Log: Update HBase 1.4.1 SNAPSHOT to 20180112T235345Z Added: dev/hbase/hbase-1.4.1-SNAPSHOT/compat-check-report.html Modified: dev/hbase/hbase-1.4.1-SNAPSHOT/hbase-1.4.1-SNAPSHOT-bin.tar.gz dev/hbase/hbase-1.4.1-SNAPSHOT/hbase-1.4.1-SNAPSHOT-bin.tar.gz.asc dev/hbase/hbase-1.4.1-SNAPSHOT/hbase-1.4.1-SNAPSHOT-bin.tar.gz.md5 dev/hbase/hbase-1.4.1-SNAPSHOT/hbase-1.4.1-SNAPSHOT-bin.tar.gz.sha dev/hbase/hbase-1.4.1-SNAPSHOT/hbase-1.4.1-SNAPSHOT-src.tar.gz dev/hbase/hbase-1.4.1-SNAPSHOT/hbase-1.4.1-SNAPSHOT-src.tar.gz.asc dev/hbase/hbase-1.4.1-SNAPSHOT/hbase-1.4.1-SNAPSHOT-src.tar.gz.md5 dev/hbase/hbase-1.4.1-SNAPSHOT/hbase-1.4.1-SNAPSHOT-src.tar.gz.sha Added: dev/hbase/hbase-1.4.1-SNAPSHOT/compat-check-report.html == --- dev/hbase/hbase-1.4.1-SNAPSHOT/compat-check-report.html (added) +++ dev/hbase/hbase-1.4.1-SNAPSHOT/compat-check-report.html Sat Jan 13 00:42:58 2018 @@ -0,0 +1,573 @@ + + +http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd;> +http://www.w3.org/1999/xhtml; xml:lang="en" lang="en"> + + + + +hbase: rel/1.4.0 to e13e9029a1 compatibility report + +body { +font-family:Arial, sans-serif; +background-color:White; +color:Black; +} +hr { +color:Black; +background-color:Black; +height:1px; +border:0; +} +h1 { +margin-bottom:0px; +padding-bottom:0px; +font-size:1.625em; +} +h2 { +margin-bottom:0px; +padding-bottom:0px; +font-size:1.25em; +white-space:nowrap; +} +div.symbols { +color:#003E69; +} +div.symbols i { +color:Brown; +} +span.section { +font-weight:bold; +cursor:pointer; +color:#003E69; +white-space:nowrap; +margin-left:0.3125em; +} +span:hover.section { +color:#336699; +} +span.sect_aff { +cursor:pointer; +padding-left:1.55em; +font-size:0.875em; +color:#cc3300; +} +span.ext { +font-weight:100; +} +span.jar { +color:#cc3300; +font-size:0.875em; +font-weight:bold; +} +div.jar_list { +padding-left:0.4em; +font-size:0.94em; +} +span.pkg_t { +color:#408080; +font-size:0.875em; +} +span.pkg 
{ +color:#408080; +font-size:0.875em; +font-weight:bold; +} +span.cname { +color:Green; +font-size:0.875em; +font-weight:bold; +} +span.iname_b { +font-weight:bold; +} +span.iname_a { +color:#33; +font-weight:bold; +font-size:0.94em; +} +span.sym_p { +font-weight:normal; +white-space:normal; +} +span.sym_pd { +white-space:normal; +} +span.sym_p span, span.sym_pd span { +white-space:nowrap; +} +span.attr { +color:Black; +font-weight:100; +} +span.deprecated { +color:Red; +font-weight:bold; +font-family:Monaco, monospace; +} +div.affect { +padding-left:1em; +padding-bottom:10px; +font-size:0.87em; +font-style:italic; +line-height:0.9em; +} +div.affected { +padding-left:2em; +padding-top:10px; +} +table.ptable { +border-collapse:collapse; +border:1px outset black; +margin-left:0.95em; +margin-top:3px; +margin-bottom:3px; +width:56.25em; +} +table.ptable td { +border:1px solid Gray; +padding:3px; +font-size:0.875em; +text-align:left; +vertical-align:top; +max-width:28em; +word-wrap:break-word; +} +table.ptable th { +background-color:#ee; +font-weight:bold; +color:#33; +font-family:Verdana, Arial; +font-size:0.875em; +border:1px solid Gray; +text-align:center; +vertical-align:top; +white-space:nowrap; +padding:3px; +} +table.summary { +border-collapse:collapse; +border:1px outset black; +} +table.summary th { +background-color:#ee; +font-weight:100; +text-align:left; +font-size:0.94em; +white-space:nowrap; +border:1px inset Gray; +padding:3px; +} +table.summary td { +text-align:right; +white-space:nowrap; +border:1px inset Gray; +padding:3px 5px 3px 10px; +} +span.mngl { +padding-left:1em; +font-size:0.875em; +cursor:text; +color:#44; +font-weight:bold; +} +span.pleft { +padding-left:2.5em; +} +span.color_p { +font-style:italic; +color:Brown; +} +span.param { +font-style:italic; +} +span.focus_p { +font-style:italic; +background-color:#DCDCDC; +} +span.ttype { +font-weight:100; +} +span.nowrap { +white-space:nowrap; +} +span.value { +white-space:nowrap; 
+font-weight:bold; +} +.passed { +background-color:#CCFFCC; +font-weight:100; +} +.warning { +background-color:#F4F4AF; +font-weight:100; +} +.failed { +background-color:#FF; +font-weight:100; +} +.new { +background-color:#C6DEFF; +font-weight:100; +} + +.compatible { +background-color:#CCFFCC; +font-weight:100; +} +.almost_compatible { +background-color:#FFDAA3; +font-weight:100;
hbase git commit: HBASE-19789 Exclude flaky tests from nightly branch-2 runs
Repository: hbase Updated Branches: refs/heads/branch-2 026f535a7 -> f91589d30 HBASE-19789 Exclude flaky tests from nightly branch-2 runs Project: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/f91589d3 Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/f91589d3 Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/f91589d3 Branch: refs/heads/branch-2 Commit: f91589d3056f69a0eeac5bfd2736c271ec76ba95 Parents: 026f535 Author: Apekshit SharmaAuthored: Fri Jan 12 16:22:06 2018 -0800 Committer: Apekshit Sharma Committed: Fri Jan 12 16:24:45 2018 -0800 -- dev-support/Jenkinsfile| 6 -- dev-support/hbase_nightly_yetus.sh | 7 +-- 2 files changed, 9 insertions(+), 4 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hbase/blob/f91589d3/dev-support/Jenkinsfile -- diff --git a/dev-support/Jenkinsfile b/dev-support/Jenkinsfile index dcef649..e5d33e0 100644 --- a/dev-support/Jenkinsfile +++ b/dev-support/Jenkinsfile @@ -44,8 +44,10 @@ pipeline { // These tests currently have known failures. Once they burn down to 0, remove from here so that new problems will cause a failure. TESTS_FILTER = 'cc,checkstyle,javac,javadoc,pylint,shellcheck,whitespace,perlcritic,ruby-lint,rubocop,mvnsite' BRANCH_SPECIFIC_DOCKERFILE = "${env.BASEDIR}/dev-support/docker/Dockerfile" -// Only used for master branch. -EXCLUDE_TESTS_URL = 'https://builds.apache.org/job/HBase-Find-Flaky-Tests/lastSuccessfulBuild/artifact/excludes/' +// Flaky urls for different branches. Replace '-' and '.' in branch name by '_' because those +// characters are not allowed in bash variable name. 
+EXCLUDE_TESTS_URL_master = 'https://builds.apache.org/job/HBase-Find-Flaky-Tests/lastSuccessfulBuild/artifact/excludes/' +EXCLUDE_TESTS_URL_branch_2 = 'https://builds.apache.org/job/HBase-Find-Flaky-Tests-branch2.0/lastSuccessfulBuild/artifact/excludes/' } parameters { booleanParam(name: 'USE_YETUS_PRERELEASE', defaultValue: false, description: '''Check to use the current HEAD of apache/yetus rather than our configured release. http://git-wip-us.apache.org/repos/asf/hbase/blob/f91589d3/dev-support/hbase_nightly_yetus.sh -- diff --git a/dev-support/hbase_nightly_yetus.sh b/dev-support/hbase_nightly_yetus.sh index e1175d2..4e67354 100755 --- a/dev-support/hbase_nightly_yetus.sh +++ b/dev-support/hbase_nightly_yetus.sh @@ -69,10 +69,13 @@ YETUS_ARGS=("--branch=${BRANCH_NAME}" "${YETUS_ARGS[@]}") YETUS_ARGS=("--tests-filter=${TESTS_FILTER}" "${YETUS_ARGS[@]}") # Currently, flaky list is calculated only for master branch. -if [[ -n "${EXCLUDE_TESTS_URL}" && "${BRANCH_NAME}" == "master" ]]; then +UNDERSCORED_BRANCH_NAME=$(echo ${BRANCH_NAME} | tr '.-' '_') +EXCLUDE_TESTS_URL=$(eval echo "\$EXCLUDE_TESTS_URL_${UNDERSCORED_BRANCH_NAME}") +INCLUDE_TESTS_URL=$(eval echo "\$INCLUDE_TESTS_URL_${UNDERSCORED_BRANCH_NAME}") +if [[ -n "${EXCLUDE_TESTS_URL}" ]]; then YETUS_ARGS=("--exclude-tests-url=${EXCLUDE_TESTS_URL}" "${YETUS_ARGS[@]}") fi -if [[ -n "${INCLUDE_TESTS_URL}" && "${BRANCH_NAME}" == "master" ]]; then +if [[ -n "${INCLUDE_TESTS_URL}" ]]; then YETUS_ARGS=("--include-tests-url=${INCLUDE_TESTS_URL}" "${YETUS_ARGS[@]}") fi
[hbase] Git Push Summary
Repository: hbase Updated Tags: refs/tags/2.0.0-beta-1-RC1.7 [created] f5c6b1dc0
hbase git commit: HBASE-19752 RSGroupBasedLoadBalancer#getMisplacedRegions() should handle the case where rs group cannot be determined
Repository: hbase Updated Branches: refs/heads/branch-1.4 e13e9029a -> feb361cd7 HBASE-19752 RSGroupBasedLoadBalancer#getMisplacedRegions() should handle the case where rs group cannot be determined Project: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/feb361cd Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/feb361cd Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/feb361cd Branch: refs/heads/branch-1.4 Commit: feb361cd7230fb41b7743c4caa1f10ac2cedb7fe Parents: e13e902 Author: tedyuAuthored: Fri Jan 12 15:40:41 2018 -0800 Committer: tedyu Committed: Fri Jan 12 15:40:41 2018 -0800 -- .../hbase/rsgroup/RSGroupBasedLoadBalancer.java | 19 ++- .../balancer/TestRSGroupBasedLoadBalancer.java | 13 + 2 files changed, 27 insertions(+), 5 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hbase/blob/feb361cd/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java -- diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java index 049e723..5c08fb7 100644 --- a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java +++ b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java @@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.rsgroup; +import com.google.common.annotations.VisibleForTesting; import com.google.common.collect.ArrayListMultimap; import com.google.common.collect.LinkedListMultimap; import com.google.common.collect.ListMultimap; @@ -311,7 +312,8 @@ public class RSGroupBasedLoadBalancer implements RSGroupableBalancer, LoadBalanc return finalList; } - private Set getMisplacedRegions( + @VisibleForTesting + public Set getMisplacedRegions( Map regions) throws IOException { Set misplacedRegions = new HashSet(); for(Map.Entry region : regions.entrySet()) { 
@@ -319,12 +321,19 @@ public class RSGroupBasedLoadBalancer implements RSGroupableBalancer, LoadBalanc ServerName assignedServer = region.getValue(); RSGroupInfo info = infoManager.getRSGroup(infoManager.getRSGroupOfTable(regionInfo.getTable())); - if (assignedServer != null && - (info == null || !info.containsServer(assignedServer.getAddress( { + if (assignedServer == null) { +LOG.debug("There is no assigned server for " + region); +continue; + } + RSGroupInfo otherInfo = infoManager.getRSGroupOfServer(assignedServer.getAddress()); + if (info == null && otherInfo == null) { +LOG.warn("Couldn't obtain rs group information for " + region + " on " + assignedServer); +continue; + } + if ((info == null || !info.containsServer(assignedServer.getAddress( { LOG.debug("Found misplaced region: " + regionInfo.getRegionNameAsString() + " on server: " + assignedServer + -" found in group: " + -infoManager.getRSGroupOfServer(assignedServer.getAddress()) + +" found in group: " + otherInfo + " outside of group: " + (info == null ? 
"UNKNOWN" : info.getName())); misplacedRegions.add(regionInfo); } http://git-wip-us.apache.org/repos/asf/hbase/blob/feb361cd/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRSGroupBasedLoadBalancer.java -- diff --git a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRSGroupBasedLoadBalancer.java b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRSGroupBasedLoadBalancer.java index 2360ce8..e511d14 100644 --- a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRSGroupBasedLoadBalancer.java +++ b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRSGroupBasedLoadBalancer.java @@ -78,6 +78,7 @@ public class TestRSGroupBasedLoadBalancer { static String[] groups = new String[] { RSGroupInfo.DEFAULT_GROUP, "dg2", "dg3", "dg4" }; + static TableName table0 = TableName.valueOf("dt0"); static TableName[] tables = new TableName[] { TableName.valueOf("dt1"), TableName.valueOf("dt2"), @@ -222,6 +223,16 @@ public class TestRSGroupBasedLoadBalancer { assertRetainedAssignment(inputForTest, servers, newAssignment); } + @Test + public void testGetMisplacedRegions() throws Exception { +// Test case where region is not considered
hbase git commit: HBASE-11409 - Add more flexibility for input directory structure to LoadIncrementalHFiles
Repository: hbase Updated Branches: refs/heads/branch-1.4 de7b28419 -> e13e9029a HBASE-11409 - Add more flexibility for input directory structure to LoadIncrementalHFiles Project: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/e13e9029 Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/e13e9029 Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/e13e9029 Branch: refs/heads/branch-1.4 Commit: e13e9029a171cc4d26b8903d2d158a0ece65382b Parents: de7b284 Author: Rahul GidwaniAuthored: Fri Jan 12 13:35:26 2018 -0800 Committer: Andrew Purtell Committed: Fri Jan 12 15:31:59 2018 -0800 -- .../hbase/mapreduce/LoadIncrementalHFiles.java | 50 + .../mapreduce/TestLoadIncrementalHFiles.java| 74 2 files changed, 79 insertions(+), 45 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hbase/blob/e13e9029/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java -- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java index 853b59d..9d7d80b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java @@ -18,6 +18,7 @@ */ package org.apache.hadoop.hbase.mapreduce; +import com.google.common.annotations.VisibleForTesting; import static java.lang.String.format; import java.io.FileNotFoundException; @@ -96,6 +97,7 @@ import org.apache.hadoop.hbase.security.access.SecureBulkLoadEndpoint; import org.apache.hadoop.hbase.security.token.FsDelegationToken; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.FSHDFSUtils; +import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; @@ -135,6 
+137,7 @@ public class LoadIncrementalHFiles extends Configured implements Tool { private String bulkToken; private UserProvider userProvider; private int nrThreads; + private int depth = 2; private LoadIncrementalHFiles() {} @@ -143,6 +146,10 @@ public class LoadIncrementalHFiles extends Configured implements Tool { initialize(); } + public void setDepth(int depth) { +this.depth = depth; + } + private void initialize() throws Exception { if (hbAdmin == null) { // make a copy, just to be sure we're not overriding someone else's config @@ -161,9 +168,11 @@ public class LoadIncrementalHFiles extends Configured implements Tool { } private void usage() { -System.err.println("usage: " + NAME + " /path/to/hfileoutputformat-output tablename" + "\n -D" -+ CREATE_TABLE_CONF_KEY + "=no - can be used to avoid creation of table by this tool\n" -+ " Note: if you set this to 'no', then the target table must already exist in HBase\n" +System.err.println("usage: " + NAME + " /path/to/hfileoutputformat-output tablename -loadTable" ++ "\n -D" + CREATE_TABLE_CONF_KEY + "=no - can be used to avoid creation of table by " ++ "this tool\n Note: if you set this to 'no', then the target table must already exist " ++ "in HBase\n -loadTable implies your baseDirectory to store file has a depth of 3 ,you" ++ " must have an existing table" + "\n"); } @@ -287,22 +296,32 @@ public class LoadIncrementalHFiles extends Configured implements Tool { private void discoverLoadQueue(final Deque ret, final Path hfofDir, final boolean validateHFile) throws IOException { fs = hfofDir.getFileSystem(getConf()); -visitBulkHFiles(fs, hfofDir, new BulkHFileVisitor () { - @Override - public byte[] bulkFamily(final byte[] familyName) { +BulkHFileVisitor visitor = new BulkHFileVisitor () { + @Override public byte[] bulkFamily(final byte[] familyName) { return familyName; } - @Override - public void bulkHFile(final byte[] family, final FileStatus hfile) throws IOException { + + @Override public void bulkHFile(final 
byte[] family, final FileStatus hfile) + throws IOException { long length = hfile.getLen(); -if (length > getConf().getLong(HConstants.HREGION_MAX_FILESIZE, -HConstants.DEFAULT_MAX_FILE_SIZE)) { - LOG.warn("Trying to bulk load hfile " + hfile.getPath() + " with size: " + - length + " bytes can be problematic as it may lead to oversplitting."); +if (length > getConf() +
hbase git commit: Revert "HBASE-19752 RSGroupBasedLoadBalancer#getMisplacedRegions() should handle the case where rs group cannot be determined"
Repository: hbase Updated Branches: refs/heads/branch-1.4 8e62eb22e -> de7b28419 Revert "HBASE-19752 RSGroupBasedLoadBalancer#getMisplacedRegions() should handle the case where rs group cannot be determined" This reverts commit 8e62eb22e974790d47056a41aa54f6dd009756ea. Project: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/de7b2841 Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/de7b2841 Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/de7b2841 Branch: refs/heads/branch-1.4 Commit: de7b2841971492b3c0710c81d3811d123b8ee9fd Parents: 8e62eb2 Author: Andrew PurtellAuthored: Fri Jan 12 15:28:24 2018 -0800 Committer: Andrew Purtell Committed: Fri Jan 12 15:28:24 2018 -0800 -- .../hbase/rsgroup/RSGroupBasedLoadBalancer.java| 13 - .../balancer/TestRSGroupBasedLoadBalancer.java | 17 - 2 files changed, 4 insertions(+), 26 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hbase/blob/de7b2841/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java -- diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java index b627c42..049e723 100644 --- a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java +++ b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java @@ -20,7 +20,6 @@ package org.apache.hadoop.hbase.rsgroup; -import com.google.common.annotations.VisibleForTesting; import com.google.common.collect.ArrayListMultimap; import com.google.common.collect.LinkedListMultimap; import com.google.common.collect.ListMultimap; @@ -312,7 +311,6 @@ public class RSGroupBasedLoadBalancer implements RSGroupableBalancer, LoadBalanc return finalList; } - @VisibleForTesting private Set getMisplacedRegions( Map regions) throws IOException { Set misplacedRegions = new 
HashSet(); @@ -321,15 +319,12 @@ public class RSGroupBasedLoadBalancer implements RSGroupableBalancer, LoadBalanc ServerName assignedServer = region.getValue(); RSGroupInfo info = infoManager.getRSGroup(infoManager.getRSGroupOfTable(regionInfo.getTable())); - RSGroupInfo otherInfo = infoManager.getRSGroupOfServer(assignedServer.getAddress()); - if (info == null && otherInfo == null) { -LOG.warn("Couldn't obtain rs group information for " + region + " on " + assignedServer); -continue; - } - if ((info == null || !info.containsServer(assignedServer.getAddress( { + if (assignedServer != null && + (info == null || !info.containsServer(assignedServer.getAddress( { LOG.debug("Found misplaced region: " + regionInfo.getRegionNameAsString() + " on server: " + assignedServer + -" found in group: " + otherInfo + +" found in group: " + +infoManager.getRSGroupOfServer(assignedServer.getAddress()) + " outside of group: " + (info == null ? "UNKNOWN" : info.getName())); misplacedRegions.add(regionInfo); } http://git-wip-us.apache.org/repos/asf/hbase/blob/de7b2841/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRSGroupBasedLoadBalancer.java -- diff --git a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRSGroupBasedLoadBalancer.java b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRSGroupBasedLoadBalancer.java index 359a873..2360ce8 100644 --- a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRSGroupBasedLoadBalancer.java +++ b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRSGroupBasedLoadBalancer.java @@ -78,7 +78,6 @@ public class TestRSGroupBasedLoadBalancer { static String[] groups = new String[] { RSGroupInfo.DEFAULT_GROUP, "dg2", "dg3", "dg4" }; - static TableName table0 = TableName.valueOf("dt0"); static TableName[] tables = new TableName[] { TableName.valueOf("dt1"), TableName.valueOf("dt2"), @@ -223,20 +222,6 @@ public class 
TestRSGroupBasedLoadBalancer { assertRetainedAssignment(inputForTest, servers, newAssignment); } - @Test - public void testGetMisplacedRegions() throws Exception { -// Test case where region is not considered misplaced if RSGroupInfo cannot be determined -Map inputForTest = new HashMap<>(); -RegionInfo ri =
hbase git commit: HBASE-19787 Fix or disable tests broken in branch-2 so can cut beta-1
Repository: hbase Updated Branches: refs/heads/master c799b0e69 -> 4ddfecac5 HBASE-19787 Fix or disable tests broken in branch-2 so can cut beta-1 M dev-support/make_rc.sh Disable checkstyle building site. Its an issue being fixed over in HBASE-19780 M hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java The clusterid was being set into the process only after the regionserver registers with the Master. That can be too late for some test clients in particular. e.g. TestZKAsyncRegistry needs it as soon as it goes to run which could be before Master had called its run method which is regionserver run method which then calls back to the master to register itself... and only then do we set the clusterid. HBASE-19694 changed start order which made it so this test failed. Setting the clusterid right after we set it in zk makes the test pass. Another change was that backup masters were not going down on stop. Backup masters were sleeping for the default zk period which is 90 seconds. They were not being woken up to check for stop. On stop master now tells active master manager. M hbase-server/src/test/java/org/apache/hadoop/hbase/TestJMXConnectorServer.java Prevent creation of acl table. Messes up our being able to go down promptly. M hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRegionsOnMasterOptions.java M hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultiParallel.java M hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerReadRequestMetrics.java Disabled for now because it wants to run with regions on the Master... currently broke! M hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestZKAsyncRegistry.java Add a bit of debugging. M hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDLSAsyncFSWAL.java Disabled. Fails 40% of the time. M hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDLSFSHLog.java Disabled. Fails 33% of the time. 
Disabled stochastic load balancer for favored nodes because it fails on occasion and we are not doing favored nodes in branch-2. Project: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/4ddfecac Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/4ddfecac Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/4ddfecac Branch: refs/heads/master Commit: 4ddfecac563384de6f087fc7fc8bc62c0362877d Parents: c799b0e Author: Michael StackAuthored: Fri Jan 12 13:39:32 2018 -0800 Committer: Michael Stack Committed: Fri Jan 12 14:09:56 2018 -0800 -- dev-support/make_rc.sh | 7 +-- .../org/apache/hadoop/hbase/master/HMaster.java | 21 ++-- .../hbase/regionserver/HRegionServer.java | 2 +- .../hadoop/hbase/util/JVMClusterUtil.java | 1 - .../hadoop/hbase/HBaseTestingUtility.java | 2 +- .../hadoop/hbase/TestJMXConnectorServer.java| 10 +- .../hadoop/hbase/client/TestMultiParallel.java | 2 ++ .../hbase/client/TestZKAsyncRegistry.java | 12 --- .../hadoop/hbase/master/TestDLSAsyncFSWAL.java | 2 ++ .../hadoop/hbase/master/TestDLSFSHLog.java | 2 ++ .../hbase/master/TestTableStateManager.java | 2 +- .../TestFavoredStochasticLoadBalancer.java | 1 + .../balancer/TestRegionsOnMasterOptions.java| 5 - .../TestRegionServerReadRequestMetrics.java | 4 +++- 14 files changed, 55 insertions(+), 18 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hbase/blob/4ddfecac/dev-support/make_rc.sh -- diff --git a/dev-support/make_rc.sh b/dev-support/make_rc.sh index 8bfdde2..f067ee9 100755 --- a/dev-support/make_rc.sh +++ b/dev-support/make_rc.sh @@ -75,9 +75,12 @@ function build_src { # Build bin tgz function build_bin { - MAVEN_OPTS="${mvnopts}" ${mvn} clean install -DskipTests -Papache-release -Prelease \ + MAVEN_OPTS="${mvnopts}" ${mvn} clean install -DskipTests \ +-Papache-release -Prelease \ -Dmaven.repo.local=${output_dir}/repository - MAVEN_OPTS="${mvnopts}" ${mvn} install -DskipTests site assembly:single -Papache-release -Prelease 
\ + MAVEN_OPTS="${mvnopts}" ${mvn} install -DskipTests \ +-Dcheckstyle.skip=true site assembly:single \ +-Papache-release -Prelease \ -Dmaven.repo.local=${output_dir}/repository mv ./hbase-assembly/target/hbase-*.tar.gz "${output_dir}" } http://git-wip-us.apache.org/repos/asf/hbase/blob/4ddfecac/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java -- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
hbase git commit: HBASE-19787 Fix or disable tests broken in branch-2 so can cut beta-1
Repository: hbase Updated Branches: refs/heads/branch-2 d8271b036 -> 026f535a7 HBASE-19787 Fix or disable tests broken in branch-2 so can cut beta-1 M dev-support/make_rc.sh Disable checkstyle building site. Its an issue being fixed over in HBASE-19780 M hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java The clusterid was being set into the process only after the regionserver registers with the Master. That can be too late for some test clients in particular. e.g. TestZKAsyncRegistry needs it as soon as it goes to run which could be before Master had called its run method which is regionserver run method which then calls back to the master to register itself... and only then do we set the clusterid. HBASE-19694 changed start order which made it so this test failed. Setting the clusterid right after we set it in zk makes the test pass. Another change was that backup masters were not going down on stop. Backup masters were sleeping for the default zk period which is 90 seconds. They were not being woken up to check for stop. On stop master now tells active master manager. M hbase-server/src/test/java/org/apache/hadoop/hbase/TestJMXConnectorServer.java Prevent creation of acl table. Messes up our being able to go down promptly. M hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRegionsOnMasterOptions.java M hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultiParallel.java M hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerReadRequestMetrics.java Disabled for now because it wants to run with regions on the Master... currently broke! M hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestZKAsyncRegistry.java Add a bit of debugging. M hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDLSAsyncFSWAL.java Disabled. Fails 40% of the time. M hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDLSFSHLog.java Disabled. Fails 33% of the time. 
Disabled stochastic load balancer for favored nodes because it fails on occasion and we are not doing favored nodes in branch-2. Project: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/026f535a Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/026f535a Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/026f535a Branch: refs/heads/branch-2 Commit: 026f535a7747b89003252ded9585e827686aa79f Parents: d8271b0 Author: Michael StackAuthored: Fri Jan 12 13:39:32 2018 -0800 Committer: Michael Stack Committed: Fri Jan 12 14:09:23 2018 -0800 -- dev-support/make_rc.sh | 7 +-- .../org/apache/hadoop/hbase/master/HMaster.java | 21 ++-- .../hbase/regionserver/HRegionServer.java | 2 +- .../hadoop/hbase/util/JVMClusterUtil.java | 1 - .../hadoop/hbase/HBaseTestingUtility.java | 2 +- .../hadoop/hbase/TestJMXConnectorServer.java| 10 +- .../hadoop/hbase/client/TestMultiParallel.java | 2 ++ .../hbase/client/TestZKAsyncRegistry.java | 12 --- .../hadoop/hbase/master/TestDLSAsyncFSWAL.java | 2 ++ .../hadoop/hbase/master/TestDLSFSHLog.java | 2 ++ .../hbase/master/TestTableStateManager.java | 2 +- .../TestFavoredStochasticLoadBalancer.java | 1 + .../balancer/TestRegionsOnMasterOptions.java| 5 - .../TestRegionServerReadRequestMetrics.java | 4 +++- 14 files changed, 55 insertions(+), 18 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hbase/blob/026f535a/dev-support/make_rc.sh -- diff --git a/dev-support/make_rc.sh b/dev-support/make_rc.sh index 8bfdde2..f067ee9 100755 --- a/dev-support/make_rc.sh +++ b/dev-support/make_rc.sh @@ -75,9 +75,12 @@ function build_src { # Build bin tgz function build_bin { - MAVEN_OPTS="${mvnopts}" ${mvn} clean install -DskipTests -Papache-release -Prelease \ + MAVEN_OPTS="${mvnopts}" ${mvn} clean install -DskipTests \ +-Papache-release -Prelease \ -Dmaven.repo.local=${output_dir}/repository - MAVEN_OPTS="${mvnopts}" ${mvn} install -DskipTests site assembly:single -Papache-release 
-Prelease \ + MAVEN_OPTS="${mvnopts}" ${mvn} install -DskipTests \ +-Dcheckstyle.skip=true site assembly:single \ +-Papache-release -Prelease \ -Dmaven.repo.local=${output_dir}/repository mv ./hbase-assembly/target/hbase-*.tar.gz "${output_dir}" } http://git-wip-us.apache.org/repos/asf/hbase/blob/026f535a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java -- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
hbase git commit: HBASE-11409 - Add more flexibility for input directory structure to LoadIncrementalHFiles
Repository: hbase Updated Branches: refs/heads/branch-1 6f29a39d7 -> 48025cc84 HBASE-11409 - Add more flexibility for input directory structure to LoadIncrementalHFiles Project: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/48025cc8 Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/48025cc8 Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/48025cc8 Branch: refs/heads/branch-1 Commit: 48025cc84b791c7d64e7c01c59a06a4e16897459 Parents: 6f29a39 Author: Rahul GidwaniAuthored: Fri Jan 12 13:35:26 2018 -0800 Committer: Rahul Gidwani Committed: Fri Jan 12 13:35:26 2018 -0800 -- .../hbase/mapreduce/LoadIncrementalHFiles.java | 50 + .../mapreduce/TestLoadIncrementalHFiles.java| 74 2 files changed, 79 insertions(+), 45 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hbase/blob/48025cc8/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java -- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java index 853b59d..9d7d80b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java @@ -18,6 +18,7 @@ */ package org.apache.hadoop.hbase.mapreduce; +import com.google.common.annotations.VisibleForTesting; import static java.lang.String.format; import java.io.FileNotFoundException; @@ -96,6 +97,7 @@ import org.apache.hadoop.hbase.security.access.SecureBulkLoadEndpoint; import org.apache.hadoop.hbase.security.token.FsDelegationToken; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.FSHDFSUtils; +import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; @@ -135,6 +137,7 @@ 
public class LoadIncrementalHFiles extends Configured implements Tool { private String bulkToken; private UserProvider userProvider; private int nrThreads; + private int depth = 2; private LoadIncrementalHFiles() {} @@ -143,6 +146,10 @@ public class LoadIncrementalHFiles extends Configured implements Tool { initialize(); } + public void setDepth(int depth) { +this.depth = depth; + } + private void initialize() throws Exception { if (hbAdmin == null) { // make a copy, just to be sure we're not overriding someone else's config @@ -161,9 +168,11 @@ public class LoadIncrementalHFiles extends Configured implements Tool { } private void usage() { -System.err.println("usage: " + NAME + " /path/to/hfileoutputformat-output tablename" + "\n -D" -+ CREATE_TABLE_CONF_KEY + "=no - can be used to avoid creation of table by this tool\n" -+ " Note: if you set this to 'no', then the target table must already exist in HBase\n" +System.err.println("usage: " + NAME + " /path/to/hfileoutputformat-output tablename -loadTable" ++ "\n -D" + CREATE_TABLE_CONF_KEY + "=no - can be used to avoid creation of table by " ++ "this tool\n Note: if you set this to 'no', then the target table must already exist " ++ "in HBase\n -loadTable implies your baseDirectory to store file has a depth of 3 ,you" ++ " must have an existing table" + "\n"); } @@ -287,22 +296,32 @@ public class LoadIncrementalHFiles extends Configured implements Tool { private void discoverLoadQueue(final Deque ret, final Path hfofDir, final boolean validateHFile) throws IOException { fs = hfofDir.getFileSystem(getConf()); -visitBulkHFiles(fs, hfofDir, new BulkHFileVisitor () { - @Override - public byte[] bulkFamily(final byte[] familyName) { +BulkHFileVisitor visitor = new BulkHFileVisitor () { + @Override public byte[] bulkFamily(final byte[] familyName) { return familyName; } - @Override - public void bulkHFile(final byte[] family, final FileStatus hfile) throws IOException { + + @Override public void bulkHFile(final byte[] 
family, final FileStatus hfile) + throws IOException { long length = hfile.getLen(); -if (length > getConf().getLong(HConstants.HREGION_MAX_FILESIZE, -HConstants.DEFAULT_MAX_FILE_SIZE)) { - LOG.warn("Trying to bulk load hfile " + hfile.getPath() + " with size: " + - length + " bytes can be problematic as it may lead to oversplitting."); +if (length > getConf() +
hbase git commit: HBASE-19694 The initialization order for a fresh cluster is incorrect. Addendum: adds in a missing line that helps debugging.
Repository: hbase Updated Branches: refs/heads/master f0ba7922f -> c799b0e69 HBASE-19694 The initialization order for a fresh cluster is incorrect Addendum. Adds in a missing line that helps debugging. Project: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/c799b0e6 Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/c799b0e6 Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/c799b0e6 Branch: refs/heads/master Commit: c799b0e69bed5f0e894a703bcfbd208f44c980e7 Parents: f0ba792 Author: Michael StackAuthored: Thu Jan 11 17:47:00 2018 -0800 Committer: Michael Stack Committed: Fri Jan 12 13:09:05 2018 -0800 -- .../java/org/apache/hadoop/hbase/util/JVMClusterUtil.java | 7 --- 1 file changed, 4 insertions(+), 3 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hbase/blob/c799b0e6/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java -- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java index 00410af..1ac790f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java @@ -187,7 +187,9 @@ public class JVMClusterUtil { int startTimeout = configuration != null ? 
Integer.parseInt( configuration.get("hbase.master.start.timeout.localHBaseCluster", "3")) : 3; if (System.currentTimeMillis() > startTime + startTimeout) { -throw new RuntimeException(String.format("Master not active after %s seconds", startTimeout)); +String msg = "Master not active after " + startTimeout + "ms"; +Threads.printThreadInfo(System.out, "Thread dump because: " + msg); +throw new RuntimeException(msg); } } @@ -216,8 +218,7 @@ public class JVMClusterUtil { } if (System.currentTimeMillis() > startTime + maxwait) { String msg = "Master not initialized after " + maxwait + "ms seconds"; -Threads.printThreadInfo(System.out, - "Thread dump because: " + msg); +Threads.printThreadInfo(System.out, "Thread dump because: " + msg); throw new RuntimeException(msg); } try {
hbase git commit: HBASE-19694 The initialization order for a fresh cluster is incorrect. Addendum: adds in a missing line that helps debugging.
Repository: hbase Updated Branches: refs/heads/branch-2 9ed52ee3e -> d8271b036 HBASE-19694 The initialization order for a fresh cluster is incorrect Addendum. Adds in a missing line that helps debugging. Project: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/d8271b03 Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/d8271b03 Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/d8271b03 Branch: refs/heads/branch-2 Commit: d8271b03615c4fff23356c96e3c983a0cfffaf55 Parents: 9ed52ee Author: Michael StackAuthored: Thu Jan 11 17:47:00 2018 -0800 Committer: Michael Stack Committed: Fri Jan 12 13:08:27 2018 -0800 -- .../java/org/apache/hadoop/hbase/util/JVMClusterUtil.java | 7 --- 1 file changed, 4 insertions(+), 3 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hbase/blob/d8271b03/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java -- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java index 00410af..1ac790f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java @@ -187,7 +187,9 @@ public class JVMClusterUtil { int startTimeout = configuration != null ? 
Integer.parseInt( configuration.get("hbase.master.start.timeout.localHBaseCluster", "3")) : 3; if (System.currentTimeMillis() > startTime + startTimeout) { -throw new RuntimeException(String.format("Master not active after %s seconds", startTimeout)); +String msg = "Master not active after " + startTimeout + "ms"; +Threads.printThreadInfo(System.out, "Thread dump because: " + msg); +throw new RuntimeException(msg); } } @@ -216,8 +218,7 @@ public class JVMClusterUtil { } if (System.currentTimeMillis() > startTime + maxwait) { String msg = "Master not initialized after " + maxwait + "ms seconds"; -Threads.printThreadInfo(System.out, - "Thread dump because: " + msg); +Threads.printThreadInfo(System.out, "Thread dump because: " + msg); throw new RuntimeException(msg); } try {
hbase git commit: HBASE-19752 RSGroupBasedLoadBalancer#getMisplacedRegions() should handle the case where rs group cannot be determined
Repository: hbase Updated Branches: refs/heads/branch-1.4 b52fc8ec8 -> 8e62eb22e HBASE-19752 RSGroupBasedLoadBalancer#getMisplacedRegions() should handle the case where rs group cannot be determined Project: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/8e62eb22 Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/8e62eb22 Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/8e62eb22 Branch: refs/heads/branch-1.4 Commit: 8e62eb22e974790d47056a41aa54f6dd009756ea Parents: b52fc8e Author: tedyuAuthored: Fri Jan 12 12:59:28 2018 -0800 Committer: tedyu Committed: Fri Jan 12 12:59:28 2018 -0800 -- .../hbase/rsgroup/RSGroupBasedLoadBalancer.java| 13 + .../balancer/TestRSGroupBasedLoadBalancer.java | 17 + 2 files changed, 26 insertions(+), 4 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hbase/blob/8e62eb22/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java -- diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java index 049e723..b627c42 100644 --- a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java +++ b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java @@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.rsgroup; +import com.google.common.annotations.VisibleForTesting; import com.google.common.collect.ArrayListMultimap; import com.google.common.collect.LinkedListMultimap; import com.google.common.collect.ListMultimap; @@ -311,6 +312,7 @@ public class RSGroupBasedLoadBalancer implements RSGroupableBalancer, LoadBalanc return finalList; } + @VisibleForTesting private Set getMisplacedRegions( Map regions) throws IOException { Set misplacedRegions = new HashSet(); @@ -319,12 +321,15 @@ public class RSGroupBasedLoadBalancer implements 
RSGroupableBalancer, LoadBalanc ServerName assignedServer = region.getValue(); RSGroupInfo info = infoManager.getRSGroup(infoManager.getRSGroupOfTable(regionInfo.getTable())); - if (assignedServer != null && - (info == null || !info.containsServer(assignedServer.getAddress( { + RSGroupInfo otherInfo = infoManager.getRSGroupOfServer(assignedServer.getAddress()); + if (info == null && otherInfo == null) { +LOG.warn("Couldn't obtain rs group information for " + region + " on " + assignedServer); +continue; + } + if ((info == null || !info.containsServer(assignedServer.getAddress( { LOG.debug("Found misplaced region: " + regionInfo.getRegionNameAsString() + " on server: " + assignedServer + -" found in group: " + -infoManager.getRSGroupOfServer(assignedServer.getAddress()) + +" found in group: " + otherInfo + " outside of group: " + (info == null ? "UNKNOWN" : info.getName())); misplacedRegions.add(regionInfo); } http://git-wip-us.apache.org/repos/asf/hbase/blob/8e62eb22/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRSGroupBasedLoadBalancer.java -- diff --git a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRSGroupBasedLoadBalancer.java b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRSGroupBasedLoadBalancer.java index 2360ce8..359a873 100644 --- a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRSGroupBasedLoadBalancer.java +++ b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRSGroupBasedLoadBalancer.java @@ -78,6 +78,7 @@ public class TestRSGroupBasedLoadBalancer { static String[] groups = new String[] { RSGroupInfo.DEFAULT_GROUP, "dg2", "dg3", "dg4" }; + static TableName table0 = TableName.valueOf("dt0"); static TableName[] tables = new TableName[] { TableName.valueOf("dt1"), TableName.valueOf("dt2"), @@ -222,6 +223,20 @@ public class TestRSGroupBasedLoadBalancer { assertRetainedAssignment(inputForTest, servers, newAssignment); } + @Test + 
public void testGetMisplacedRegions() throws Exception { +// Test case where region is not considered misplaced if RSGroupInfo cannot be determined +Map inputForTest = new HashMap<>(); +RegionInfo ri = RegionInfoBuilder.newBuilder(table0) +.setStartKey(new byte[16]) +.setEndKey(new byte[16]) +
hbase git commit: HBASE-19752 RSGroupBasedLoadBalancer#getMisplacedRegions() should handle the case where rs group cannot be determined
Repository: hbase Updated Branches: refs/heads/branch-1 b29a138ec -> 6f29a39d7 HBASE-19752 RSGroupBasedLoadBalancer#getMisplacedRegions() should handle the case where rs group cannot be determined Project: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/6f29a39d Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/6f29a39d Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/6f29a39d Branch: refs/heads/branch-1 Commit: 6f29a39d764caf1b95c29bf93f2f6702b2e24250 Parents: b29a138 Author: tedyuAuthored: Fri Jan 12 12:57:55 2018 -0800 Committer: tedyu Committed: Fri Jan 12 12:57:55 2018 -0800 -- .../hbase/rsgroup/RSGroupBasedLoadBalancer.java | 19 ++- .../balancer/TestRSGroupBasedLoadBalancer.java | 13 + 2 files changed, 27 insertions(+), 5 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hbase/blob/6f29a39d/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java -- diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java index 049e723..5c08fb7 100644 --- a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java +++ b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java @@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.rsgroup; +import com.google.common.annotations.VisibleForTesting; import com.google.common.collect.ArrayListMultimap; import com.google.common.collect.LinkedListMultimap; import com.google.common.collect.ListMultimap; @@ -311,7 +312,8 @@ public class RSGroupBasedLoadBalancer implements RSGroupableBalancer, LoadBalanc return finalList; } - private Set getMisplacedRegions( + @VisibleForTesting + public Set getMisplacedRegions( Map regions) throws IOException { Set misplacedRegions = new HashSet(); for(Map.Entry region : regions.entrySet()) { @@ 
-319,12 +321,19 @@ public class RSGroupBasedLoadBalancer implements RSGroupableBalancer, LoadBalanc ServerName assignedServer = region.getValue(); RSGroupInfo info = infoManager.getRSGroup(infoManager.getRSGroupOfTable(regionInfo.getTable())); - if (assignedServer != null && - (info == null || !info.containsServer(assignedServer.getAddress( { + if (assignedServer == null) { +LOG.debug("There is no assigned server for " + region); +continue; + } + RSGroupInfo otherInfo = infoManager.getRSGroupOfServer(assignedServer.getAddress()); + if (info == null && otherInfo == null) { +LOG.warn("Couldn't obtain rs group information for " + region + " on " + assignedServer); +continue; + } + if ((info == null || !info.containsServer(assignedServer.getAddress( { LOG.debug("Found misplaced region: " + regionInfo.getRegionNameAsString() + " on server: " + assignedServer + -" found in group: " + -infoManager.getRSGroupOfServer(assignedServer.getAddress()) + +" found in group: " + otherInfo + " outside of group: " + (info == null ? 
"UNKNOWN" : info.getName())); misplacedRegions.add(regionInfo); } http://git-wip-us.apache.org/repos/asf/hbase/blob/6f29a39d/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRSGroupBasedLoadBalancer.java -- diff --git a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRSGroupBasedLoadBalancer.java b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRSGroupBasedLoadBalancer.java index 2360ce8..e511d14 100644 --- a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRSGroupBasedLoadBalancer.java +++ b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRSGroupBasedLoadBalancer.java @@ -78,6 +78,7 @@ public class TestRSGroupBasedLoadBalancer { static String[] groups = new String[] { RSGroupInfo.DEFAULT_GROUP, "dg2", "dg3", "dg4" }; + static TableName table0 = TableName.valueOf("dt0"); static TableName[] tables = new TableName[] { TableName.valueOf("dt1"), TableName.valueOf("dt2"), @@ -222,6 +223,16 @@ public class TestRSGroupBasedLoadBalancer { assertRetainedAssignment(inputForTest, servers, newAssignment); } + @Test + public void testGetMisplacedRegions() throws Exception { +// Test case where region is not considered misplaced
hbase git commit: HBASE-19752 RSGroupBasedLoadBalancer#getMisplacedRegions() should handle the case where rs group cannot be determined
Repository: hbase Updated Branches: refs/heads/branch-2 b66208073 -> 9ed52ee3e HBASE-19752 RSGroupBasedLoadBalancer#getMisplacedRegions() should handle the case where rs group cannot be determined Project: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/9ed52ee3 Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/9ed52ee3 Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/9ed52ee3 Branch: refs/heads/branch-2 Commit: 9ed52ee3e5dd6cc7a3c02953e0afab11de8dd680 Parents: b662080 Author: tedyuAuthored: Fri Jan 12 12:16:06 2018 -0800 Committer: tedyu Committed: Fri Jan 12 12:16:06 2018 -0800 -- .../hbase/rsgroup/RSGroupBasedLoadBalancer.java| 17 - .../balancer/TestRSGroupBasedLoadBalancer.java | 17 + 2 files changed, 29 insertions(+), 5 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hbase/blob/9ed52ee3/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java -- diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java index 1c70925..619de9e 100644 --- a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java +++ b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java @@ -300,7 +300,8 @@ public class RSGroupBasedLoadBalancer implements RSGroupableBalancer { return finalList; } - private Set getMisplacedRegions( + @VisibleForTesting + public Set getMisplacedRegions( Map regions) throws IOException { Set misplacedRegions = new HashSet<>(); for(Map.Entry region : regions.entrySet()) { @@ -308,10 +309,16 @@ public class RSGroupBasedLoadBalancer implements RSGroupableBalancer { ServerName assignedServer = region.getValue(); RSGroupInfo info = rsGroupInfoManager.getRSGroup(rsGroupInfoManager. 
getRSGroupOfTable(regionInfo.getTable())); - if (assignedServer != null && - (info == null || !info.containsServer(assignedServer.getAddress( { -RSGroupInfo otherInfo = null; -otherInfo = rsGroupInfoManager.getRSGroupOfServer(assignedServer.getAddress()); + if (assignedServer == null) { +LOG.debug("There is no assigned server for {}", region); +continue; + } + RSGroupInfo otherInfo = rsGroupInfoManager.getRSGroupOfServer(assignedServer.getAddress()); + if (info == null && otherInfo == null) { +LOG.warn("Couldn't obtain rs group information for {} on {}", region, assignedServer); +continue; + } + if ((info == null || !info.containsServer(assignedServer.getAddress( { LOG.debug("Found misplaced region: " + regionInfo.getRegionNameAsString() + " on server: " + assignedServer + " found in group: " + otherInfo + http://git-wip-us.apache.org/repos/asf/hbase/blob/9ed52ee3/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRSGroupBasedLoadBalancer.java -- diff --git a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRSGroupBasedLoadBalancer.java b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRSGroupBasedLoadBalancer.java index e234438..68845d7 100644 --- a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRSGroupBasedLoadBalancer.java +++ b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRSGroupBasedLoadBalancer.java @@ -76,6 +76,7 @@ public class TestRSGroupBasedLoadBalancer { static String[] groups = new String[] { RSGroupInfo.DEFAULT_GROUP, "dg2", "dg3", "dg4" }; + static TableName table0 = TableName.valueOf("dt0"); static TableName[] tables = new TableName[] { TableName.valueOf("dt1"), TableName.valueOf("dt2"), @@ -225,6 +226,20 @@ public class TestRSGroupBasedLoadBalancer { assertClusterAsBalanced(loadMap); } + @Test + public void testGetMisplacedRegions() throws Exception { +// Test case where region is not considered misplaced if RSGroupInfo cannot be 
determined +Map inputForTest = new HashMap<>(); +RegionInfo ri = RegionInfoBuilder.newBuilder(table0) +.setStartKey(new byte[16]) +.setEndKey(new byte[16]) +.setSplit(false) +.setRegionId(regionId++) +.build(); +inputForTest.put(ri, servers.iterator().next()); +Set misplacedRegions =
hbase git commit: HBASE-19752 RSGroupBasedLoadBalancer#getMisplacedRegions() should handle the case where rs group cannot be determined
Repository: hbase Updated Branches: refs/heads/master a5601c8ea -> f0ba7922f HBASE-19752 RSGroupBasedLoadBalancer#getMisplacedRegions() should handle the case where rs group cannot be determined Project: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/f0ba7922 Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/f0ba7922 Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/f0ba7922 Branch: refs/heads/master Commit: f0ba7922f3963276304aee1d354413bcf966e563 Parents: a5601c8 Author: tedyuAuthored: Fri Jan 12 12:10:22 2018 -0800 Committer: tedyu Committed: Fri Jan 12 12:10:22 2018 -0800 -- .../hbase/rsgroup/RSGroupBasedLoadBalancer.java| 17 - .../balancer/TestRSGroupBasedLoadBalancer.java | 17 + 2 files changed, 29 insertions(+), 5 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hbase/blob/f0ba7922/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java -- diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java index 65053b8..550f734 100644 --- a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java +++ b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java @@ -301,7 +301,8 @@ public class RSGroupBasedLoadBalancer implements RSGroupableBalancer { return finalList; } - private Set getMisplacedRegions( + @VisibleForTesting + public Set getMisplacedRegions( Map regions) throws IOException { Set misplacedRegions = new HashSet<>(); for(Map.Entry region : regions.entrySet()) { @@ -309,10 +310,16 @@ public class RSGroupBasedLoadBalancer implements RSGroupableBalancer { ServerName assignedServer = region.getValue(); RSGroupInfo info = rsGroupInfoManager.getRSGroup(rsGroupInfoManager. 
getRSGroupOfTable(regionInfo.getTable())); - if (assignedServer != null && - (info == null || !info.containsServer(assignedServer.getAddress( { -RSGroupInfo otherInfo = null; -otherInfo = rsGroupInfoManager.getRSGroupOfServer(assignedServer.getAddress()); + if (assignedServer == null) { +LOG.debug("There is no assigned server for {}", region); +continue; + } + RSGroupInfo otherInfo = rsGroupInfoManager.getRSGroupOfServer(assignedServer.getAddress()); + if (info == null && otherInfo == null) { +LOG.warn("Couldn't obtain rs group information for {} on {}", region, assignedServer); +continue; + } + if ((info == null || !info.containsServer(assignedServer.getAddress( { LOG.debug("Found misplaced region: " + regionInfo.getRegionNameAsString() + " on server: " + assignedServer + " found in group: " + otherInfo + http://git-wip-us.apache.org/repos/asf/hbase/blob/f0ba7922/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRSGroupBasedLoadBalancer.java -- diff --git a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRSGroupBasedLoadBalancer.java b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRSGroupBasedLoadBalancer.java index e783a57..f36199e 100644 --- a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRSGroupBasedLoadBalancer.java +++ b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRSGroupBasedLoadBalancer.java @@ -75,6 +75,7 @@ public class TestRSGroupBasedLoadBalancer { private static SecureRandom rand; static String[] groups = new String[] { RSGroupInfo.DEFAULT_GROUP, "dg2", "dg3", "dg4" }; + static TableName table0 = TableName.valueOf("dt0"); static TableName[] tables = new TableName[] { TableName.valueOf("dt1"), TableName.valueOf("dt2"), @@ -214,6 +215,20 @@ public class TestRSGroupBasedLoadBalancer { assertClusterAsBalanced(loadMap); } + @Test + public void testGetMisplacedRegions() throws Exception { +// Test case where region is not considered 
misplaced if RSGroupInfo cannot be determined +Map inputForTest = new HashMap<>(); +RegionInfo ri = RegionInfoBuilder.newBuilder(table0) +.setStartKey(new byte[16]) +.setEndKey(new byte[16]) +.setSplit(false) +.setRegionId(regionId++) +.build(); +inputForTest.put(ri, servers.iterator().next()); +Set misplacedRegions
hbase git commit: HBASE-19568: Restore of HBase table using incremental backup doesn't restore rows from an earlier incremental backup
Repository: hbase Updated Branches: refs/heads/master 057e80c16 -> a5601c8ea HBASE-19568: Restore of HBase table using incremental backup doesn't restore rows from an earlier incremental backup Signed-off-by: Josh ElserProject: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/a5601c8e Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/a5601c8e Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/a5601c8e Branch: refs/heads/master Commit: a5601c8eac6bfcac7d869574547f505d44e49065 Parents: 057e80c Author: Vladimir Rodionov Authored: Wed Jan 10 16:26:09 2018 -0800 Committer: Josh Elser Committed: Fri Jan 12 13:13:17 2018 -0500 -- .../hbase/backup/impl/BackupAdminImpl.java | 2 +- .../hadoop/hbase/backup/impl/BackupManager.java | 19 +-- .../hbase/backup/impl/BackupSystemTable.java| 135 +-- .../impl/IncrementalTableBackupClient.java | 59 ++-- .../hbase/backup/impl/RestoreTablesClient.java | 55 .../hbase/backup/impl/TableBackupClient.java| 32 ++--- .../hadoop/hbase/backup/TestBackupBase.java | 4 - .../TestIncrementalBackupWithBulkLoad.java | 24 +++- 8 files changed, 213 insertions(+), 117 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hbase/blob/a5601c8e/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupAdminImpl.java -- diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupAdminImpl.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupAdminImpl.java index 8ba57d2..f27490c 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupAdminImpl.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupAdminImpl.java @@ -271,7 +271,7 @@ public class BackupAdminImpl implements BackupAdmin { LOG.debug(numDeleted + " bulk loaded files out of " + map.size() + " were deleted"); } if (success) { -sysTable.deleteBulkLoadedFiles(map); +sysTable.deleteBulkLoadedRows(new ArrayList (map.keySet())); } 
sysTable.deleteBackupInfo(backupInfo.getBackupId()); http://git-wip-us.apache.org/repos/asf/hbase/blob/a5601c8e/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManager.java -- diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManager.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManager.java index 7199fd5..4ca998c 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManager.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManager.java @@ -34,6 +34,7 @@ import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.backup.BackupInfo; import org.apache.hadoop.hbase.backup.BackupInfo.BackupState; +import org.apache.hadoop.hbase.backup.BackupObserver; import org.apache.hadoop.hbase.backup.BackupRestoreConstants; import org.apache.hadoop.hbase.backup.BackupType; import org.apache.hadoop.hbase.backup.HBackupFileSystem; @@ -43,6 +44,7 @@ import org.apache.hadoop.hbase.backup.master.LogRollMasterProcedureManager; import org.apache.hadoop.hbase.backup.regionserver.LogRollRegionServerProcedureManager; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; import org.apache.hadoop.hbase.procedure.ProcedureManagerHost; import org.apache.hadoop.hbase.util.Pair; import org.apache.yetus.audience.InterfaceAudience; @@ -140,10 +142,14 @@ public class BackupManager implements Closeable { conf.set(ProcedureManagerHost.REGIONSERVER_PROCEDURE_CONF_KEY, classes + "," + regionProcedureClass); } +String coproc = conf.get(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY); +String regionObserverClass = BackupObserver.class.getName(); +conf.set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, (coproc == null ? 
"" : coproc + ",") + +regionObserverClass); if (LOG.isDebugEnabled()) { - LOG.debug("Added region procedure manager: " + regionProcedureClass); + LOG.debug("Added region procedure manager: " + regionProcedureClass + +". Added region observer: " + regionObserverClass); } - } public static boolean isBackupEnabled(Configuration conf) { @@ -415,13 +421,8 @@ public class BackupManager implements Closeable { return systemTable.readBulkloadRows(tableList); } - public void
[1/4] hbase git commit: HBASE-19775 hbase shell doesn't handle the exceptions that are wrapped in java.io.UncheckedIOException
Repository: hbase Updated Branches: refs/heads/branch-2 f7fbc168a -> b66208073 refs/heads/master 8bbfcdda4 -> 057e80c16 HBASE-19775 hbase shell doesn't handle the exceptions that are wrapped in java.io.UncheckedIOException Signed-off-by: Josh ElserSigned-off-by: Michael Stack Project: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/b979487c Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/b979487c Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/b979487c Branch: refs/heads/branch-2 Commit: b979487cace818e59945d65bb37db399e777ab72 Parents: f7fbc16 Author: Sergey Soldatov Authored: Thu Jan 11 16:02:40 2018 -0800 Committer: Josh Elser Committed: Fri Jan 12 12:38:13 2018 -0500 -- hbase-shell/src/main/ruby/shell/commands.rb | 1 + 1 file changed, 1 insertion(+) -- http://git-wip-us.apache.org/repos/asf/hbase/blob/b979487c/hbase-shell/src/main/ruby/shell/commands.rb -- diff --git a/hbase-shell/src/main/ruby/shell/commands.rb b/hbase-shell/src/main/ruby/shell/commands.rb index 1b8de9e..990e978 100644 --- a/hbase-shell/src/main/ruby/shell/commands.rb +++ b/hbase-shell/src/main/ruby/shell/commands.rb @@ -108,6 +108,7 @@ module Shell yield rescue => cause # let individual command handle exceptions first +cause = cause.getCause if cause.is_a? java.io.UncheckedIOException handle_exceptions(cause, *args) if respond_to?(:handle_exceptions) # Global HBase exception handling below if not handled by respective command above if cause.is_a?(org.apache.hadoop.hbase.TableNotFoundException)
[2/4] hbase git commit: HBASE-19769 Remove ZK metrics because of classloader issues
HBASE-19769 Remove ZK metrics because of classloader issues When we run MapReduce jobs via `yarn jar`, the special classloader which is set up by YARN creates a situation where our invocation of package-private Hadoop classes throws an IllegalAccessError. It's easiest to just remove these and rethink how to avoid further Hadoop metrics2 issues. Signed-off-by: Michael StackProject: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/b6620807 Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/b6620807 Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/b6620807 Branch: refs/heads/branch-2 Commit: b662080734abfa3a11486147d365eb682e0b4ee7 Parents: b979487 Author: Josh Elser Authored: Thu Jan 11 18:14:32 2018 -0500 Committer: Josh Elser Committed: Fri Jan 12 12:40:47 2018 -0500 -- .../hbase/zookeeper/RecoverableZooKeeper.java | 78 -- .../hadoop/hbase/zookeeper/ZKMetrics.java | 108 --- .../hbase/zookeeper/ZKMetricsListener.java | 91 .../hadoop/hbase/zookeeper/TestZKMetrics.java | 80 -- 4 files changed, 357 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hbase/blob/b6620807/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.java -- diff --git a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.java b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.java index 4c76a5c..5df31b9 100644 --- a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.java +++ b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.java @@ -82,7 +82,6 @@ public class RecoverableZooKeeper { private Watcher watcher; private int sessionTimeout; private String quorumServers; - private final ZKMetricsListener metrics; public RecoverableZooKeeper(String quorumServers, int sessionTimeout, Watcher watcher, int maxRetries, int retryIntervalMillis, int maxSleepTime) @@ 
-112,7 +111,6 @@ public class RecoverableZooKeeper { this.watcher = watcher; this.sessionTimeout = sessionTimeout; this.quorumServers = quorumServers; -this.metrics = new ZKMetrics(); try { checkZk(); @@ -166,11 +164,8 @@ public class RecoverableZooKeeper { try { long startTime = EnvironmentEdgeManager.currentTime(); checkZk().delete(path, version); - this.metrics.registerWriteOperationLatency( - Math.min(EnvironmentEdgeManager.currentTime() - startTime, 1)); return; } catch (KeeperException e) { - this.metrics.registerFailedZKCall(); switch (e.code()) { case NONODE: if (isRetry) { @@ -182,11 +177,9 @@ public class RecoverableZooKeeper { throw e; case CONNECTIONLOSS: - this.metrics.registerConnectionLossException(); retryOrThrow(retryCounter, e, "delete"); break; case OPERATIONTIMEOUT: - this.metrics.registerOperationTimeoutException(); retryOrThrow(retryCounter, e, "delete"); break; @@ -211,18 +204,13 @@ public class RecoverableZooKeeper { try { long startTime = EnvironmentEdgeManager.currentTime(); Stat nodeStat = checkZk().exists(path, watcher); - this.metrics.registerReadOperationLatency( - Math.min(EnvironmentEdgeManager.currentTime() - startTime, 1)); return nodeStat; } catch (KeeperException e) { - this.metrics.registerFailedZKCall(); switch (e.code()) { case CONNECTIONLOSS: - this.metrics.registerConnectionLossException(); retryOrThrow(retryCounter, e, "exists"); break; case OPERATIONTIMEOUT: - this.metrics.registerOperationTimeoutException(); retryOrThrow(retryCounter, e, "exists"); break; @@ -246,18 +234,13 @@ public class RecoverableZooKeeper { try { long startTime = EnvironmentEdgeManager.currentTime(); Stat nodeStat = checkZk().exists(path, watch); - this.metrics.registerReadOperationLatency( - Math.min(EnvironmentEdgeManager.currentTime() - startTime, 1)); return nodeStat; } catch (KeeperException e) { - this.metrics.registerFailedZKCall(); switch (e.code()) { case CONNECTIONLOSS: - this.metrics.registerConnectionLossException();
[4/4] hbase git commit: HBASE-19769 Remove ZK metrics because of classloader issues
HBASE-19769 Remove ZK metrics because of classloader issues When we run MapReduce jobs via `yarn jar`, the special classloader which is set up by YARN creates a situation where our invocation of package-private Hadoop classes throws an IllegalAccessError. It's easiest to just remove these and rethink how to avoid further Hadoop metrics2 issues. Signed-off-by: Michael StackProject: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/057e80c1 Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/057e80c1 Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/057e80c1 Branch: refs/heads/master Commit: 057e80c1639412b25c2b3462851729a33cac39c7 Parents: 6bacb64 Author: Josh Elser Authored: Thu Jan 11 18:14:32 2018 -0500 Committer: Josh Elser Committed: Fri Jan 12 12:50:18 2018 -0500 -- .../hbase/zookeeper/RecoverableZooKeeper.java | 78 -- .../hadoop/hbase/zookeeper/ZKMetrics.java | 108 --- .../hbase/zookeeper/ZKMetricsListener.java | 91 .../hadoop/hbase/zookeeper/TestZKMetrics.java | 80 -- 4 files changed, 357 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hbase/blob/057e80c1/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.java -- diff --git a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.java b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.java index 4c76a5c..5df31b9 100644 --- a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.java +++ b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.java @@ -82,7 +82,6 @@ public class RecoverableZooKeeper { private Watcher watcher; private int sessionTimeout; private String quorumServers; - private final ZKMetricsListener metrics; public RecoverableZooKeeper(String quorumServers, int sessionTimeout, Watcher watcher, int maxRetries, int retryIntervalMillis, int maxSleepTime) @@ 
-112,7 +111,6 @@ public class RecoverableZooKeeper { this.watcher = watcher; this.sessionTimeout = sessionTimeout; this.quorumServers = quorumServers; -this.metrics = new ZKMetrics(); try { checkZk(); @@ -166,11 +164,8 @@ public class RecoverableZooKeeper { try { long startTime = EnvironmentEdgeManager.currentTime(); checkZk().delete(path, version); - this.metrics.registerWriteOperationLatency( - Math.min(EnvironmentEdgeManager.currentTime() - startTime, 1)); return; } catch (KeeperException e) { - this.metrics.registerFailedZKCall(); switch (e.code()) { case NONODE: if (isRetry) { @@ -182,11 +177,9 @@ public class RecoverableZooKeeper { throw e; case CONNECTIONLOSS: - this.metrics.registerConnectionLossException(); retryOrThrow(retryCounter, e, "delete"); break; case OPERATIONTIMEOUT: - this.metrics.registerOperationTimeoutException(); retryOrThrow(retryCounter, e, "delete"); break; @@ -211,18 +204,13 @@ public class RecoverableZooKeeper { try { long startTime = EnvironmentEdgeManager.currentTime(); Stat nodeStat = checkZk().exists(path, watcher); - this.metrics.registerReadOperationLatency( - Math.min(EnvironmentEdgeManager.currentTime() - startTime, 1)); return nodeStat; } catch (KeeperException e) { - this.metrics.registerFailedZKCall(); switch (e.code()) { case CONNECTIONLOSS: - this.metrics.registerConnectionLossException(); retryOrThrow(retryCounter, e, "exists"); break; case OPERATIONTIMEOUT: - this.metrics.registerOperationTimeoutException(); retryOrThrow(retryCounter, e, "exists"); break; @@ -246,18 +234,13 @@ public class RecoverableZooKeeper { try { long startTime = EnvironmentEdgeManager.currentTime(); Stat nodeStat = checkZk().exists(path, watch); - this.metrics.registerReadOperationLatency( - Math.min(EnvironmentEdgeManager.currentTime() - startTime, 1)); return nodeStat; } catch (KeeperException e) { - this.metrics.registerFailedZKCall(); switch (e.code()) { case CONNECTIONLOSS: - this.metrics.registerConnectionLossException();
[3/4] hbase git commit: HBASE-19775 hbase shell doesn't handle the exceptions that are wrapped in java.io.UncheckedIOException
HBASE-19775 hbase shell doesn't handle the exceptions that are wrapped in java.io.UncheckedIOException Signed-off-by: Josh ElserSigned-off-by: Michael Stack Project: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/6bacb643 Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/6bacb643 Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/6bacb643 Branch: refs/heads/master Commit: 6bacb643bc66b994c386b7fd175664802047eef7 Parents: 8bbfcdd Author: Sergey Soldatov Authored: Thu Jan 11 16:02:40 2018 -0800 Committer: Josh Elser Committed: Fri Jan 12 12:50:15 2018 -0500 -- hbase-shell/src/main/ruby/shell/commands.rb | 1 + 1 file changed, 1 insertion(+) -- http://git-wip-us.apache.org/repos/asf/hbase/blob/6bacb643/hbase-shell/src/main/ruby/shell/commands.rb -- diff --git a/hbase-shell/src/main/ruby/shell/commands.rb b/hbase-shell/src/main/ruby/shell/commands.rb index 1b8de9e..990e978 100644 --- a/hbase-shell/src/main/ruby/shell/commands.rb +++ b/hbase-shell/src/main/ruby/shell/commands.rb @@ -108,6 +108,7 @@ module Shell yield rescue => cause # let individual command handle exceptions first +cause = cause.getCause if cause.is_a? java.io.UncheckedIOException handle_exceptions(cause, *args) if respond_to?(:handle_exceptions) # Global HBase exception handling below if not handled by respective command above if cause.is_a?(org.apache.hadoop.hbase.TableNotFoundException)
hbase git commit: HBASE-19771 restore_snapshot shell command gives wrong namespace if the namespace doesn't exist
Repository: hbase Updated Branches: refs/heads/branch-2 72702eeb7 -> f7fbc168a HBASE-19771 restore_snapshot shell command gives wrong namespace if the namespace doesn't exist Signed-off-by: tedyuProject: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/f7fbc168 Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/f7fbc168 Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/f7fbc168 Branch: refs/heads/branch-2 Commit: f7fbc168a1538ab4157db8ae8fa9f7827774 Parents: 72702ee Author: Janos Gub Authored: Fri Jan 12 10:16:13 2018 +0100 Committer: tedyu Committed: Fri Jan 12 07:37:40 2018 -0800 -- hbase-shell/src/main/ruby/shell/commands.rb | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) -- http://git-wip-us.apache.org/repos/asf/hbase/blob/f7fbc168/hbase-shell/src/main/ruby/shell/commands.rb -- diff --git a/hbase-shell/src/main/ruby/shell/commands.rb b/hbase-shell/src/main/ruby/shell/commands.rb index f17a7f6..1b8de9e 100644 --- a/hbase-shell/src/main/ruby/shell/commands.rb +++ b/hbase-shell/src/main/ruby/shell/commands.rb @@ -117,7 +117,8 @@ module Shell raise "Unknown region #{args.first}!" end if cause.is_a?(org.apache.hadoop.hbase.NamespaceNotFoundException) - raise "Unknown namespace #{args.first.split(':')[0]}!" + s = /.*NamespaceNotFoundException: (?[^\n]+).*/.match(cause.message) + raise "Unknown namespace #{s['namespace']}!" end if cause.is_a?(org.apache.hadoop.hbase.snapshot.SnapshotDoesNotExistException) raise "Unknown snapshot #{args.first}!"
hbase git commit: HBASE-19771 restore_snapshot shell command gives wrong namespace if the namespace doesn't exist
Repository: hbase Updated Branches: refs/heads/master 20ccaef84 -> 8bbfcdda4 HBASE-19771 restore_snapshot shell command gives wrong namespace if the namespace doesn't exist Signed-off-by: tedyuProject: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/8bbfcdda Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/8bbfcdda Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/8bbfcdda Branch: refs/heads/master Commit: 8bbfcdda46aeec23ebcdc1713cf636081a1c96d0 Parents: 20ccaef Author: Janos Gub Authored: Fri Jan 12 10:16:13 2018 +0100 Committer: tedyu Committed: Fri Jan 12 07:37:10 2018 -0800 -- hbase-shell/src/main/ruby/shell/commands.rb | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) -- http://git-wip-us.apache.org/repos/asf/hbase/blob/8bbfcdda/hbase-shell/src/main/ruby/shell/commands.rb -- diff --git a/hbase-shell/src/main/ruby/shell/commands.rb b/hbase-shell/src/main/ruby/shell/commands.rb index f17a7f6..1b8de9e 100644 --- a/hbase-shell/src/main/ruby/shell/commands.rb +++ b/hbase-shell/src/main/ruby/shell/commands.rb @@ -117,7 +117,8 @@ module Shell raise "Unknown region #{args.first}!" end if cause.is_a?(org.apache.hadoop.hbase.NamespaceNotFoundException) - raise "Unknown namespace #{args.first.split(':')[0]}!" + s = /.*NamespaceNotFoundException: (?[^\n]+).*/.match(cause.message) + raise "Unknown namespace #{s['namespace']}!" end if cause.is_a?(org.apache.hadoop.hbase.snapshot.SnapshotDoesNotExistException) raise "Unknown snapshot #{args.first}!"
[16/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0b638133/testdevapidocs/org/apache/hadoop/hbase/client/TestHCM.html -- diff --git a/testdevapidocs/org/apache/hadoop/hbase/client/TestHCM.html b/testdevapidocs/org/apache/hadoop/hbase/client/TestHCM.html deleted file mode 100644 index 69a07a3..000 --- a/testdevapidocs/org/apache/hadoop/hbase/client/TestHCM.html +++ /dev/null @@ -1,1124 +0,0 @@ -http://www.w3.org/TR/html4/loose.dtd;> - - - - - -TestHCM (Apache HBase 3.0.0-SNAPSHOT Test API) - - - - - -var methods = {"i0":9,"i1":9,"i2":10,"i3":9,"i4":9,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10}; -var tabs = {65535:["t0","All Methods"],1:["t1","Static Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]}; -var altColor = "altColor"; -var rowColor = "rowColor"; -var tableTab = "tableTab"; -var activeTableTab = "activeTableTab"; - - -JavaScript is disabled on your browser. 
- - - - - -Skip navigation links - - - - -Overview -Package -Class -Use -Tree -Deprecated -Index -Help - - - - -PrevClass -NextClass - - -Frames -NoFrames - - -AllClasses - - - - - - - -Summary: -Nested| -Field| -Constr| -Method - - -Detail: -Field| -Constr| -Method - - - - - - - - -org.apache.hadoop.hbase.client -Class TestHCM - - - -http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true; title="class or interface in java.lang">java.lang.Object - - -org.apache.hadoop.hbase.client.TestHCM - - - - - - - - -public class TestHCM -extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true; title="class or interface in java.lang">Object -This class is for testing HBaseConnectionManager features - - - - - - - - - - - -Nested Class Summary - -Nested Classes - -Modifier and Type -Class and Description - - -static class -TestHCM.BlockingFilter - - -static class -TestHCM.SleepAndFailFirstTime -This copro sleeps 20 second. - - - -static class -TestHCM.SleepCoprocessor - - - - - - - - - -Field Summary - -Fields - -Modifier and Type -Field and Description - - -private static http://docs.oracle.com/javase/8/docs/api/java/util/Random.html?is-external=true; title="class or interface in java.util">Random -_randy - - -private static byte[] -FAM_NAM - - -private static org.slf4j.Logger -LOG - - -org.junit.rules.TestName -name - - -private static byte[] -ROW - - -private static byte[] -ROW_X - - -private static int -RPC_RETRY - - -protected static http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/AtomicBoolean.html?is-external=true; title="class or interface in java.util.concurrent.atomic">AtomicBoolean -syncBlockingFilter - - -private static org.apache.hadoop.hbase.TableName -TABLE_NAME - - -private static org.apache.hadoop.hbase.TableName -TABLE_NAME1 - - -private static org.apache.hadoop.hbase.TableName -TABLE_NAME2 - - -private static org.apache.hadoop.hbase.TableName -TABLE_NAME3 - - -private static 
HBaseTestingUtility -TEST_UTIL - - -org.junit.rules.TestRule -timeout - - - - - - - - - -Constructor Summary - -Constructors - -Constructor and Description - - -TestHCM() - - - - - - - - - -Method Summary - -All MethodsStatic MethodsInstance MethodsConcrete Methods - -Modifier and Type -Method and Description - - -private static void -assertEqualsWithJitter(longexpected, - longactual) - - -private static void -assertEqualsWithJitter(longexpected, - longactual, - longjitterBase) - - -private int -setNumTries(org.apache.hadoop.hbase.client.ConnectionImplementationhci, - intnewVal) - - -static void -setUpBeforeClass() - - -static void -tearDownAfterClass() - - -void -testAdminFactory() -Naive test to check that Connection#getAdmin returns a properly constructed HBaseAdmin object - - - -void -testCacheSeqNums() -Test that stale cache updates don't override newer cached values. - - - -void -testCallableSleep() - - -void -testClosing() - - -void -testClusterConnection() - - -void -testClusterStatus() - - -void -testConnection() -This test checks that one can connect to the cluster with only the - ZooKeeper quorum set. - - - -private void -testConnectionClose(booleanallowsInterrupt) - - -void -testConnectionCloseAllowsInterrupt() -Test that we can handle connection close: it will trigger a retry, but the
[45/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0b638133/devapidocs/org/apache/hadoop/hbase/master/HMasterCommandLine.LocalHMaster.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/master/HMasterCommandLine.LocalHMaster.html b/devapidocs/org/apache/hadoop/hbase/master/HMasterCommandLine.LocalHMaster.html index c094735..eaa815a 100644 --- a/devapidocs/org/apache/hadoop/hbase/master/HMasterCommandLine.LocalHMaster.html +++ b/devapidocs/org/apache/hadoop/hbase/master/HMasterCommandLine.LocalHMaster.html @@ -198,7 +198,7 @@ extends Fields inherited from classorg.apache.hadoop.hbase.regionserver.HRegionServer -cacheConfig, cacheFlusher, clusterConnection, clusterStatusTracker, compactSplitThread, conf, configurationManager, csm, executorService, fs, fsOk, fsUtilizationChore, hMemManager, infoServer, initLatch, leases, lock, MASTER_HOSTNAME_KEY , metaTableLocator, movedRegions, msgInterval, numRegionsToReport, onlineRegions, regionFavoredNodesMap, REGIONSERVER, regionsInTransitionInRS, replicationSinkHandler, replicationSourceHandler, rpcServices, secureBulkLoadManager, serverName, sleeper, startcode, tableDescriptors, TEST_SKIP_REPORTING_TRANSITION, threadWakeFrequency, useThisHostnameInstead, walFactory, walFs, walRoller, zooKeeper +cacheConfig, cacheFlusher, clusterConnection, clusterStatusTracker, compactSplitThread, conf, configurationManager, csm, executorService, fs, fsOk, fsUtilizationChore, hMemManager, infoServer, leases, lock, MASTER_HOSTNAME_KEY, metaTa bleLocator, movedRegions, msgInterval, numRegionsToReport, onlineRegions, regionFavoredNodesMap, REGIONSERVER, regionsInTransitionInRS, replicationSinkHandler, replicationSourceHandler, rpcServices, secureBulkLoadManager, serverName, sleeper, startcode, tableDescriptors, TEST_SKIP_REPORTING_TRANSITION, threadWakeFrequency, useThisHostnameInstead, walFactory, walFs, walRoller, zooKeeper @@ -254,7 +254,7 @@ extends Methods inherited from classorg.apache.hadoop.hbase.regionserver.HRegionServer 
-abort, addRegion, addToMovedRegions, checkFileSystem, cleanMovedRegions, clearRegionBlockCache, closeAllRegions, href="../../../../../org/apache/hadoop/hbase/regionserver/HRegionServer.html#closeAndOfflineRegionForSplitOrMerge-java.util.List-">closeAndOfflineRegionForSplitOrMerge, > href="../../../../../org/apache/hadoop/hbase/regionserver/HRegionServer.html#closeRegion-java.lang.String-boolean-org.apache.hadoop.hbase.ServerName-">closeRegion, > href="../../../../../org/apache/hadoop/hbase/regionserver/HRegionServer.html#constructRegionServer-java.lang.Class-org.apache.hadoop.conf.Configuration-">constructRegionServer, > href="../../../../../org/apache/hadoop/hbase/regionserver/HRegionServer.html#convertThrowableToIOE-java.lang.Throwable-java.lang.String-">convertThrowableToIOE, > href="../../../../../org/apache/hadoop/hbase/regionserver/HRegionServer.html#createClusterConnection--">createClusterConnection, > href="../../../../../org/apache/hadoop/hbase/regionserver/HRegionServer.html#createConnection-org.apache.hadoop.conf.Configuration-">createConnection, > < a href="../../../../../org/apache/hadoop/hbase/regionserver/HRegionServer.html#createRegionLoad-java.lang.String-">createRegionLoad, createRegionServerStatusStub, createRegionServerStatusStub, execRegionServerService, executeProcedure, getCacheConfig, getChoreService, getClusterConnection, getClusterId, getCompactionPressure, getCompactionRequestor, getCompactSplitThread, getConfiguration, getConfigurationManager, getConnection, getCoordinatedStateManager, getEventLoopGroupConfig, getExecutorService, getFavoredNodesForRegion, getFileSystem, getFlushPressure, getFlushRequester, getFlushThroughputController, getFsTableDescriptors, getHeapMemoryManager, getInfoServer, getLastSequenceId, getLeases, getMasterAddressTracker, getMetaTableLocator, getMetrics, getMostLoadedRegions, getNonceManager, getNumberOfOnlineRegions, getOnlineRegion, getOnlineRegionsLocalContext, getOnlineTabl es, getRegion, getRegion, 
getRegionBlockLocations, getRegionByEncodedName, getRegionByEncodedName, getRegions, getRegions, getRegionServerAccounting, getRegionServerCoprocessorHost, getRegionServerCoprocessors, getRegionServerMetrics, getRegionServerRpcQuotaManager, getRegionServerSpaceQuotaManager, getRegionsInTransitionInRS, getReplicationSourceService, getRootDir, getRpcServer, getRSRpcServices, getSecureBulkLoadManager, getStartcode, getThreadWakeFrequency, getWAL, getWALFileSystem, getWalRoller, getWALRootDir, getWALs, handleReportForDutyResponse, initializeMemStoreChunkCreator, isAborted, isOnline, isStopped, isStopping, kill, movedRegionCleanerPeriod, onConfigurationChange, postOpenDeployTasks, regionLock, remoteProcedureComplete, removeRegion,
[26/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0b638133/hbase-build-configuration/integration.html -- diff --git a/hbase-build-configuration/integration.html b/hbase-build-configuration/integration.html index e2dd2e2..1a72d4d 100644 --- a/hbase-build-configuration/integration.html +++ b/hbase-build-configuration/integration.html @@ -7,7 +7,7 @@ - + Apache HBase - Build Configuration CI Management @@ -126,7 +126,7 @@ https://www.apache.org/;>The Apache Software Foundation. All rights reserved. - Last Published: 2018-01-11 + Last Published: 2018-01-12 http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0b638133/hbase-build-configuration/issue-tracking.html -- diff --git a/hbase-build-configuration/issue-tracking.html b/hbase-build-configuration/issue-tracking.html index 0062a19..c108358 100644 --- a/hbase-build-configuration/issue-tracking.html +++ b/hbase-build-configuration/issue-tracking.html @@ -7,7 +7,7 @@ - + Apache HBase - Build Configuration Issue Management @@ -123,7 +123,7 @@ https://www.apache.org/;>The Apache Software Foundation. All rights reserved. - Last Published: 2018-01-11 + Last Published: 2018-01-12 http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0b638133/hbase-build-configuration/license.html -- diff --git a/hbase-build-configuration/license.html b/hbase-build-configuration/license.html index 989d0f7..6ec28ca 100644 --- a/hbase-build-configuration/license.html +++ b/hbase-build-configuration/license.html @@ -7,7 +7,7 @@ - + Apache HBase - Build Configuration Project Licenses @@ -326,7 +326,7 @@ https://www.apache.org/;>The Apache Software Foundation. All rights reserved. 
- Last Published: 2018-01-11 + Last Published: 2018-01-12 http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0b638133/hbase-build-configuration/mail-lists.html -- diff --git a/hbase-build-configuration/mail-lists.html b/hbase-build-configuration/mail-lists.html index 8e37d21..4722133 100644 --- a/hbase-build-configuration/mail-lists.html +++ b/hbase-build-configuration/mail-lists.html @@ -7,7 +7,7 @@ - + Apache HBase - Build Configuration Project Mailing Lists @@ -176,7 +176,7 @@ https://www.apache.org/;>The Apache Software Foundation. All rights reserved. - Last Published: 2018-01-11 + Last Published: 2018-01-12 http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0b638133/hbase-build-configuration/plugin-management.html -- diff --git a/hbase-build-configuration/plugin-management.html b/hbase-build-configuration/plugin-management.html index 34048dc..6ef 100644 --- a/hbase-build-configuration/plugin-management.html +++ b/hbase-build-configuration/plugin-management.html @@ -7,7 +7,7 @@ - + Apache HBase - Build Configuration Project Plugin Management @@ -271,7 +271,7 @@ https://www.apache.org/;>The Apache Software Foundation. All rights reserved. - Last Published: 2018-01-11 + Last Published: 2018-01-12 http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0b638133/hbase-build-configuration/plugins.html -- diff --git a/hbase-build-configuration/plugins.html b/hbase-build-configuration/plugins.html index e4617dc..3b0cda5 100644 --- a/hbase-build-configuration/plugins.html +++ b/hbase-build-configuration/plugins.html @@ -7,7 +7,7 @@ - + Apache HBase - Build Configuration Project Plugins @@ -214,7 +214,7 @@ https://www.apache.org/;>The Apache Software Foundation. All rights reserved. - Last Published: 2018-01-11 + Last Published: 2018-01-12
[06/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0b638133/testdevapidocs/src-html/org/apache/hadoop/hbase/TestCellUtil.html -- diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/TestCellUtil.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/TestCellUtil.html index 232ef56..bc3a6d0 100644 --- a/testdevapidocs/src-html/org/apache/hadoop/hbase/TestCellUtil.html +++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/TestCellUtil.html @@ -29,610 +29,626 @@ 021import static org.junit.Assert.assertEquals; 022import static org.junit.Assert.assertFalse; 023import static org.junit.Assert.assertTrue; -024 -025import java.io.ByteArrayOutputStream; -026import java.io.IOException; -027import java.math.BigDecimal; -028import java.nio.ByteBuffer; -029import java.util.ArrayList; -030import java.util.List; -031import java.util.NavigableMap; -032import java.util.TreeMap; -033import org.apache.hadoop.hbase.testclassification.MiscTests; -034import org.apache.hadoop.hbase.testclassification.SmallTests; -035import org.apache.hadoop.hbase.util.Bytes; -036import org.junit.Assert; -037import org.junit.Test; -038import org.junit.experimental.categories.Category; -039 -040@Category({MiscTests.class, SmallTests.class}) -041public class TestCellUtil { -042 /** -043 * CellScannable used in test. Returns a {@link TestCellScanner} -044 */ -045 private static class TestCellScannable implements CellScannable { -046private final int cellsCount; -047TestCellScannable(final int cellsCount) { -048 this.cellsCount = cellsCount; -049} -050@Override -051public CellScanner cellScanner() { -052 return new TestCellScanner(this.cellsCount); -053} -054 } -055 -056 /** -057 * CellScanner used in test. 
-058 */ -059 private static class TestCellScanner implements CellScanner { -060private int count = 0; -061private Cell current = null; -062private final int cellsCount; -063 -064TestCellScanner(final int cellsCount) { -065 this.cellsCount = cellsCount; -066} -067 -068@Override -069public Cell current() { -070 return this.current; -071} -072 -073@Override -074public boolean advance() throws IOException { -075 if (this.count cellsCount) { -076this.current = new TestCell(this.count); -077this.count++; -078return true; -079 } -080 return false; -081} -082 } -083 -084 /** -085 * Cell used in test. Has row only. -086 */ -087 private static class TestCell implements Cell { -088private final byte [] row; -089 -090TestCell(final int i) { -091 this.row = Bytes.toBytes(i); -092} -093 -094@Override -095public byte[] getRowArray() { -096 return this.row; -097} -098 -099@Override -100public int getRowOffset() { -101 return 0; -102} -103 -104@Override -105public short getRowLength() { -106 return (short)this.row.length; -107} -108 -109@Override -110public byte[] getFamilyArray() { -111 // TODO Auto-generated method stub -112 return null; -113} -114 -115@Override -116public int getFamilyOffset() { -117 // TODO Auto-generated method stub -118 return 0; -119} -120 -121@Override -122public byte getFamilyLength() { -123 // TODO Auto-generated method stub -124 return 0; -125} -126 -127@Override -128public byte[] getQualifierArray() { -129 // TODO Auto-generated method stub -130 return null; -131} -132 -133@Override -134public int getQualifierOffset() { -135 // TODO Auto-generated method stub -136 return 0; -137} -138 -139@Override -140public int getQualifierLength() { -141 // TODO Auto-generated method stub -142 return 0; -143} -144 -145@Override -146public long getTimestamp() { -147 // TODO Auto-generated method stub -148 return 0; -149} -150 -151@Override -152public byte getTypeByte() { -153 // TODO Auto-generated method stub -154 return 0; -155} -156 -157@Override -158public byte[] 
getValueArray() { -159 // TODO Auto-generated method stub -160 return null; -161} -162 -163@Override -164public int getValueOffset() { -165 // TODO Auto-generated method stub -166 return 0; -167} -168 -169@Override -170public int getValueLength() { -171 // TODO Auto-generated method stub -172 return 0; -173} -174 -175@Override -176public byte[] getTagsArray() { -177 // TODO Auto-generated method stub -178 return null; -179} -180 -181@Override -182public int getTagsOffset() { -183 // TODO Auto-generated method stub -184 return 0; -185} -186 -187@Override -188public long getSequenceId() { -189 // TODO Auto-generated method stub -190 return 0; -191} -192 -193
[22/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0b638133/testdevapidocs/org/apache/hadoop/hbase/client/AbstractTestCITimeout.SleepAndFailFirstTime.html -- diff --git a/testdevapidocs/org/apache/hadoop/hbase/client/AbstractTestCITimeout.SleepAndFailFirstTime.html b/testdevapidocs/org/apache/hadoop/hbase/client/AbstractTestCITimeout.SleepAndFailFirstTime.html new file mode 100644 index 000..085cfc6 --- /dev/null +++ b/testdevapidocs/org/apache/hadoop/hbase/client/AbstractTestCITimeout.SleepAndFailFirstTime.html @@ -0,0 +1,534 @@ +http://www.w3.org/TR/html4/loose.dtd;> + + + + + +AbstractTestCITimeout.SleepAndFailFirstTime (Apache HBase 3.0.0-SNAPSHOT Test API) + + + + + +var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10}; +var tabs = {65535:["t0","All Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]}; +var altColor = "altColor"; +var rowColor = "rowColor"; +var tableTab = "tableTab"; +var activeTableTab = "activeTableTab"; + + +JavaScript is disabled on your browser. 
+ + + + + +Skip navigation links + + + + +Overview +Package +Class +Use +Tree +Deprecated +Index +Help + + + + +PrevClass +NextClass + + +Frames +NoFrames + + +AllClasses + + + + + + + +Summary: +Nested| +Field| +Constr| +Method + + +Detail: +Field| +Constr| +Method + + + + + + + + +org.apache.hadoop.hbase.client +Class AbstractTestCITimeout.SleepAndFailFirstTime + + + +http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true; title="class or interface in java.lang">java.lang.Object + + +org.apache.hadoop.hbase.client.AbstractTestCITimeout.SleepAndFailFirstTime + + + + + + + +All Implemented Interfaces: +org.apache.hadoop.hbase.Coprocessor, org.apache.hadoop.hbase.coprocessor.RegionCoprocessor, org.apache.hadoop.hbase.coprocessor.RegionObserver + + +Enclosing class: +AbstractTestCITimeout + + + +public static class AbstractTestCITimeout.SleepAndFailFirstTime +extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true; title="class or interface in java.lang">Object +implements org.apache.hadoop.hbase.coprocessor.RegionCoprocessor, org.apache.hadoop.hbase.coprocessor.RegionObserver +This copro sleeps 20 second. The first call it fails. The second time, it works. 
+ + + + + + + + + + + +Nested Class Summary + + + + +Nested classes/interfaces inherited from interfaceorg.apache.hadoop.hbase.Coprocessor +org.apache.hadoop.hbase.Coprocessor.State + + + + + +Nested classes/interfaces inherited from interfaceorg.apache.hadoop.hbase.coprocessor.RegionObserver +org.apache.hadoop.hbase.coprocessor.RegionObserver.MutationType + + + + + + + + +Field Summary + +Fields + +Modifier and Type +Field and Description + + +(package private) static http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/AtomicLong.html?is-external=true; title="class or interface in java.util.concurrent.atomic">AtomicLong +ct + + +(package private) static long +DEFAULT_SLEEP_TIME + + +(package private) static http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">String +SLEEP_TIME_CONF_KEY + + +(package private) static http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/AtomicLong.html?is-external=true; title="class or interface in java.util.concurrent.atomic">AtomicLong +sleepTime + + + + + + +Fields inherited from interfaceorg.apache.hadoop.hbase.Coprocessor +PRIORITY_HIGHEST, PRIORITY_LOWEST, PRIORITY_SYSTEM, PRIORITY_USER, VERSION + + + + + + + + +Constructor Summary + +Constructors + +Constructor and Description + + +SleepAndFailFirstTime() + + + + + + + + + +Method Summary + +All MethodsInstance MethodsConcrete Methods + +Modifier and Type +Method and Description + + +http://docs.oracle.com/javase/8/docs/api/java/util/Optional.html?is-external=true; title="class or interface in java.util">Optionalorg.apache.hadoop.hbase.coprocessor.RegionObserver +getRegionObserver() + + +void +postOpen(org.apache.hadoop.hbase.coprocessor.ObserverContextorg.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironmentc) + + +void +preDelete(org.apache.hadoop.hbase.coprocessor.ObserverContextorg.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironmente, + 
org.apache.hadoop.hbase.client.Deletedelete, + org.apache.hadoop.hbase.wal.WALEditedit, + org.apache.hadoop.hbase.client.Durabilitydurability) + + +void
[15/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0b638133/testdevapidocs/org/apache/hadoop/hbase/client/class-use/AbstractTestCITimeout.SleepAndFailFirstTime.html -- diff --git a/testdevapidocs/org/apache/hadoop/hbase/client/class-use/AbstractTestCITimeout.SleepAndFailFirstTime.html b/testdevapidocs/org/apache/hadoop/hbase/client/class-use/AbstractTestCITimeout.SleepAndFailFirstTime.html new file mode 100644 index 000..f020af6 --- /dev/null +++ b/testdevapidocs/org/apache/hadoop/hbase/client/class-use/AbstractTestCITimeout.SleepAndFailFirstTime.html @@ -0,0 +1,125 @@ +http://www.w3.org/TR/html4/loose.dtd;> + + + + + +Uses of Class org.apache.hadoop.hbase.client.AbstractTestCITimeout.SleepAndFailFirstTime (Apache HBase 3.0.0-SNAPSHOT Test API) + + + + + + + +JavaScript is disabled on your browser. + + + + + +Skip navigation links + + + + +Overview +Package +Class +Use +Tree +Deprecated +Index +Help + + + + +Prev +Next + + +Frames +NoFrames + + +AllClasses + + + + + + + + + + +Uses of Classorg.apache.hadoop.hbase.client.AbstractTestCITimeout.SleepAndFailFirstTime + +No usage of org.apache.hadoop.hbase.client.AbstractTestCITimeout.SleepAndFailFirstTime + + + + +Skip navigation links + + + + +Overview +Package +Class +Use +Tree +Deprecated +Index +Help + + + + +Prev +Next + + +Frames +NoFrames + + +AllClasses + + + + + + + + + +Copyright 20072018 https://www.apache.org/;>The Apache Software Foundation. All rights reserved. 
+ + http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0b638133/testdevapidocs/org/apache/hadoop/hbase/client/class-use/AbstractTestCITimeout.SleepCoprocessor.html -- diff --git a/testdevapidocs/org/apache/hadoop/hbase/client/class-use/AbstractTestCITimeout.SleepCoprocessor.html b/testdevapidocs/org/apache/hadoop/hbase/client/class-use/AbstractTestCITimeout.SleepCoprocessor.html new file mode 100644 index 000..f3fe2fd --- /dev/null +++ b/testdevapidocs/org/apache/hadoop/hbase/client/class-use/AbstractTestCITimeout.SleepCoprocessor.html @@ -0,0 +1,125 @@ +http://www.w3.org/TR/html4/loose.dtd;> + + + + + +Uses of Class org.apache.hadoop.hbase.client.AbstractTestCITimeout.SleepCoprocessor (Apache HBase 3.0.0-SNAPSHOT Test API) + + + + + + + +JavaScript is disabled on your browser. + + + + + +Skip navigation links + + + + +Overview +Package +Class +Use +Tree +Deprecated +Index +Help + + + + +Prev +Next + + +Frames +NoFrames + + +AllClasses + + + + + + + + + + +Uses of Classorg.apache.hadoop.hbase.client.AbstractTestCITimeout.SleepCoprocessor + +No usage of org.apache.hadoop.hbase.client.AbstractTestCITimeout.SleepCoprocessor + + + + +Skip navigation links + + + + +Overview +Package +Class +Use +Tree +Deprecated +Index +Help + + + + +Prev +Next + + +Frames +NoFrames + + +AllClasses + + + + + + + + + +Copyright 20072018 https://www.apache.org/;>The Apache Software Foundation. All rights reserved. + + http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0b638133/testdevapidocs/org/apache/hadoop/hbase/client/class-use/AbstractTestCITimeout.html -- diff --git a/testdevapidocs/org/apache/hadoop/hbase/client/class-use/AbstractTestCITimeout.html b/testdevapidocs/org/apache/hadoop/hbase/client/class-use/AbstractTestCITimeout.html new file mode 100644 index 000..9a0672a --- /dev/null +++
[19/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0b638133/testdevapidocs/org/apache/hadoop/hbase/client/TestCISleep.html -- diff --git a/testdevapidocs/org/apache/hadoop/hbase/client/TestCISleep.html b/testdevapidocs/org/apache/hadoop/hbase/client/TestCISleep.html new file mode 100644 index 000..37187bc --- /dev/null +++ b/testdevapidocs/org/apache/hadoop/hbase/client/TestCISleep.html @@ -0,0 +1,394 @@ +http://www.w3.org/TR/html4/loose.dtd;> + + + + + +TestCISleep (Apache HBase 3.0.0-SNAPSHOT Test API) + + + + + +var methods = {"i0":10,"i1":10,"i2":10}; +var tabs = {65535:["t0","All Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]}; +var altColor = "altColor"; +var rowColor = "rowColor"; +var tableTab = "tableTab"; +var activeTableTab = "activeTableTab"; + + +JavaScript is disabled on your browser. + + + + + +Skip navigation links + + + + +Overview +Package +Class +Use +Tree +Deprecated +Index +Help + + + + +PrevClass +NextClass + + +Frames +NoFrames + + +AllClasses + + + + + + + +Summary: +Nested| +Field| +Constr| +Method + + +Detail: +Field| +Constr| +Method + + + + + + + + +org.apache.hadoop.hbase.client +Class TestCISleep + + + +http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true; title="class or interface in java.lang">java.lang.Object + + +org.apache.hadoop.hbase.client.AbstractTestCITimeout + + +org.apache.hadoop.hbase.client.TestCISleep + + + + + + + + + + +public class TestCISleep +extends AbstractTestCITimeout + + + + + + + + + + + +Nested Class Summary + + + + +Nested classes/interfaces inherited from classorg.apache.hadoop.hbase.client.AbstractTestCITimeout +AbstractTestCITimeout.SleepAndFailFirstTime, AbstractTestCITimeout.SleepCoprocessor + + + + + + + + +Field Summary + +Fields + +Modifier and Type +Field and Description + + +private static org.slf4j.Logger +LOG + + +private org.apache.hadoop.hbase.TableName +tableName + + + + + + +Fields inherited from 
classorg.apache.hadoop.hbase.client.AbstractTestCITimeout +FAM_NAM, name, TEST_UTIL + + + + + + + + +Constructor Summary + +Constructors + +Constructor and Description + + +TestCISleep() + + + + + + + + + +Method Summary + +All MethodsInstance MethodsConcrete Methods + +Modifier and Type +Method and Description + + +void +setUp() + + +void +testCallableSleep() + + +void +testRpcRetryingCallerSleep() +Test starting from 0 index when RpcRetryingCaller calculate the backoff time. + + + + + + + +Methods inherited from classorg.apache.hadoop.hbase.client.AbstractTestCITimeout +setUpBeforeClass, tearDownAfterClass + + + + + +Methods inherited from classjava.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true; title="class or interface in java.lang">Object +http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#clone--; title="class or interface in java.lang">clone, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#equals-java.lang.Object-; title="class or interface in java.lang">equals, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#finalize--; title="class or interface in java.lang">finalize, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#getClass--; title="class or interface in java.lang">getClass, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#hashCode--; title="class or interface in java.lang">hashCode, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#notify--; title="class or interface in java.lang">notify, http://docs.oracle.com/javase/8/docs/api/java/lang /Object.html?is-external=true#notifyAll--" title="class or interface in java.lang">notifyAll, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#toString--; title="class or interface in java.lang">toString, 
http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#wait--; title="class or interface in java.lang">wait, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#wait-long-; title="class or interface in java.lang">wait, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#wait-long-int-; title="class or interface in java.lang">wait + + + + + + + + + + + + + + +Field Detail + + + + + +LOG +private staticorg.slf4j.Logger LOG + + + + + + + +tableName +privateorg.apache.hadoop.hbase.TableName tableName +
[23/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0b638133/testdevapidocs/org/apache/hadoop/hbase/TestMovedRegionsCleaner.TestMockRegionServer.html -- diff --git a/testdevapidocs/org/apache/hadoop/hbase/TestMovedRegionsCleaner.TestMockRegionServer.html b/testdevapidocs/org/apache/hadoop/hbase/TestMovedRegionsCleaner.TestMockRegionServer.html index 0048578..d5b9f50 100644 --- a/testdevapidocs/org/apache/hadoop/hbase/TestMovedRegionsCleaner.TestMockRegionServer.html +++ b/testdevapidocs/org/apache/hadoop/hbase/TestMovedRegionsCleaner.TestMockRegionServer.html @@ -180,7 +180,7 @@ extends Methods inherited from classorg.apache.hadoop.hbase.regionserver.HRegionServer -abort, addRegion, addToMovedRegions, canCreateBaseZNode, canUpdateTableDescriptor, checkFileSystem, clearRegionBlockCache, closeAllRegions, closeAndOfflineRegionForSplitOrMerge, closeRegion, configureInfoServer, constructRegionServer, convertThrowableToIOE, createClusterConnection, createConnection, createRegionLoad, createRegionServerStatusStub, createRegionServerStatusStub, createRpcServices, execRegionServerService, executeProcedure, getCacheConfig, getChoreService, getClusterConnection, getClusterId, getCompactionPressure, getCompactionRequestor, getCompactSplitThread, getConfiguration, getConfigurationManager, getConnection, getCoordinatedStateManager, getDumpServlet, getEventLoopGroupConfig, getExecutorService, getFavoredNodesForRegion, getFileSystem, getFlushPressure, getFlushRequester, getFlushThroughputController, getFsTableDescriptors, getHeapMemoryManager, getInfoServer, getLastSequenceId, getLeases, getMasterAddressTracker, getMetaTableLocator, getMetaTableObserver , getMetrics, getMostLoadedRegions, getNonceManager, getNumberOfOnlineRegions, getOnlineRegion, getOnlineRegionsLocalContext, getOnlineTables, getProcessName, getRegion, getRegion, getRegionBlockLocations, getRegionByEncodedName, getRegionByEncodedName, getRegions, getRegions, getRegionServerAccounting, 
getRegionServerCoprocessorHost, getRegionServerCoprocessors, getRegionServerMetrics, getRegionServerRpcQuotaManager, getRegionServerSpaceQuotaManager, getRegionsInTransitionInRS, getReplicationSourceService, getRootDir, getRpcServer, getRSRpcServices, getSecureBulkLoadManager, getServerName, getStartcode, getTableDescriptors, getThreadWakeFrequency, getWAL, getWALFileSystem, getWalRoller, getWALRootDir, getWALs, getZooKeeper, initializeMemStoreChunkCreator, isAborted, isOnline, isStopped, isStopping, login, main, onConfigurationChange, postOpenDeployTasks, regionLock, registerService, remoteProcedureComplete, removeRegion, reportRegionSizesForQuotas, reportRegionStateTransition, send ShutdownInterrupt, setInitLatch, setupClusterConnection, shouldUseThisHostnameInstead, stop, stop, stopServiceThreads, toString, tryRegionServerReport, unassign, updateConfiguration, updateRegionFavoredNodesMapping, waitForMasterActive, waitForServerOnline, walRollRequestFinished +abort, addRegion, addToMovedRegions, canCreateBaseZNode, canUpdateTableDescriptor, checkFileSystem, clearRegionBlockCache, closeAllRegions, closeAndOfflineRegionForSplitOrMerge, closeRegion, configureInfoServer, constructRegionServer, convertThrowableToIOE, createClusterConnection, createConnection, createRegionLoad, createRegionServerStatusStub, createRegionServerStatusStub, createRpcServices, execRegionServerService, executeProcedure, getCacheConfig, getChoreService, getClusterConnection, getClusterId, getCompactionPressure, getCompactionRequestor, getCompactSplitThread, getConfiguration, getConfigurationManager, getConnection, getCoordinatedStateManager, getDumpServlet, getEventLoopGroupConfig, getExecutorService, getFavoredNodesForRegion, getFileSystem, getFlushPressure, getFlushRequester, getFlushThroughputController, getFsTableDescriptors, getHeapMemoryManager, getInfoServer, getLastSequenceId, getLeases, getMasterAddressTracker, getMetaTableLocator, getMetaTableObserver , getMetrics, 
getMostLoadedRegions, getNonceManager, getNumberOfOnlineRegions, getOnlineRegion, getOnlineRegionsLocalContext, getOnlineTables, getProcessName, getRegion, getRegion, getRegionBlockLocations, getRegionByEncodedName, getRegionByEncodedName, getRegions, getRegions, getRegionServerAccounting, getRegionServerCoprocessorHost, getRegionServerCoprocessors, getRegionServerMetrics, getRegionServerRpcQuotaManager, getRegionServerSpaceQuotaManager, getRegionsInTransitionInRS, getReplicationSourceService, getRootDir, getRpcServer, getRSRpcServices, getSecureBulkLoadManager, getServerName, getStartcode, getTableDescriptors, getThreadWakeFrequency, getWAL, getWALFileSystem, getWalRoller, getWALRootDir, getWALs, getZooKeeper, initializeMemStoreChunkCreator, isAborted, isOnline, isStopped, isStopping, login, main, onConfigurationChange, postOpenDeployTasks, regionLock, registerService,
[01/51] [partial] hbase-site git commit: Published site at .
Repository: hbase-site Updated Branches: refs/heads/asf-site ba96e306f -> 0b638133a http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0b638133/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestHCM.BlockingFilter.html -- diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestHCM.BlockingFilter.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestHCM.BlockingFilter.html deleted file mode 100644 index eaa3d11..000 --- a/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestHCM.BlockingFilter.html +++ /dev/null @@ -1,1543 +0,0 @@ -http://www.w3.org/TR/html4/loose.dtd;> - - -Source code - - - - -001/* -002 * Licensed to the Apache Software Foundation (ASF) under one -003 * or more contributor license agreements. See the NOTICE file -004 * distributed with this work for additional information -005 * regarding copyright ownership. The ASF licenses this file -006 * to you under the Apache License, Version 2.0 (the -007 * "License"); you may not use this file except in compliance -008 * with the License. You may obtain a copy of the License at -009 * -010 * http://www.apache.org/licenses/LICENSE-2.0 -011 * -012 * Unless required by applicable law or agreed to in writing, software -013 * distributed under the License is distributed on an "AS IS" BASIS, -014 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -015 * See the License for the specific language governing permissions and -016 * limitations under the License. 
-017 */ -018package org.apache.hadoop.hbase.client; -019 -020import static org.junit.Assert.assertEquals; -021import static org.junit.Assert.assertFalse; -022import static org.junit.Assert.assertNotNull; -023import static org.junit.Assert.assertNull; -024import static org.junit.Assert.assertTrue; -025import static org.junit.Assert.fail; -026 -027import java.io.IOException; -028import java.lang.reflect.Field; -029import java.lang.reflect.Modifier; -030import java.net.SocketTimeoutException; -031import java.util.ArrayList; -032import java.util.List; -033import java.util.Optional; -034import java.util.Random; -035import java.util.concurrent.ExecutorService; -036import java.util.concurrent.SynchronousQueue; -037import java.util.concurrent.ThreadPoolExecutor; -038import java.util.concurrent.TimeUnit; -039import java.util.concurrent.atomic.AtomicBoolean; -040import java.util.concurrent.atomic.AtomicInteger; -041import java.util.concurrent.atomic.AtomicLong; -042import java.util.concurrent.atomic.AtomicReference; -043 -044import org.apache.hadoop.conf.Configuration; -045import org.apache.hadoop.hbase.CategoryBasedTimeout; -046import org.apache.hadoop.hbase.Cell; -047import org.apache.hadoop.hbase.HBaseTestingUtility; -048import org.apache.hadoop.hbase.HConstants; -049import org.apache.hadoop.hbase.HRegionLocation; -050import org.apache.hadoop.hbase.HTableDescriptor; -051import org.apache.hadoop.hbase.RegionLocations; -052import org.apache.hadoop.hbase.ServerName; -053import org.apache.hadoop.hbase.TableName; -054import org.apache.hadoop.hbase.Waiter; -055import org.apache.hadoop.hbase.coprocessor.ObserverContext; -056import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor; -057import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; -058import org.apache.hadoop.hbase.coprocessor.RegionObserver; -059import org.apache.hadoop.hbase.exceptions.ClientExceptionsUtil; -060import org.apache.hadoop.hbase.exceptions.DeserializationException; -061import 
org.apache.hadoop.hbase.exceptions.RegionMovedException; -062import org.apache.hadoop.hbase.filter.Filter; -063import org.apache.hadoop.hbase.filter.FilterBase; -064import org.apache.hadoop.hbase.ipc.HBaseRpcController; -065import org.apache.hadoop.hbase.ipc.RpcClient; -066import org.apache.hadoop.hbase.ipc.RpcControllerFactory; -067import org.apache.hadoop.hbase.master.HMaster; -068import org.apache.hadoop.hbase.regionserver.HRegion; -069import org.apache.hadoop.hbase.regionserver.HRegionServer; -070import org.apache.hadoop.hbase.regionserver.Region; -071import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException; -072import org.apache.hadoop.hbase.wal.WALEdit; -073import org.apache.hadoop.hbase.testclassification.LargeTests; -074import org.apache.hadoop.hbase.util.Bytes; -075import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; -076import org.apache.hadoop.hbase.util.JVMClusterUtil; -077import org.apache.hadoop.hbase.util.ManualEnvironmentEdge; -078import org.apache.hadoop.hbase.util.Threads; -079import org.junit.AfterClass; -080import org.junit.Assert; -081import org.junit.BeforeClass; -082import org.junit.Ignore; -083import org.junit.Rule; -084import org.junit.Test; -085import org.junit.experimental.categories.Category; -086import org.junit.rules.TestName; -087import org.junit.rules.TestRule; -088import
[24/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0b638133/testdevapidocs/org/apache/hadoop/hbase/MiniHBaseCluster.MiniHBaseClusterRegionServer.html -- diff --git a/testdevapidocs/org/apache/hadoop/hbase/MiniHBaseCluster.MiniHBaseClusterRegionServer.html b/testdevapidocs/org/apache/hadoop/hbase/MiniHBaseCluster.MiniHBaseClusterRegionServer.html index 2f116fc..ecf1a43 100644 --- a/testdevapidocs/org/apache/hadoop/hbase/MiniHBaseCluster.MiniHBaseClusterRegionServer.html +++ b/testdevapidocs/org/apache/hadoop/hbase/MiniHBaseCluster.MiniHBaseClusterRegionServer.html @@ -197,7 +197,7 @@ extends org.apache.hadoop.hbase.regionserver.HRegionServer Fields inherited from classorg.apache.hadoop.hbase.regionserver.HRegionServer -cacheConfig, cacheFlusher, clusterConnection, clusterStatusTracker, compactSplitThread, conf, configurationManager, csm, executorService, fs, fsOk, fsUtilizationChore, hMemManager, infoServer, initLatch, leases, lock, MASTER_HOSTNAME_KEY, metaTableLocator, movedRegions, msgInterval, numRegionsToReport, onlineRegions, regionFavoredNodesMap, REGIONSERVER, regionsInTransitionInRS, replicationSinkHandler, replicationSourceHandler, rpcServices, secureBulkLoadManager, serverName, sleeper, startcode, tableDescriptors, TEST_SKIP_REPORTING_TRANSITION, threadWakeFrequency, useThisHostnameInstead, walFactory, walFs, walRoller, zooKeeper +cacheConfig, cacheFlusher, clusterConnection, clusterStatusTracker, compactSplitThread, conf, configurationManager, csm, executorService, fs, fsOk, fsUtilizationChore, hMemManager, infoServer, leases, lock, MASTER_HOSTNAME_KEY, metaTableLocator, movedRegions, msgInterval, numRegionsToReport, onlineRegions, regionFavoredNodesMap, REGIONSERVER, regionsInTransitionInRS, replicationSinkHandler, replicationSourceHandler, rpcServices, secureBulkLoadManager, serverName, sleeper, startcode, tableDescriptors, TEST_SKIP_REPORTING_TRANSITION, threadWakeFrequency, useThisHostnameInstead, walFactory, walFs, walRoller, zooKeeper @@ -262,7 
+262,7 @@ extends org.apache.hadoop.hbase.regionserver.HRegionServer Methods inherited from classorg.apache.hadoop.hbase.regionserver.HRegionServer -abort, addRegion, addToMovedRegions, canCreateBaseZNode, canUpdateTableDescriptor, checkFileSystem, cleanMovedRegions, clearRegionBlockCache, closeAllRegions, closeAndOfflineRegionForSplitOrMerge, closeRegion, configureInfoServer, constructRegionServer, convertThrowableToIOE, createClusterConnection, createConnection, createRegionLoad, createRegionServerStatusStub, createRegionServerStatusStub, createRpcServices, execRegionServerService, executeProcedure, getCacheConfig, getChoreService, getClusterConnection, getClusterId, getCompactionPressure, getCompactionRequestor, getCompactSplitThread, getConfiguration, getConfigurationManager, getConnection, getCoordinatedStateManager, getDumpServlet, getEventLoopGroupConfig, getExecutorService, getFavoredNodesForRegion, getFileSystem, getFlushPressure, getFlushRequester, getFlushThroughputController, getFsTableDescriptors, getHeapMemoryManager, getInfoServer, getLastSequenceId, getLeases, getMasterAddressTracker, getMetaTableLocator, g etMetaTableObserver, getMetrics, getMostLoadedRegions, getNonceManager, getNumberOfOnlineRegions, getOnlineRegion, getOnlineRegionsLocalContext, getOnlineTables, getProcessName, getRegion, getRegion, getRegionBlockLocations, getRegionByEncodedName, getRegionByEncodedName, getRegions, getRegions, getRegionServerAccounting, getRegionServerCoprocessorHost, getRegionServerCoprocessors, getRegionServerMetrics, getRegionServerRpcQuotaManager, getRegionServerSpaceQuotaManager, getRegionsInTransitionInRS, getReplicationSourceService, getRootDir, getRpcServer, getRSRpcServices, getSecureBulkLoadManager, getServerName, getStartcode, getTableDescriptors, getThreadWakeFrequency, getWAL, getWALFileSystem, getWalRoller, getWALRootDir, getWALs, getZooKeeper, initializeMemStoreChunkCreator, isAborted, isOnline, isStopped, isStopping, login, main, 
movedRegionCleanerPeriod, onConfigurationChange, postOpenDeployTasks, regionLock, registerService, remoteProcedureComplete, removeRegion, reportRegionSize sForQuotas, reportRegionStateTransition, sendShutdownInterrupt, setInitLatch, setupClusterConnection, shouldUseThisHostnameInstead, stop, stop, stopServiceThreads, toString, tryRegionServerReport, unassign, updateConfiguration, updateRegionFavoredNodesMapping, waitForMasterActive, waitForServerOnline, walRollRequestFinished +abort, addRegion, addToMovedRegions, canCreateBaseZNode, canUpdateTableDescriptor, checkFileSystem, cleanMovedRegions, clearRegionBlockCache, closeAllRegions, closeAndOfflineRegionForSplitOrMerge, closeRegion, configureInfoServer, constructRegionServer, convertThrowableToIOE, createClusterConnection, createConnection, createRegionLoad, createRegionServerStatusStub,
[11/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0b638133/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestPerColumnFamilyFlush.html -- diff --git a/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestPerColumnFamilyFlush.html b/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestPerColumnFamilyFlush.html index aef93fa..771d9be 100644 --- a/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestPerColumnFamilyFlush.html +++ b/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestPerColumnFamilyFlush.html @@ -453,7 +453,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html? testSelectiveFlushWhenNotEnabled -publicvoidtestSelectiveFlushWhenNotEnabled() +publicvoidtestSelectiveFlushWhenNotEnabled() throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true; title="class or interface in java.io">IOException Throws: @@ -467,7 +467,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html? getRegionWithName -private staticorg.apache.hadoop.hbase.util.Pairorg.apache.hadoop.hbase.regionserver.HRegion,org.apache.hadoop.hbase.regionserver.HRegionServergetRegionWithName(org.apache.hadoop.hbase.TableNametableName) +private staticorg.apache.hadoop.hbase.util.Pairorg.apache.hadoop.hbase.regionserver.HRegion,org.apache.hadoop.hbase.regionserver.HRegionServergetRegionWithName(org.apache.hadoop.hbase.TableNametableName) @@ -476,7 +476,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html? doTestLogReplay -privatevoiddoTestLogReplay() +privatevoiddoTestLogReplay() throws http://docs.oracle.com/javase/8/docs/api/java/lang/Exception.html?is-external=true; title="class or interface in java.lang">Exception Throws: @@ -490,7 +490,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html? 
testLogReplayWithDistributedLogSplit -publicvoidtestLogReplayWithDistributedLogSplit() +publicvoidtestLogReplayWithDistributedLogSplit() throws http://docs.oracle.com/javase/8/docs/api/java/lang/Exception.html?is-external=true; title="class or interface in java.lang">Exception Throws: @@ -504,7 +504,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html? getWAL -privateorg.apache.hadoop.hbase.wal.WALgetWAL(org.apache.hadoop.hbase.regionserver.Regionregion) +privateorg.apache.hadoop.hbase.wal.WALgetWAL(org.apache.hadoop.hbase.regionserver.Regionregion) @@ -513,7 +513,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html? getNumRolledLogFiles -privateintgetNumRolledLogFiles(org.apache.hadoop.hbase.regionserver.Regionregion) +privateintgetNumRolledLogFiles(org.apache.hadoop.hbase.regionserver.Regionregion) @@ -522,7 +522,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html? testFlushingWhenLogRolling -publicvoidtestFlushingWhenLogRolling() +publicvoidtestFlushingWhenLogRolling() throws http://docs.oracle.com/javase/8/docs/api/java/lang/Exception.html?is-external=true; title="class or interface in java.lang">Exception When a log roll is about to happen, we do a flush of the regions who will be affected by the log roll. These flushes cannot be a selective flushes, otherwise we cannot roll the logs. This @@ -540,7 +540,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html? 
doPut -privatevoiddoPut(org.apache.hadoop.hbase.client.Tabletable, +privatevoiddoPut(org.apache.hadoop.hbase.client.Tabletable, longmemstoreFlushSize) throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true; title="class or interface in java.io">IOException, http://docs.oracle.com/javase/8/docs/api/java/lang/InterruptedException.html?is-external=true; title="class or interface in java.lang">InterruptedException @@ -557,7 +557,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html? testCompareStoreFileCount -publicvoidtestCompareStoreFileCount() +publicvoidtestCompareStoreFileCount() throws http://docs.oracle.com/javase/8/docs/api/java/lang/Exception.html?is-external=true; title="class or interface in java.lang">Exception Throws: @@ -571,7 +571,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html? main -public staticvoidmain(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">String[]args) +public staticvoidmain(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">String[]args) throws
[33/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0b638133/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.PeriodicMemStoreFlusher.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.PeriodicMemStoreFlusher.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.PeriodicMemStoreFlusher.html index 163ade0..802fc2f 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.PeriodicMemStoreFlusher.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.PeriodicMemStoreFlusher.html @@ -254,3512 +254,3505 @@ 246 protected MemStoreFlusher cacheFlusher; 247 248 protected HeapMemoryManager hMemManager; -249 protected CountDownLatch initLatch = null; -250 -251 /** -252 * Cluster connection to be shared by services. -253 * Initialized at server startup and closed when server shuts down. -254 * Clients must never close it explicitly. -255 */ -256 protected ClusterConnection clusterConnection; -257 -258 /* -259 * Long-living meta table locator, which is created when the server is started and stopped -260 * when server shuts down. References to this locator shall be used to perform according -261 * operations in EventHandlers. Primary reason for this decision is to make it mockable -262 * for tests. -263 */ -264 protected MetaTableLocator metaTableLocator; -265 -266 /** -267 * Go here to get table descriptors. -268 */ -269 protected TableDescriptors tableDescriptors; -270 -271 // Replication services. If no replication, this handler will be null. -272 protected ReplicationSourceService replicationSourceHandler; -273 protected ReplicationSinkService replicationSinkHandler; -274 -275 // Compactions -276 public CompactSplit compactSplitThread; -277 -278 /** -279 * Map of regions currently being served by this region server. Key is the -280 * encoded region name. All access should be synchronized. 
-281 */ -282 protected final MapString, HRegion onlineRegions = new ConcurrentHashMap(); -283 -284 /** -285 * Map of encoded region names to the DataNode locations they should be hosted on -286 * We store the value as InetSocketAddress since this is used only in HDFS -287 * API (create() that takes favored nodes as hints for placing file blocks). -288 * We could have used ServerName here as the value class, but we'd need to -289 * convert it to InetSocketAddress at some point before the HDFS API call, and -290 * it seems a bit weird to store ServerName since ServerName refers to RegionServers -291 * and here we really mean DataNode locations. -292 */ -293 protected final MapString, InetSocketAddress[] regionFavoredNodesMap = -294 new ConcurrentHashMap(); -295 -296 // Leases -297 protected Leases leases; -298 -299 // Instance of the hbase executor executorService. -300 protected ExecutorService executorService; -301 -302 // If false, the file system has become unavailable -303 protected volatile boolean fsOk; -304 protected HFileSystem fs; -305 protected HFileSystem walFs; -306 -307 // Set when a report to the master comes back with a message asking us to -308 // shutdown. Also set by call to stop when debugging or running unit tests -309 // of HRegionServer in isolation. -310 private volatile boolean stopped = false; -311 -312 // Go down hard. Used if file system becomes unavailable and also in -313 // debugging and unit tests. -314 private volatile boolean abortRequested; -315 -316 ConcurrentMapString, Integer rowlocks = new ConcurrentHashMap(); -317 -318 // A state before we go into stopped state. At this stage we're closing user -319 // space regions. 
-320 private boolean stopping = false; -321 -322 volatile boolean killed = false; -323 -324 protected final Configuration conf; -325 -326 private Path rootDir; -327 private Path walRootDir; -328 -329 protected final ReentrantReadWriteLock lock = new ReentrantReadWriteLock(); -330 -331 final int numRetries; -332 protected final int threadWakeFrequency; -333 protected final int msgInterval; -334 -335 protected final int numRegionsToReport; -336 -337 // Stub to do region server status calls against the master. -338 private volatile RegionServerStatusService.BlockingInterface rssStub; -339 private volatile LockService.BlockingInterface lockStub; -340 // RPC client. Used to make the stub above that does region server status checking. -341 RpcClient rpcClient; -342 -343 private RpcRetryingCallerFactory rpcRetryingCallerFactory; -344 private RpcControllerFactory rpcControllerFactory; -345 -346 private UncaughtExceptionHandler uncaughtExceptionHandler; -347 -348 // Info server. Default access so can be used by unit tests. REGIONSERVER -349 // is name of the webapp and the attribute name used stuffing this instance -350
[46/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0b638133/devapidocs/org/apache/hadoop/hbase/master/HMaster.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/master/HMaster.html b/devapidocs/org/apache/hadoop/hbase/master/HMaster.html index 68b1d48..7c234b6 100644 --- a/devapidocs/org/apache/hadoop/hbase/master/HMaster.html +++ b/devapidocs/org/apache/hadoop/hbase/master/HMaster.html @@ -128,7 +128,7 @@ var activeTableTab = "activeTableTab"; @InterfaceAudience.LimitedPrivate(value="Tools") -public class HMaster +public class HMaster extends HRegionServer implements MasterServices HMaster is the "master server" for HBase. An HBase cluster has one active @@ -455,7 +455,7 @@ implements HRegionServer -cacheConfig, cacheFlusher, clusterConnection, clusterStatusTracker, compactSplitThread, conf, configurationManager, csm, executorService, fs, fsOk, fsUtilizationChore, hMemManager, infoServer, initLatch, leases, lock, MASTER_HOSTNAME_KEY , metaTableLocator, movedRegions, msgInterval, numRegionsToReport, onlineRegions, regionFavoredNodesMap, REGIONSERVER, regionsInTransitionInRS, replicationSinkHandler, replicationSourceHandler, rpcServices, secureBulkLoadManager, serverName, sleeper, startcode, tableDescriptors, TEST_SKIP_REPORTING_TRANSITION, threadWakeFrequency, useThisHostnameInstead, walFactory, walFs, walRoller, zooKeeper +cacheConfig, cacheFlusher, clusterConnection, clusterStatusTracker, compactSplitThread, conf, configurationManager, csm, executorService, fs, fsOk, fsUtilizationChore, hMemManager, infoServer, leases, lock, MASTER_HOSTNAME_KEY, metaTa bleLocator, movedRegions, msgInterval, numRegionsToReport, onlineRegions, regionFavoredNodesMap, REGIONSERVER, regionsInTransitionInRS, replicationSinkHandler, replicationSourceHandler, rpcServices, secureBulkLoadManager, serverName, sleeper, startcode, tableDescriptors, TEST_SKIP_REPORTING_TRANSITION, threadWakeFrequency, useThisHostnameInstead, walFactory, walFs, walRoller, zooKeeper @@ -1389,7 +1389,7 
@@ implements HRegionServer -abort, addRegion, addToMovedRegions, checkFileSystem, cleanMovedRegions, clearRegionBlockCache, closeAllRegions, href="../../../../../org/apache/hadoop/hbase/regionserver/HRegionServer.html#closeAndOfflineRegionForSplitOrMerge-java.util.List-">closeAndOfflineRegionForSplitOrMerge, > href="../../../../../org/apache/hadoop/hbase/regionserver/HRegionServer.html#closeRegion-java.lang.String-boolean-org.apache.hadoop.hbase.ServerName-">closeRegion, > href="../../../../../org/apache/hadoop/hbase/regionserver/HRegionServer.html#constructRegionServer-java.lang.Class-org.apache.hadoop.conf.Configuration-">constructRegionServer, > href="../../../../../org/apache/hadoop/hbase/regionserver/HRegionServer.html#convertThrowableToIOE-java.lang.Throwable-java.lang.String-">convertThrowableToIOE, > href="../../../../../org/apache/hadoop/hbase/regionserver/HRegionServer.html#createClusterConnection--">createClusterConnection, > href="../../../../../org/apache/hadoop/hbase/regionserver/HRegionServer.html#createConnection-org.apache.hadoop.conf.Configuration-">createConnection, > < a href="../../../../../org/apache/hadoop/hbase/regionserver/HRegionServer.html#createRegionLoad-java.lang.String-">createRegionLoad, createRegionServerStatusStub, createRegionServerStatusStub, execRegionServerService, executeProcedure, getCacheConfig, getChoreService, getClusterConnection, getClusterId, getCompactionPressure, getCompactionRequestor, getCompactSplitThread, getConfiguration, getConfigurationManager, getConnection, getCoordinatedStateManager, getEventLoopGroupConfig, getExecutorService, getFavoredNodesForRegion, getFileSystem, getFlushPressure, getFlushRequester, getFlushThroughputController, getFsTableDescriptors, getHeapMemoryManager, getInfoServer, getLastSequenceId, getLeases, getMasterAddressTracker, getMetaTableLocator, getMetrics, getMostLoadedRegions, getNonceManager, getNumberOfOnlineRegions, getOnlineRegion, getOnlineRegionsLocalContext, getOnlineTabl es, 
getRegion, getRegion, getRegionBlockLocations, getRegionByEncodedName, getRegionByEncodedName, getRegions, getRegions, getRegionServerAccounting, getRegionServerCoprocessorHost, getRegionServerCoprocessors, getRegionServerMetrics, getRegionServerRpcQuotaManager, getRegionServerSpaceQuotaManager, getRegionsInTransitionInRS, getReplicationSourceService, getRootDir, getRpcServer, getRSRpcServices, getSecureBulkLoadManager, getStartcode, getThreadWakeFrequency, getWAL, getWALFileSystem, getWalRoller, getWALRootDir, getWALs, handleReportForDutyResponse, initializeMemStoreChunkCreator, isAborted, isOnline, isStopped, isStopping, kill, movedRegionCleanerPeriod, onConfigurationChange, postOpenDeployTasks, regionLock, remoteProcedureComplete,
[38/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0b638133/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html index fa489c5..1fc3ca7 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html @@ -47,3488 +47,3492 @@ 039import java.util.Map.Entry; 040import java.util.Objects; 041import java.util.Set; -042import java.util.concurrent.CountDownLatch; -043import java.util.concurrent.ExecutionException; -044import java.util.concurrent.Future; -045import java.util.concurrent.TimeUnit; -046import java.util.concurrent.TimeoutException; -047import java.util.concurrent.atomic.AtomicInteger; -048import java.util.concurrent.atomic.AtomicReference; -049import java.util.function.Function; -050import java.util.regex.Pattern; -051import java.util.stream.Collectors; -052import javax.servlet.ServletException; -053import javax.servlet.http.HttpServlet; -054import javax.servlet.http.HttpServletRequest; -055import javax.servlet.http.HttpServletResponse; -056import org.apache.hadoop.conf.Configuration; -057import org.apache.hadoop.fs.Path; -058import org.apache.hadoop.hbase.ClusterMetrics; -059import org.apache.hadoop.hbase.ClusterMetrics.Option; -060import org.apache.hadoop.hbase.ClusterMetricsBuilder; -061import org.apache.hadoop.hbase.CoordinatedStateException; -062import org.apache.hadoop.hbase.DoNotRetryIOException; -063import org.apache.hadoop.hbase.HBaseIOException; -064import org.apache.hadoop.hbase.HBaseInterfaceAudience; -065import org.apache.hadoop.hbase.HConstants; -066import org.apache.hadoop.hbase.InvalidFamilyOperationException; -067import org.apache.hadoop.hbase.MasterNotRunningException; -068import org.apache.hadoop.hbase.MetaTableAccessor; -069import org.apache.hadoop.hbase.NamespaceDescriptor; -070import 
org.apache.hadoop.hbase.PleaseHoldException; -071import org.apache.hadoop.hbase.ReplicationPeerNotFoundException; -072import org.apache.hadoop.hbase.ServerLoad; -073import org.apache.hadoop.hbase.ServerMetricsBuilder; -074import org.apache.hadoop.hbase.ServerName; -075import org.apache.hadoop.hbase.TableDescriptors; -076import org.apache.hadoop.hbase.TableName; -077import org.apache.hadoop.hbase.TableNotDisabledException; -078import org.apache.hadoop.hbase.TableNotFoundException; -079import org.apache.hadoop.hbase.UnknownRegionException; -080import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; -081import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; -082import org.apache.hadoop.hbase.client.MasterSwitchType; -083import org.apache.hadoop.hbase.client.RegionInfo; -084import org.apache.hadoop.hbase.client.Result; -085import org.apache.hadoop.hbase.client.TableDescriptor; -086import org.apache.hadoop.hbase.client.TableDescriptorBuilder; -087import org.apache.hadoop.hbase.client.TableState; -088import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; -089import org.apache.hadoop.hbase.exceptions.DeserializationException; -090import org.apache.hadoop.hbase.exceptions.MergeRegionException; -091import org.apache.hadoop.hbase.executor.ExecutorType; -092import org.apache.hadoop.hbase.favored.FavoredNodesManager; -093import org.apache.hadoop.hbase.favored.FavoredNodesPromoter; -094import org.apache.hadoop.hbase.http.InfoServer; -095import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils; -096import org.apache.hadoop.hbase.ipc.RpcServer; -097import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException; -098import org.apache.hadoop.hbase.log.HBaseMarkers; -099import org.apache.hadoop.hbase.master.MasterRpcServices.BalanceSwitchMode; -100import org.apache.hadoop.hbase.master.assignment.AssignmentManager; -101import org.apache.hadoop.hbase.master.assignment.MergeTableRegionsProcedure; -102import org.apache.hadoop.hbase.master.assignment.RegionStates; 
-103import org.apache.hadoop.hbase.master.assignment.RegionStates.RegionStateNode; -104import org.apache.hadoop.hbase.master.balancer.BalancerChore; -105import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer; -106import org.apache.hadoop.hbase.master.balancer.ClusterStatusChore; -107import org.apache.hadoop.hbase.master.balancer.LoadBalancerFactory; -108import org.apache.hadoop.hbase.master.cleaner.HFileCleaner; -109import org.apache.hadoop.hbase.master.cleaner.LogCleaner; -110import org.apache.hadoop.hbase.master.cleaner.ReplicationMetaCleaner; -111import org.apache.hadoop.hbase.master.locking.LockManager; -112import org.apache.hadoop.hbase.master.normalizer.NormalizationPlan; -113import org.apache.hadoop.hbase.master.normalizer.NormalizationPlan.PlanType; -114import org.apache.hadoop.hbase.master.normalizer.RegionNormalizer; -115import
[42/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0b638133/devapidocs/org/apache/hadoop/hbase/regionserver/NoTagByteBufferChunkKeyValue.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/NoTagByteBufferChunkKeyValue.html b/devapidocs/org/apache/hadoop/hbase/regionserver/NoTagByteBufferChunkKeyValue.html index 752ff00..4ae055b 100644 --- a/devapidocs/org/apache/hadoop/hbase/regionserver/NoTagByteBufferChunkKeyValue.html +++ b/devapidocs/org/apache/hadoop/hbase/regionserver/NoTagByteBufferChunkKeyValue.html @@ -253,18 +253,18 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#clone--; title="class or interface in java.lang">clone, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#finalize--; title="class or interface in java.lang">finalize, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#getClass--; title="class or interface in java.lang">getClass, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#notify--; title="class or interface in java.lang">notify, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#notifyAll--; title="class or interface in java.lang">notifyAll, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#wait--; title="class or interface in java.lang">wait, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-ex ternal=true#wait-long-" title="class or interface in java.lang">wait, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#wait-long-int-; title="class or interface in java.lang">wait - + -Methods inherited from interfaceorg.apache.hadoop.hbase.ExtendedCell -getType +Methods inherited from interfaceorg.apache.hadoop.hbase.RawCell +checkForTagsLength, cloneTags, getTag, getTags - + -Methods inherited from interfaceorg.apache.hadoop.hbase.RawCell -checkForTagsLength, cloneTags, getTag, getTags 
+Methods inherited from interfaceorg.apache.hadoop.hbase.Cell +getType http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0b638133/devapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html b/devapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html index c21343b..cab1907 100644 --- a/devapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html +++ b/devapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html @@ -704,19 +704,19 @@ java.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true; title="class or interface in java.lang">EnumE (implements java.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true; title="class or interface in java.lang">ComparableT, java.io.http://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true; title="class or interface in java.io">Serializable) +org.apache.hadoop.hbase.regionserver.CompactingMemStore.IndexType +org.apache.hadoop.hbase.regionserver.HRegion.FlushResult.Result +org.apache.hadoop.hbase.regionserver.BloomType org.apache.hadoop.hbase.regionserver.SplitLogWorker.TaskExecutor.Status -org.apache.hadoop.hbase.regionserver.DefaultHeapMemoryTuner.StepDirection -org.apache.hadoop.hbase.regionserver.ScannerContext.LimitScope -org.apache.hadoop.hbase.regionserver.MemStoreCompactionStrategy.Action org.apache.hadoop.hbase.regionserver.FlushType org.apache.hadoop.hbase.regionserver.TimeRangeTracker.Type org.apache.hadoop.hbase.regionserver.MetricsRegionServerSourceFactoryImpl.FactoryStorage -org.apache.hadoop.hbase.regionserver.BloomType -org.apache.hadoop.hbase.regionserver.CompactingMemStore.IndexType +org.apache.hadoop.hbase.regionserver.MemStoreCompactionStrategy.Action org.apache.hadoop.hbase.regionserver.ScannerContext.NextState +org.apache.hadoop.hbase.regionserver.DefaultHeapMemoryTuner.StepDirection 
org.apache.hadoop.hbase.regionserver.ScanType +org.apache.hadoop.hbase.regionserver.ScannerContext.LimitScope org.apache.hadoop.hbase.regionserver.Region.Operation -org.apache.hadoop.hbase.regionserver.HRegion.FlushResult.Result http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0b638133/devapidocs/org/apache/hadoop/hbase/regionserver/querymatcher/package-tree.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/querymatcher/package-tree.html b/devapidocs/org/apache/hadoop/hbase/regionserver/querymatcher/package-tree.html index c2cf4f9..e97b095 100644 --- a/devapidocs/org/apache/hadoop/hbase/regionserver/querymatcher/package-tree.html +++ b/devapidocs/org/apache/hadoop/hbase/regionserver/querymatcher/package-tree.html @@ -130,8
[25/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0b638133/testdevapidocs/index-all.html -- diff --git a/testdevapidocs/index-all.html b/testdevapidocs/index-all.html index a11a540..e90f071 100644 --- a/testdevapidocs/index-all.html +++ b/testdevapidocs/index-all.html @@ -264,6 +264,30 @@ AbstractTestAsyncTableScan() - Constructor for class org.apache.hadoop.hbase.client.AbstractTestAsyncTableScan +AbstractTestCIOperationTimeout - Class in org.apache.hadoop.hbase.client + +Based class for testing operation timeout logic for ConnectionImplementation. + +AbstractTestCIOperationTimeout() - Constructor for class org.apache.hadoop.hbase.client.AbstractTestCIOperationTimeout + +AbstractTestCIRpcTimeout - Class in org.apache.hadoop.hbase.client + +Based class for testing rpc timeout logic for ConnectionImplementation. + +AbstractTestCIRpcTimeout() - Constructor for class org.apache.hadoop.hbase.client.AbstractTestCIRpcTimeout + +AbstractTestCITimeout - Class in org.apache.hadoop.hbase.client + +Based class for testing timeout logic for ConnectionImplementation. + +AbstractTestCITimeout() - Constructor for class org.apache.hadoop.hbase.client.AbstractTestCITimeout + +AbstractTestCITimeout.SleepAndFailFirstTime - Class in org.apache.hadoop.hbase.client + +This copro sleeps 20 second. 
+ +AbstractTestCITimeout.SleepCoprocessor - Class in org.apache.hadoop.hbase.client + AbstractTestDateTieredCompactionPolicy - Class in org.apache.hadoop.hbase.regionserver AbstractTestDateTieredCompactionPolicy() - Constructor for class org.apache.hadoop.hbase.regionserver.AbstractTestDateTieredCompactionPolicy @@ -1467,9 +1491,9 @@ assertEquals(WAL.Entry, WAL.Entry) - Method in class org.apache.hadoop.hbase.replication.TestReplicationWALEntryFilters -assertEqualsWithJitter(long, long) - Static method in class org.apache.hadoop.hbase.client.TestHCM +assertEqualsWithJitter(long, long) - Static method in class org.apache.hadoop.hbase.client.TestConnectionImplementation -assertEqualsWithJitter(long, long, long) - Static method in class org.apache.hadoop.hbase.client.TestHCM +assertEqualsWithJitter(long, long, long) - Static method in class org.apache.hadoop.hbase.client.TestConnectionImplementation assertEqualTables(int, TableName, TableName) - Method in class org.apache.hadoop.hbase.mapreduce.TestSyncTable @@ -2514,7 +2538,7 @@ BlockingCompactionContext() - Constructor for class org.apache.hadoop.hbase.regionserver.TestCompaction.BlockingStoreMockMaker.BlockingCompactionContext -BlockingFilter() - Constructor for class org.apache.hadoop.hbase.client.TestHCM.BlockingFilter +BlockingFilter() - Constructor for class org.apache.hadoop.hbase.client.TestConnectionImplementation.BlockingFilter blockingStoreFiles - Static variable in class org.apache.hadoop.hbase.master.cleaner.TestSnapshotFromMaster @@ -5787,7 +5811,7 @@ CONN - Static variable in class org.apache.hadoop.hbase.client.TestAsyncTableScanRenewLease -conn - Static variable in class org.apache.hadoop.hbase.client.TestConnectionImplementation +conn - Static variable in class org.apache.hadoop.hbase.client.TestCIBadHostname CONN - Static variable in class org.apache.hadoop.hbase.client.TestMvccConsistentScanner @@ -7889,12 +7913,12 @@ csrfEnabled - Static variable in class 
org.apache.hadoop.hbase.rest.TestSchemaResource +ct - Static variable in class org.apache.hadoop.hbase.client.AbstractTestCITimeout.SleepAndFailFirstTime + ct - Static variable in class org.apache.hadoop.hbase.client.HConnectionTestingUtility.SleepAtFirstRpcCall ct - Static variable in class org.apache.hadoop.hbase.client.TestDropTimeoutRequest.SleepLongerAtFirstCoprocessor -ct - Static variable in class org.apache.hadoop.hbase.client.TestHCM.SleepAndFailFirstTime - ct - Static variable in class org.apache.hadoop.hbase.client.TestServerBusyException.SleepLongerAtFirstCoprocessor ctBeforeDelete - Variable in class org.apache.hadoop.hbase.coprocessor.SimpleRegionObserver @@ -8535,9 +8559,9 @@ DEFAULT_SERVER_USERNAME - Static variable in class org.apache.hadoop.hbase.RESTApiClusterManager -DEFAULT_SLEEP_TIME - Static variable in class org.apache.hadoop.hbase.client.HConnectionTestingUtility.SleepAtFirstRpcCall +DEFAULT_SLEEP_TIME - Static variable in class org.apache.hadoop.hbase.client.AbstractTestCITimeout.SleepAndFailFirstTime -DEFAULT_SLEEP_TIME - Static variable in class org.apache.hadoop.hbase.client.TestHCM.SleepAndFailFirstTime +DEFAULT_SLEEP_TIME - Static variable in class org.apache.hadoop.hbase.client.HConnectionTestingUtility.SleepAtFirstRpcCall DEFAULT_START_KEY - Static variable in class org.apache.hadoop.hbase.util.LoadTestTool @@ -10227,10 +10251,28 @@ execute() - Method in class org.apache.hadoop.hbase.backup.TestBackupBase.IncrementalTableBackupClientForTest +execute(Table) - Method in class
[27/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0b638133/hbase-build-configuration/hbase-archetypes/hbase-client-project/dependency-convergence.html -- diff --git a/hbase-build-configuration/hbase-archetypes/hbase-client-project/dependency-convergence.html b/hbase-build-configuration/hbase-archetypes/hbase-client-project/dependency-convergence.html index bf1e334..f55c3a6 100644 --- a/hbase-build-configuration/hbase-archetypes/hbase-client-project/dependency-convergence.html +++ b/hbase-build-configuration/hbase-archetypes/hbase-client-project/dependency-convergence.html @@ -7,7 +7,7 @@ - + Apache HBase - Exemplar for hbase-client archetype Reactor Dependency Convergence @@ -912,7 +912,7 @@ https://www.apache.org/;>The Apache Software Foundation. All rights reserved. - Last Published: 2018-01-11 + Last Published: 2018-01-12 http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0b638133/hbase-build-configuration/hbase-archetypes/hbase-client-project/dependency-info.html -- diff --git a/hbase-build-configuration/hbase-archetypes/hbase-client-project/dependency-info.html b/hbase-build-configuration/hbase-archetypes/hbase-client-project/dependency-info.html index bdccdc9..692a5be 100644 --- a/hbase-build-configuration/hbase-archetypes/hbase-client-project/dependency-info.html +++ b/hbase-build-configuration/hbase-archetypes/hbase-client-project/dependency-info.html @@ -7,7 +7,7 @@ - + Apache HBase - Exemplar for hbase-client archetype Dependency Information @@ -147,7 +147,7 @@ https://www.apache.org/;>The Apache Software Foundation. All rights reserved. 
- Last Published: 2018-01-11 + Last Published: 2018-01-12 http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0b638133/hbase-build-configuration/hbase-archetypes/hbase-client-project/dependency-management.html -- diff --git a/hbase-build-configuration/hbase-archetypes/hbase-client-project/dependency-management.html b/hbase-build-configuration/hbase-archetypes/hbase-client-project/dependency-management.html index 482ab8a..04acfbe 100644 --- a/hbase-build-configuration/hbase-archetypes/hbase-client-project/dependency-management.html +++ b/hbase-build-configuration/hbase-archetypes/hbase-client-project/dependency-management.html @@ -7,7 +7,7 @@ - + Apache HBase - Exemplar for hbase-client archetype Project Dependency Management @@ -810,7 +810,7 @@ https://www.apache.org/;>The Apache Software Foundation. All rights reserved. - Last Published: 2018-01-11 + Last Published: 2018-01-12 http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0b638133/hbase-build-configuration/hbase-archetypes/hbase-client-project/index.html -- diff --git a/hbase-build-configuration/hbase-archetypes/hbase-client-project/index.html b/hbase-build-configuration/hbase-archetypes/hbase-client-project/index.html index 9bf6e8e..9d5eee1 100644 --- a/hbase-build-configuration/hbase-archetypes/hbase-client-project/index.html +++ b/hbase-build-configuration/hbase-archetypes/hbase-client-project/index.html @@ -7,7 +7,7 @@ - + Apache HBase - Exemplar for hbase-client archetype About @@ -119,7 +119,7 @@ https://www.apache.org/;>The Apache Software Foundation. All rights reserved. 
- Last Published: 2018-01-11 + Last Published: 2018-01-12 http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0b638133/hbase-build-configuration/hbase-archetypes/hbase-client-project/integration.html -- diff --git a/hbase-build-configuration/hbase-archetypes/hbase-client-project/integration.html b/hbase-build-configuration/hbase-archetypes/hbase-client-project/integration.html index 06a6c6f..3b2cf47 100644 --- a/hbase-build-configuration/hbase-archetypes/hbase-client-project/integration.html +++ b/hbase-build-configuration/hbase-archetypes/hbase-client-project/integration.html @@ -7,7 +7,7 @@ - + Apache HBase - Exemplar for hbase-client archetype CI Management @@ -126,7 +126,7 @@
[35/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0b638133/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionInfo.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionInfo.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionInfo.html index 163ade0..802fc2f 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionInfo.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionInfo.html @@ -254,3512 +254,3505 @@ 246 protected MemStoreFlusher cacheFlusher; 247 248 protected HeapMemoryManager hMemManager; -249 protected CountDownLatch initLatch = null; -250 -251 /** -252 * Cluster connection to be shared by services. -253 * Initialized at server startup and closed when server shuts down. -254 * Clients must never close it explicitly. -255 */ -256 protected ClusterConnection clusterConnection; -257 -258 /* -259 * Long-living meta table locator, which is created when the server is started and stopped -260 * when server shuts down. References to this locator shall be used to perform according -261 * operations in EventHandlers. Primary reason for this decision is to make it mockable -262 * for tests. -263 */ -264 protected MetaTableLocator metaTableLocator; -265 -266 /** -267 * Go here to get table descriptors. -268 */ -269 protected TableDescriptors tableDescriptors; -270 -271 // Replication services. If no replication, this handler will be null. -272 protected ReplicationSourceService replicationSourceHandler; -273 protected ReplicationSinkService replicationSinkHandler; -274 -275 // Compactions -276 public CompactSplit compactSplitThread; -277 -278 /** -279 * Map of regions currently being served by this region server. Key is the -280 * encoded region name. All access should be synchronized. 
-281 */ -282 protected final MapString, HRegion onlineRegions = new ConcurrentHashMap(); -283 -284 /** -285 * Map of encoded region names to the DataNode locations they should be hosted on -286 * We store the value as InetSocketAddress since this is used only in HDFS -287 * API (create() that takes favored nodes as hints for placing file blocks). -288 * We could have used ServerName here as the value class, but we'd need to -289 * convert it to InetSocketAddress at some point before the HDFS API call, and -290 * it seems a bit weird to store ServerName since ServerName refers to RegionServers -291 * and here we really mean DataNode locations. -292 */ -293 protected final MapString, InetSocketAddress[] regionFavoredNodesMap = -294 new ConcurrentHashMap(); -295 -296 // Leases -297 protected Leases leases; -298 -299 // Instance of the hbase executor executorService. -300 protected ExecutorService executorService; -301 -302 // If false, the file system has become unavailable -303 protected volatile boolean fsOk; -304 protected HFileSystem fs; -305 protected HFileSystem walFs; -306 -307 // Set when a report to the master comes back with a message asking us to -308 // shutdown. Also set by call to stop when debugging or running unit tests -309 // of HRegionServer in isolation. -310 private volatile boolean stopped = false; -311 -312 // Go down hard. Used if file system becomes unavailable and also in -313 // debugging and unit tests. -314 private volatile boolean abortRequested; -315 -316 ConcurrentMapString, Integer rowlocks = new ConcurrentHashMap(); -317 -318 // A state before we go into stopped state. At this stage we're closing user -319 // space regions. 
-320 private boolean stopping = false; -321 -322 volatile boolean killed = false; -323 -324 protected final Configuration conf; -325 -326 private Path rootDir; -327 private Path walRootDir; -328 -329 protected final ReentrantReadWriteLock lock = new ReentrantReadWriteLock(); -330 -331 final int numRetries; -332 protected final int threadWakeFrequency; -333 protected final int msgInterval; -334 -335 protected final int numRegionsToReport; -336 -337 // Stub to do region server status calls against the master. -338 private volatile RegionServerStatusService.BlockingInterface rssStub; -339 private volatile LockService.BlockingInterface lockStub; -340 // RPC client. Used to make the stub above that does region server status checking. -341 RpcClient rpcClient; -342 -343 private RpcRetryingCallerFactory rpcRetryingCallerFactory; -344 private RpcControllerFactory rpcControllerFactory; -345 -346 private UncaughtExceptionHandler uncaughtExceptionHandler; -347 -348 // Info server. Default access so can be used by unit tests. REGIONSERVER -349 // is name of the webapp and the attribute name used stuffing this instance -350 // into web context. -351 protected
[08/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0b638133/testdevapidocs/src-html/org/apache/hadoop/hbase/TestCellUtil.TestCellScannable.html -- diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/TestCellUtil.TestCellScannable.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/TestCellUtil.TestCellScannable.html index 232ef56..bc3a6d0 100644 --- a/testdevapidocs/src-html/org/apache/hadoop/hbase/TestCellUtil.TestCellScannable.html +++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/TestCellUtil.TestCellScannable.html @@ -29,610 +29,626 @@ 021import static org.junit.Assert.assertEquals; 022import static org.junit.Assert.assertFalse; 023import static org.junit.Assert.assertTrue; -024 -025import java.io.ByteArrayOutputStream; -026import java.io.IOException; -027import java.math.BigDecimal; -028import java.nio.ByteBuffer; -029import java.util.ArrayList; -030import java.util.List; -031import java.util.NavigableMap; -032import java.util.TreeMap; -033import org.apache.hadoop.hbase.testclassification.MiscTests; -034import org.apache.hadoop.hbase.testclassification.SmallTests; -035import org.apache.hadoop.hbase.util.Bytes; -036import org.junit.Assert; -037import org.junit.Test; -038import org.junit.experimental.categories.Category; -039 -040@Category({MiscTests.class, SmallTests.class}) -041public class TestCellUtil { -042 /** -043 * CellScannable used in test. Returns a {@link TestCellScanner} -044 */ -045 private static class TestCellScannable implements CellScannable { -046private final int cellsCount; -047TestCellScannable(final int cellsCount) { -048 this.cellsCount = cellsCount; -049} -050@Override -051public CellScanner cellScanner() { -052 return new TestCellScanner(this.cellsCount); -053} -054 } -055 -056 /** -057 * CellScanner used in test. 
-058 */ -059 private static class TestCellScanner implements CellScanner { -060private int count = 0; -061private Cell current = null; -062private final int cellsCount; -063 -064TestCellScanner(final int cellsCount) { -065 this.cellsCount = cellsCount; -066} -067 -068@Override -069public Cell current() { -070 return this.current; -071} -072 -073@Override -074public boolean advance() throws IOException { -075 if (this.count cellsCount) { -076this.current = new TestCell(this.count); -077this.count++; -078return true; -079 } -080 return false; -081} -082 } -083 -084 /** -085 * Cell used in test. Has row only. -086 */ -087 private static class TestCell implements Cell { -088private final byte [] row; -089 -090TestCell(final int i) { -091 this.row = Bytes.toBytes(i); -092} -093 -094@Override -095public byte[] getRowArray() { -096 return this.row; -097} -098 -099@Override -100public int getRowOffset() { -101 return 0; -102} -103 -104@Override -105public short getRowLength() { -106 return (short)this.row.length; -107} -108 -109@Override -110public byte[] getFamilyArray() { -111 // TODO Auto-generated method stub -112 return null; -113} -114 -115@Override -116public int getFamilyOffset() { -117 // TODO Auto-generated method stub -118 return 0; -119} -120 -121@Override -122public byte getFamilyLength() { -123 // TODO Auto-generated method stub -124 return 0; -125} -126 -127@Override -128public byte[] getQualifierArray() { -129 // TODO Auto-generated method stub -130 return null; -131} -132 -133@Override -134public int getQualifierOffset() { -135 // TODO Auto-generated method stub -136 return 0; -137} -138 -139@Override -140public int getQualifierLength() { -141 // TODO Auto-generated method stub -142 return 0; -143} -144 -145@Override -146public long getTimestamp() { -147 // TODO Auto-generated method stub -148 return 0; -149} -150 -151@Override -152public byte getTypeByte() { -153 // TODO Auto-generated method stub -154 return 0; -155} -156 -157@Override -158public byte[] 
getValueArray() { -159 // TODO Auto-generated method stub -160 return null; -161} -162 -163@Override -164public int getValueOffset() { -165 // TODO Auto-generated method stub -166 return 0; -167} -168 -169@Override -170public int getValueLength() { -171 // TODO Auto-generated method stub -172 return 0; -173} -174 -175@Override -176public byte[] getTagsArray() { -177 // TODO Auto-generated method stub -178 return null; -179} -180 -181@Override -182public int getTagsOffset() { -183 // TODO Auto-generated method stub -184 return 0; -185} -186 -187@Override -188public long
[29/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0b638133/devapidocs/src-html/org/apache/hadoop/hbase/zookeeper/ReadOnlyZKClient.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/zookeeper/ReadOnlyZKClient.html b/devapidocs/src-html/org/apache/hadoop/hbase/zookeeper/ReadOnlyZKClient.html index 5b66298..ea864e9 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/zookeeper/ReadOnlyZKClient.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/zookeeper/ReadOnlyZKClient.html @@ -39,319 +39,329 @@ 031import java.util.concurrent.atomic.AtomicBoolean; 032 033import org.apache.hadoop.conf.Configuration; -034import org.apache.yetus.audience.InterfaceAudience; -035import org.apache.zookeeper.KeeperException; -036import org.apache.zookeeper.KeeperException.Code; -037import org.apache.zookeeper.ZooKeeper; -038import org.apache.zookeeper.data.Stat; -039import org.slf4j.Logger; -040import org.slf4j.LoggerFactory; -041import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; -042 -043/** -044 * A very simple read only zookeeper implementation without watcher support. 
-045 */ -046@InterfaceAudience.Private -047public final class ReadOnlyZKClient implements Closeable { -048 -049 private static final Logger LOG = LoggerFactory.getLogger(ReadOnlyZKClient.class); -050 -051 public static final String RECOVERY_RETRY = "zookeeper.recovery.retry"; -052 -053 private static final int DEFAULT_RECOVERY_RETRY = 30; -054 -055 public static final String RECOVERY_RETRY_INTERVAL_MILLIS = -056 "zookeeper.recovery.retry.intervalmill"; -057 -058 private static final int DEFAULT_RECOVERY_RETRY_INTERVAL_MILLIS = 1000; -059 -060 public static final String KEEPALIVE_MILLIS = "zookeeper.keep-alive.time"; -061 -062 private static final int DEFAULT_KEEPALIVE_MILLIS = 6; -063 -064 private static final EnumSetCode FAIL_FAST_CODES = EnumSet.of(Code.NOAUTH, Code.AUTHFAILED); -065 -066 private final String connectString; -067 -068 private final int sessionTimeoutMs; -069 -070 private final int maxRetries; -071 -072 private final int retryIntervalMs; -073 -074 private final int keepAliveTimeMs; -075 -076 private static abstract class Task implements Delayed { -077 -078protected long time = System.nanoTime(); -079 -080public boolean needZk() { -081 return false; -082} -083 -084public void exec(ZooKeeper zk) { -085} -086 -087public void connectFailed(IOException e) { -088} -089 -090public void closed(IOException e) { -091} -092 -093@Override -094public int compareTo(Delayed o) { -095 Task that = (Task) o; -096 int c = Long.compare(time, that.time); -097 if (c != 0) { -098return c; -099 } -100 return Integer.compare(System.identityHashCode(this), System.identityHashCode(that)); -101} -102 -103@Override -104public long getDelay(TimeUnit unit) { -105 return unit.convert(time - System.nanoTime(), TimeUnit.NANOSECONDS); -106} -107 } -108 -109 private static final Task CLOSE = new Task() { -110 }; -111 -112 private final DelayQueueTask tasks = new DelayQueue(); -113 -114 private final AtomicBoolean closed = new AtomicBoolean(false); -115 -116 private ZooKeeper 
zookeeper; -117 -118 private String getId() { -119return String.format("0x%08x", System.identityHashCode(this)); -120 } -121 -122 public ReadOnlyZKClient(Configuration conf) { -123this.connectString = ZKConfig.getZKQuorumServersString(conf); -124this.sessionTimeoutMs = conf.getInt(ZK_SESSION_TIMEOUT, DEFAULT_ZK_SESSION_TIMEOUT); -125this.maxRetries = conf.getInt(RECOVERY_RETRY, DEFAULT_RECOVERY_RETRY); -126this.retryIntervalMs = -127 conf.getInt(RECOVERY_RETRY_INTERVAL_MILLIS, DEFAULT_RECOVERY_RETRY_INTERVAL_MILLIS); -128this.keepAliveTimeMs = conf.getInt(KEEPALIVE_MILLIS, DEFAULT_KEEPALIVE_MILLIS); -129LOG.info("Start read only zookeeper connection " + getId() + " to " + connectString + -130", session timeout " + sessionTimeoutMs + " ms, retries " + maxRetries + -131", retry interval " + retryIntervalMs + " ms, keep alive " + keepAliveTimeMs + " ms"); -132Thread t = new Thread(this::run, "ReadOnlyZKClient"); -133t.setDaemon(true); -134t.start(); -135 } -136 -137 private abstract class ZKTaskT extends Task { -138 -139protected final String path; -140 -141private final CompletableFutureT future; -142 -143private final String operationType; -144 -145private int retries; -146 -147protected ZKTask(String path, CompletableFutureT future, String operationType) { -148 this.path = path; -149 this.future = future; -150 this.operationType = operationType; -151} -152 -153protected final void onComplete(ZooKeeper zk, int rc, T ret, boolean errorIfNoNode) { -154 tasks.add(new Task() { -155 -156
[28/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0b638133/devapidocs/src-html/org/apache/hadoop/hbase/zookeeper/ZKMainServer.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/zookeeper/ZKMainServer.html b/devapidocs/src-html/org/apache/hadoop/hbase/zookeeper/ZKMainServer.html index c725ebc..85096e8 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/zookeeper/ZKMainServer.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/zookeeper/ZKMainServer.html @@ -6,7 +6,7 @@ -001/** +001/* 002 * Licensed to the Apache Software Foundation (ASF) under one 003 * or more contributor license agreements. See the NOTICE file 004 * distributed with this work for additional information @@ -27,115 +27,107 @@ 019package org.apache.hadoop.hbase.zookeeper; 020 021import java.io.IOException; -022import java.util.concurrent.TimeUnit; -023 -024import org.apache.hadoop.conf.Configuration; -025import org.apache.hadoop.hbase.HBaseConfiguration; -026import org.apache.hadoop.hbase.HBaseInterfaceAudience; -027import org.apache.yetus.audience.InterfaceAudience; -028import org.apache.zookeeper.KeeperException; -029import org.apache.zookeeper.ZooKeeperMain; +022 +023import org.apache.hadoop.conf.Configuration; +024import org.apache.hadoop.hbase.HBaseConfiguration; +025import org.apache.hadoop.hbase.HBaseInterfaceAudience; +026import org.apache.yetus.audience.InterfaceAudience; +027import org.apache.zookeeper.KeeperException; +028import org.apache.zookeeper.ZooKeeperMain; +029 030 -031import org.apache.hbase.thirdparty.com.google.common.base.Stopwatch; -032 -033/** -034 * Tool for running ZookeeperMain from HBase by reading a ZooKeeper server -035 * from HBase XML configuration. 
-036 */ -037@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS) -038public class ZKMainServer { -039 private static final String SERVER_ARG = "-server"; -040 -041 public String parse(final Configuration c) { -042return ZKConfig.getZKQuorumServersString(c); -043 } -044 -045 /** -046 * ZooKeeper 3.4.6 broke being able to pass commands on command line. -047 * See ZOOKEEPER-1897. This class is a hack to restore this faclity. -048 */ -049 private static class HACK_UNTIL_ZOOKEEPER_1897_ZooKeeperMain extends ZooKeeperMain { -050public HACK_UNTIL_ZOOKEEPER_1897_ZooKeeperMain(String[] args) -051throws IOException, InterruptedException { -052 super(args); -053 // Make sure we are connected before we proceed. Can take a while on some systems. If we -054 // run the command without being connected, we get ConnectionLoss KeeperErrorConnection... -055 Stopwatch stopWatch = Stopwatch.createStarted(); -056 while (!this.zk.getState().isConnected()) { -057Thread.sleep(1); -058if (stopWatch.elapsed(TimeUnit.SECONDS) 10) { -059 throw new InterruptedException("Failed connect after waiting " + -060 stopWatch.elapsed(TimeUnit.SECONDS) + "seconds; state=" + this.zk.getState() + -061 "; " + this.zk); -062} -063 } -064} -065 -066/** -067 * Run the command-line args passed. Calls System.exit when done. -068 * @throws KeeperException -069 * @throws IOException -070 * @throws InterruptedException -071 */ -072void runCmdLine() throws KeeperException, IOException, InterruptedException { -073 processCmd(this.cl); -074 System.exit(0); -075} +031/** +032 * Tool for running ZookeeperMain from HBase by reading a ZooKeeper server +033 * from HBase XML configuration. 
+034 */ +035@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS) +036public class ZKMainServer { +037 private static final String SERVER_ARG = "-server"; +038 +039 public String parse(final Configuration c) { +040return ZKConfig.getZKQuorumServersString(c); +041 } +042 +043 /** +044 * ZooKeeper 3.4.6 broke being able to pass commands on command line. +045 * See ZOOKEEPER-1897. This class is a hack to restore this faclity. +046 */ +047 private static class HACK_UNTIL_ZOOKEEPER_1897_ZooKeeperMain extends ZooKeeperMain { +048public HACK_UNTIL_ZOOKEEPER_1897_ZooKeeperMain(String[] args) +049throws IOException, InterruptedException { +050 super(args); +051 // Make sure we are connected before we proceed. Can take a while on some systems. If we +052 // run the command without being connected, we get ConnectionLoss KeeperErrorConnection... +053 // Make it 30seconds. We dont' have a config in this context and zk doesn't have +054 // a timeout until after connection. 3ms is default for zk. +055 ZooKeeperHelper.ensureConnectedZooKeeper(this.zk, 3); +056} +057 +058/** +059 * Run the command-line args passed. Calls System.exit when done. +060 * @throws KeeperException +061 * @throws IOException +062 * @throws
[17/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0b638133/testdevapidocs/org/apache/hadoop/hbase/client/TestHCM.SleepAndFailFirstTime.html -- diff --git a/testdevapidocs/org/apache/hadoop/hbase/client/TestHCM.SleepAndFailFirstTime.html b/testdevapidocs/org/apache/hadoop/hbase/client/TestHCM.SleepAndFailFirstTime.html deleted file mode 100644 index da314cd..000 --- a/testdevapidocs/org/apache/hadoop/hbase/client/TestHCM.SleepAndFailFirstTime.html +++ /dev/null @@ -1,534 +0,0 @@ -http://www.w3.org/TR/html4/loose.dtd;> - - - - - -TestHCM.SleepAndFailFirstTime (Apache HBase 3.0.0-SNAPSHOT Test API) - - - - - -var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10}; -var tabs = {65535:["t0","All Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]}; -var altColor = "altColor"; -var rowColor = "rowColor"; -var tableTab = "tableTab"; -var activeTableTab = "activeTableTab"; - - -JavaScript is disabled on your browser. - - - - - -Skip navigation links - - - - -Overview -Package -Class -Use -Tree -Deprecated -Index -Help - - - - -PrevClass -NextClass - - -Frames -NoFrames - - -AllClasses - - - - - - - -Summary: -Nested| -Field| -Constr| -Method - - -Detail: -Field| -Constr| -Method - - - - - - - - -org.apache.hadoop.hbase.client -Class TestHCM.SleepAndFailFirstTime - - - -http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true; title="class or interface in java.lang">java.lang.Object - - -org.apache.hadoop.hbase.client.TestHCM.SleepAndFailFirstTime - - - - - - - -All Implemented Interfaces: -org.apache.hadoop.hbase.Coprocessor, org.apache.hadoop.hbase.coprocessor.RegionCoprocessor, org.apache.hadoop.hbase.coprocessor.RegionObserver - - -Enclosing class: -TestHCM - - - -public static class TestHCM.SleepAndFailFirstTime -extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true; title="class or interface in java.lang">Object -implements org.apache.hadoop.hbase.coprocessor.RegionCoprocessor, 
org.apache.hadoop.hbase.coprocessor.RegionObserver -This copro sleeps 20 second. The first call it fails. The second time, it works. - - - - - - - - - - - -Nested Class Summary - - - - -Nested classes/interfaces inherited from interfaceorg.apache.hadoop.hbase.Coprocessor -org.apache.hadoop.hbase.Coprocessor.State - - - - - -Nested classes/interfaces inherited from interfaceorg.apache.hadoop.hbase.coprocessor.RegionObserver -org.apache.hadoop.hbase.coprocessor.RegionObserver.MutationType - - - - - - - - -Field Summary - -Fields - -Modifier and Type -Field and Description - - -(package private) static http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/AtomicLong.html?is-external=true; title="class or interface in java.util.concurrent.atomic">AtomicLong -ct - - -(package private) static long -DEFAULT_SLEEP_TIME - - -(package private) static http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">String -SLEEP_TIME_CONF_KEY - - -(package private) static http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/AtomicLong.html?is-external=true; title="class or interface in java.util.concurrent.atomic">AtomicLong -sleepTime - - - - - - -Fields inherited from interfaceorg.apache.hadoop.hbase.Coprocessor -PRIORITY_HIGHEST, PRIORITY_LOWEST, PRIORITY_SYSTEM, PRIORITY_USER, VERSION - - - - - - - - -Constructor Summary - -Constructors - -Constructor and Description - - -SleepAndFailFirstTime() - - - - - - - - - -Method Summary - -All MethodsInstance MethodsConcrete Methods - -Modifier and Type -Method and Description - - -http://docs.oracle.com/javase/8/docs/api/java/util/Optional.html?is-external=true; title="class or interface in java.util">Optionalorg.apache.hadoop.hbase.coprocessor.RegionObserver -getRegionObserver() - - -void -postOpen(org.apache.hadoop.hbase.coprocessor.ObserverContextorg.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironmentc) - - -void 
-preDelete(org.apache.hadoop.hbase.coprocessor.ObserverContextorg.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironmente, - org.apache.hadoop.hbase.client.Deletedelete, - org.apache.hadoop.hbase.wal.WALEditedit, - org.apache.hadoop.hbase.client.Durabilitydurability) - - -void -preGetOp(org.apache.hadoop.hbase.coprocessor.ObserverContextorg.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironmente, -
[51/51] [partial] hbase-site git commit: Published site at .
Published site at . Project: http://git-wip-us.apache.org/repos/asf/hbase-site/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase-site/commit/0b638133 Tree: http://git-wip-us.apache.org/repos/asf/hbase-site/tree/0b638133 Diff: http://git-wip-us.apache.org/repos/asf/hbase-site/diff/0b638133 Branch: refs/heads/asf-site Commit: 0b638133acf74776d7bfe73530626d11cc21af20 Parents: ba96e30 Author: jenkinsAuthored: Fri Jan 12 15:31:09 2018 + Committer: jenkins Committed: Fri Jan 12 15:31:09 2018 + -- acid-semantics.html | 4 +- apache_hbase_reference_guide.pdf| 4 +- apidocs/index-all.html | 2 +- apidocs/org/apache/hadoop/hbase/Cell.html |16 +- .../src-html/org/apache/hadoop/hbase/Cell.html |77 +- book.html | 2 +- bulk-loads.html | 4 +- checkstyle-aggregate.html | 33864 + checkstyle.rss | 204 +- coc.html| 4 +- cygwin.html | 4 +- dependencies.html | 4 +- dependency-convergence.html | 4 +- dependency-info.html| 4 +- dependency-management.html | 4 +- devapidocs/allclasses-frame.html| 1 + devapidocs/allclasses-noframe.html | 1 + devapidocs/constant-values.html | 6 +- devapidocs/index-all.html |35 +- .../hadoop/hbase/ByteBufferExtendedCell.html| 4 +- .../hadoop/hbase/ByteBufferKeyOnlyKeyValue.html | 105 +- .../apache/hadoop/hbase/ByteBufferKeyValue.html | 9 +- .../org/apache/hadoop/hbase/Cell.Type.html |35 +- devapidocs/org/apache/hadoop/hbase/Cell.html|16 +- .../org/apache/hadoop/hbase/ExtendedCell.html |41 +- .../hadoop/hbase/IndividualBytesFieldCell.html | 9 +- .../hadoop/hbase/KeyValue.KeyOnlyKeyValue.html | 9 +- .../org/apache/hadoop/hbase/KeyValue.html | 9 +- .../hadoop/hbase/NoTagsByteBufferKeyValue.html | 9 +- .../org/apache/hadoop/hbase/NoTagsKeyValue.html | 9 +- ...ateCellUtil.EmptyByteBufferExtendedCell.html | 4 +- .../hadoop/hbase/PrivateCellUtil.EmptyCell.html | 4 +- ...llUtil.FirstOnRowByteBufferExtendedCell.html | 7 +- .../hbase/PrivateCellUtil.FirstOnRowCell.html | 7 +- ...vateCellUtil.FirstOnRowDeleteFamilyCell.html | 7 +- 
...ellUtil.LastOnRowByteBufferExtendedCell.html | 7 +- .../hbase/PrivateCellUtil.LastOnRowCell.html| 7 +- ...llUtil.TagRewriteByteBufferExtendedCell.html | 9 +- .../hbase/PrivateCellUtil.TagRewriteCell.html | 9 +- ...alueAndTagRewriteByteBufferExtendedCell.html | 9 +- .../PrivateCellUtil.ValueAndTagRewriteCell.html | 9 +- .../org/apache/hadoop/hbase/RawCellBuilder.html |31 +- .../hadoop/hbase/RawCellBuilderFactory.html |10 +- .../apache/hadoop/hbase/SizeCachedKeyValue.html | 9 +- .../hadoop/hbase/SizeCachedNoTagsKeyValue.html | 9 +- .../hadoop/hbase/backup/package-tree.html | 4 +- .../hadoop/hbase/class-use/Cell.Type.html |37 +- .../hadoop/hbase/class-use/CellBuilder.html | 3 +- .../class-use/ZooKeeperConnectionException.html | 7 + .../hbase/client/Mutation.CellWrapper.html | 9 +- .../hadoop/hbase/client/package-tree.html |20 +- .../hadoop/hbase/executor/package-tree.html | 2 +- ...nlyFilter.KeyOnlyByteBufferExtendedCell.html | 7 +- .../hbase/filter/KeyOnlyFilter.KeyOnlyCell.html | 6 +- .../hadoop/hbase/filter/package-tree.html |10 +- ...BlockEncoder.OffheapDecodedExtendedCell.html | 9 +- ...feredDataBlockEncoder.OnheapDecodedCell.html | 9 +- .../hadoop/hbase/io/hfile/package-tree.html | 6 +- .../hadoop/hbase/mapreduce/package-tree.html| 2 +- .../master/HMaster.InitializationMonitor.html |20 +- .../hbase/master/HMaster.RedirectServlet.html |12 +- .../org/apache/hadoop/hbase/master/HMaster.html | 466 +- .../master/HMasterCommandLine.LocalHMaster.html | 4 +- .../hbase/master/balancer/package-tree.html | 2 +- .../hadoop/hbase/master/package-tree.html | 6 +- .../hbase/master/procedure/package-tree.html| 2 +- .../replication/ReplicationPeerManager.html |69 +- .../apache/hadoop/hbase/package-summary.html| 3 +- .../org/apache/hadoop/hbase/package-tree.html |18 +- .../org/apache/hadoop/hbase/package-use.html| 9 +- .../hadoop/hbase/procedure2/package-tree.html | 4 +- .../hadoop/hbase/quotas/package-tree.html | 6 +-
[43/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0b638133/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.html b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.html index 74904fa..613c5d7 100644 --- a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.html +++ b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.html @@ -18,7 +18,7 @@ catch(err) { } //--> -var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":9,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":9,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":9,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":10,"i65":10,"i66":10,"i67":10,"i68":10,"i69":10,"i70":10,"i71":10,"i72":10,"i73":10,"i74":10,"i75":10,"i76":10,"i77":10,"i78":10,"i79":10,"i80":10,"i81":10,"i82":10,"i83":10,"i84":10,"i85":10,"i86":10,"i87":10,"i88":10,"i89":10,"i90":10,"i91":10,"i92":10,"i93":10,"i94":10,"i95":10,"i96":10,"i97":10,"i98":10,"i99":10,"i100":10,"i101":10,"i102":10,"i103":10,"i104":10,"i105":10,"i106":10,"i107":10,"i108":10,"i109 
":10,"i110":10,"i111":10,"i112":10,"i113":10,"i114":10,"i115":10,"i116":10,"i117":10,"i118":10,"i119":10,"i120":10,"i121":10,"i122":10,"i123":10,"i124":10,"i125":10,"i126":9,"i127":10,"i128":9,"i129":10,"i130":10,"i131":10,"i132":10,"i133":10,"i134":10,"i135":10,"i136":10,"i137":10,"i138":10,"i139":10,"i140":10,"i141":10,"i142":10,"i143":10,"i144":10,"i145":10,"i146":10,"i147":9,"i148":10,"i149":9,"i150":10,"i151":10,"i152":10,"i153":10,"i154":10,"i155":10,"i156":10,"i157":10,"i158":10,"i159":10,"i160":10,"i161":10,"i162":10,"i163":10,"i164":10,"i165":10,"i166":10,"i167":10,"i168":10}; +var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":9,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":9,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":9,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":10,"i65":10,"i66":10,"i67":10,"i68":10,"i69":10,"i70":10,"i71":10,"i72":10,"i73":10,"i74":10,"i75":10,"i76":10,"i77":10,"i78":10,"i79":10,"i80":10,"i81":10,"i82":10,"i83":10,"i84":10,"i85":10,"i86":10,"i87":10,"i88":10,"i89":10,"i90":10,"i91":10,"i92":10,"i93":10,"i94":10,"i95":10,"i96":10,"i97":10,"i98":10,"i99":10,"i100":10,"i101":10,"i102":10,"i103":10,"i104":10,"i105":10,"i106":10,"i107":10,"i108":10,"i109 
":10,"i110":10,"i111":10,"i112":10,"i113":10,"i114":10,"i115":10,"i116":10,"i117":10,"i118":10,"i119":10,"i120":10,"i121":10,"i122":10,"i123":10,"i124":10,"i125":10,"i126":9,"i127":10,"i128":9,"i129":10,"i130":10,"i131":10,"i132":10,"i133":10,"i134":10,"i135":10,"i136":10,"i137":10,"i138":10,"i139":10,"i140":10,"i141":10,"i142":10,"i143":10,"i144":10,"i145":10,"i146":9,"i147":10,"i148":9,"i149":10,"i150":10,"i151":10,"i152":10,"i153":10,"i154":10,"i155":10,"i156":10,"i157":10,"i158":10,"i159":10,"i160":10,"i161":10,"i162":10,"i163":10,"i164":10,"i165":10,"i166":10,"i167":10}; var tabs = {65535:["t0","All Methods"],1:["t1","Static Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]}; var altColor = "altColor"; var rowColor = "rowColor"; @@ -294,131 +294,127 @@ implements INIT_PAUSE_TIME_MS -protected http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CountDownLatch.html?is-external=true; title="class or interface in java.util.concurrent">CountDownLatch -initLatch - - (package private) boolean killed - + protected Leases leases - + protected http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/locks/ReentrantReadWriteLock.html?is-external=true; title="class or interface in java.util.concurrent.locks">ReentrantReadWriteLock lock - + private org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockService.BlockingInterface lockStub - + private static org.slf4j.Logger LOG - + protected static http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">String MASTER_HOSTNAME_KEY - + private MasterAddressTracker masterAddressTracker - + private boolean masterless True if this RegionServer is coming up in a cluster where there is no Master;
[12/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0b638133/testdevapidocs/org/apache/hadoop/hbase/client/package-use.html -- diff --git a/testdevapidocs/org/apache/hadoop/hbase/client/package-use.html b/testdevapidocs/org/apache/hadoop/hbase/client/package-use.html index e2ff53a..e89ffa5 100644 --- a/testdevapidocs/org/apache/hadoop/hbase/client/package-use.html +++ b/testdevapidocs/org/apache/hadoop/hbase/client/package-use.html @@ -110,104 +110,119 @@ AbstractTestAsyncTableScan -AbstractTestResultScannerCursor +AbstractTestCIOperationTimeout +Based class for testing operation timeout logic for ConnectionImplementation. + -AbstractTestScanCursor +AbstractTestCIRpcTimeout +Based class for testing rpc timeout logic for ConnectionImplementation. + -AbstractTestShell +AbstractTestCITimeout +Based class for testing timeout logic for ConnectionImplementation. + -ColumnCountOnRowFilter +AbstractTestResultScannerCursor +AbstractTestScanCursor + + +AbstractTestShell + + +ColumnCountOnRowFilter + + DoNothingAsyncRegistry Registry that does nothing. - + TestAsyncAdminBase Class to test AsyncAdmin. - + TestAsyncProcess.MyAsyncProcess - + TestAsyncProcess.MyAsyncProcessWithReplicas - + TestAsyncProcess.MyConnectionImpl Returns our async process. - + TestAsyncProcess.ResponseGenerator - + TestAsyncProcess.RR After reading TheDailyWtf, I always wanted to create a MyBoolean enum like this! - + TestAsyncTableGetMultiThreaded Will split the table, and move region randomly when testing. - + TestAsyncTableScanMetrics.ScanWithMetrics - + TestBlockEvictionFromClient.CustomInnerRegionObserver - + TestBlockEvictionFromClient.GetThread - + TestBlockEvictionFromClient.MultiGetThread - + TestBlockEvictionFromClient.ScanThread - + TestCloneSnapshotFromClient Test clone snapshots from the client - + TestFromClientSide Run tests that use the HBase clients; Table. 
- + TestFromClientSideScanExcpetion - + TestHBaseAdminNoCluster.MethodCaller - + TestIncrementsFromClientSide Run Increment tests that use the HBase clients; HTable. - + TestMetaCache.ExceptionInjector - + TestMetaCache.FakeRSRpcServices - + TestRestoreSnapshotFromClient Test restore snapshots from the client - + TestSnapshotCloneIndependence Test to verify that the cloned table is independent of the table from which it was cloned - + TestSnapshotFromClient Test create/using/deleting snapshots from the client http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0b638133/testdevapidocs/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.html -- diff --git a/testdevapidocs/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.html b/testdevapidocs/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.html index 23b0ee4..5022712 100644 --- a/testdevapidocs/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.html +++ b/testdevapidocs/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.html @@ -18,7 +18,7 @@ catch(err) { } //--> -var methods = {"i0":9,"i1":10,"i2":9,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10}; +var methods = {"i0":9,"i1":10,"i2":9,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10}; var tabs = {65535:["t0","All Methods"],1:["t1","Static Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]}; var altColor = "altColor"; var rowColor = "rowColor"; @@ -109,7 +109,7 @@ var activeTableTab = "activeTableTab"; -public class TestReplicationAdmin +public class TestReplicationAdmin extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true; title="class or interface in java.lang">Object Unit testing of ReplicationAdmin @@ -253,32 +253,40 @@ extends 
http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html? void +testPeerClusterKey() + + +void testPeerConfig() Tests that the peer configuration used by ReplicationAdmin contains all the peer's properties. - + void testPeerConfigConflict() - + void testPeerExcludeNamespaces() - + void testPeerExcludeTableCFs() - + +void +testPeerReplicationEndpointImpl() + + void testRemovePeerTableCFs() - + void testSetPeerNamespaces() - + void testSetReplicateAllUserTables() @@ -310,7 +318,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html? LOG -private static finalorg.slf4j.Logger LOG +private static finalorg.slf4j.Logger LOG @@ -319,7 +327,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html? TEST_UTIL -private static finalHBaseTestingUtility
[20/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0b638133/testdevapidocs/org/apache/hadoop/hbase/client/TestCIGetRpcTimeout.html -- diff --git a/testdevapidocs/org/apache/hadoop/hbase/client/TestCIGetRpcTimeout.html b/testdevapidocs/org/apache/hadoop/hbase/client/TestCIGetRpcTimeout.html new file mode 100644 index 000..6f63255 --- /dev/null +++ b/testdevapidocs/org/apache/hadoop/hbase/client/TestCIGetRpcTimeout.html @@ -0,0 +1,333 @@ +http://www.w3.org/TR/html4/loose.dtd;> + + + + + +TestCIGetRpcTimeout (Apache HBase 3.0.0-SNAPSHOT Test API) + + + + + +var methods = {"i0":10}; +var tabs = {65535:["t0","All Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]}; +var altColor = "altColor"; +var rowColor = "rowColor"; +var tableTab = "tableTab"; +var activeTableTab = "activeTableTab"; + + +JavaScript is disabled on your browser. + + + + + +Skip navigation links + + + + +Overview +Package +Class +Use +Tree +Deprecated +Index +Help + + + + +PrevClass +NextClass + + +Frames +NoFrames + + +AllClasses + + + + + + + +Summary: +Nested| +Field| +Constr| +Method + + +Detail: +Field| +Constr| +Method + + + + + + + + +org.apache.hadoop.hbase.client +Class TestCIGetRpcTimeout + + + +http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true; title="class or interface in java.lang">java.lang.Object + + +org.apache.hadoop.hbase.client.AbstractTestCITimeout + + +org.apache.hadoop.hbase.client.AbstractTestCIRpcTimeout + + +org.apache.hadoop.hbase.client.TestCIGetRpcTimeout + + + + + + + + + + + + +public class TestCIGetRpcTimeout +extends AbstractTestCIRpcTimeout + + + + + + + + + + + +Nested Class Summary + + + + +Nested classes/interfaces inherited from classorg.apache.hadoop.hbase.client.AbstractTestCITimeout +AbstractTestCITimeout.SleepAndFailFirstTime, AbstractTestCITimeout.SleepCoprocessor + + + + + + + + +Field Summary + + + + +Fields inherited from classorg.apache.hadoop.hbase.client.AbstractTestCITimeout +FAM_NAM, name, TEST_UTIL + + + 
+ + + + + +Constructor Summary + +Constructors + +Constructor and Description + + +TestCIGetRpcTimeout() + + + + + + + + + +Method Summary + +All MethodsInstance MethodsConcrete Methods + +Modifier and Type +Method and Description + + +protected void +execute(org.apache.hadoop.hbase.client.Tabletable) + + + + + + +Methods inherited from classorg.apache.hadoop.hbase.client.AbstractTestCIRpcTimeout +setUp, testRpcTimeout + + + + + +Methods inherited from classorg.apache.hadoop.hbase.client.AbstractTestCITimeout +setUpBeforeClass, tearDownAfterClass + + + + + +Methods inherited from classjava.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true; title="class or interface in java.lang">Object +http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#clone--; title="class or interface in java.lang">clone, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#equals-java.lang.Object-; title="class or interface in java.lang">equals, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#finalize--; title="class or interface in java.lang">finalize, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#getClass--; title="class or interface in java.lang">getClass, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#hashCode--; title="class or interface in java.lang">hashCode, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#notify--; title="class or interface in java.lang">notify, http://docs.oracle.com/javase/8/docs/api/java/lang /Object.html?is-external=true#notifyAll--" title="class or interface in java.lang">notifyAll, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#toString--; title="class or interface in java.lang">toString, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#wait--; title="class or interface in 
java.lang">wait, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#wait-long-; title="class or interface in java.lang">wait, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#wait-long-int-; title="class or interface in java.lang">wait + + + + + + + + + + + + + + +Constructor Detail + + + + + +TestCIGetRpcTimeout +publicTestCIGetRpcTimeout() + + + + + + + + + +Method Detail + + + + + +execute
[03/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0b638133/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestConnectionImplementation.BlockingFilter.html -- diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestConnectionImplementation.BlockingFilter.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestConnectionImplementation.BlockingFilter.html new file mode 100644 index 000..a1ce1bf --- /dev/null +++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestConnectionImplementation.BlockingFilter.html @@ -0,0 +1,1116 @@ +http://www.w3.org/TR/html4/loose.dtd;> + + +Source code + + + + +001/* +002 * Licensed to the Apache Software Foundation (ASF) under one +003 * or more contributor license agreements. See the NOTICE file +004 * distributed with this work for additional information +005 * regarding copyright ownership. The ASF licenses this file +006 * to you under the Apache License, Version 2.0 (the +007 * "License"); you may not use this file except in compliance +008 * with the License. You may obtain a copy of the License at +009 * +010 * http://www.apache.org/licenses/LICENSE-2.0 +011 * +012 * Unless required by applicable law or agreed to in writing, software +013 * distributed under the License is distributed on an "AS IS" BASIS, +014 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +015 * See the License for the specific language governing permissions and +016 * limitations under the License. 
+017 */ +018package org.apache.hadoop.hbase.client; +019 +020import static org.junit.Assert.assertEquals; +021import static org.junit.Assert.assertFalse; +022import static org.junit.Assert.assertNotNull; +023import static org.junit.Assert.assertNull; +024import static org.junit.Assert.assertTrue; +025 +026import java.io.IOException; +027import java.lang.reflect.Field; +028import java.lang.reflect.Modifier; +029import java.net.SocketTimeoutException; +030import java.util.ArrayList; +031import java.util.List; +032import java.util.concurrent.ExecutorService; +033import java.util.concurrent.SynchronousQueue; +034import java.util.concurrent.ThreadLocalRandom; +035import java.util.concurrent.ThreadPoolExecutor; +036import java.util.concurrent.TimeUnit; +037import java.util.concurrent.atomic.AtomicBoolean; +038import java.util.concurrent.atomic.AtomicInteger; +039import java.util.concurrent.atomic.AtomicReference; +040import org.apache.hadoop.conf.Configuration; +041import org.apache.hadoop.hbase.CategoryBasedTimeout; +042import org.apache.hadoop.hbase.Cell; +043import org.apache.hadoop.hbase.HBaseTestingUtility; +044import org.apache.hadoop.hbase.HConstants; +045import org.apache.hadoop.hbase.HRegionLocation; +046import org.apache.hadoop.hbase.RegionLocations; +047import org.apache.hadoop.hbase.ServerName; +048import org.apache.hadoop.hbase.TableName; +049import org.apache.hadoop.hbase.Waiter; +050import org.apache.hadoop.hbase.exceptions.ClientExceptionsUtil; +051import org.apache.hadoop.hbase.exceptions.DeserializationException; +052import org.apache.hadoop.hbase.exceptions.RegionMovedException; +053import org.apache.hadoop.hbase.filter.Filter; +054import org.apache.hadoop.hbase.filter.FilterBase; +055import org.apache.hadoop.hbase.ipc.RpcClient; +056import org.apache.hadoop.hbase.master.HMaster; +057import org.apache.hadoop.hbase.regionserver.HRegion; +058import org.apache.hadoop.hbase.regionserver.HRegionServer; +059import org.apache.hadoop.hbase.regionserver.Region; 
+060import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException; +061import org.apache.hadoop.hbase.testclassification.LargeTests; +062import org.apache.hadoop.hbase.util.Bytes; +063import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; +064import org.apache.hadoop.hbase.util.JVMClusterUtil; +065import org.apache.hadoop.hbase.util.ManualEnvironmentEdge; +066import org.apache.hadoop.hbase.util.Threads; +067import org.junit.AfterClass; +068import org.junit.Assert; +069import org.junit.BeforeClass; +070import org.junit.Ignore; +071import org.junit.Rule; +072import org.junit.Test; +073import org.junit.experimental.categories.Category; +074import org.junit.rules.TestName; +075import org.junit.rules.TestRule; +076import org.slf4j.Logger; +077import org.slf4j.LoggerFactory; +078 +079import org.apache.hbase.thirdparty.com.google.common.collect.Lists; +080 +081/** +082 * This class is for testing HBaseConnectionManager features +083 */ +084@Category({LargeTests.class}) +085public class TestConnectionImplementation { +086 @Rule +087 public final TestRule timeout = CategoryBasedTimeout.builder().withTimeout(this.getClass()) +088 .withLookingForStuckThread(true).build(); +089 private static final Logger LOG = LoggerFactory.getLogger(TestConnectionImplementation.class); +090 private final static HBaseTestingUtility
[14/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0b638133/testdevapidocs/org/apache/hadoop/hbase/client/class-use/TestHCM.SleepCoprocessor.html -- diff --git a/testdevapidocs/org/apache/hadoop/hbase/client/class-use/TestHCM.SleepCoprocessor.html b/testdevapidocs/org/apache/hadoop/hbase/client/class-use/TestHCM.SleepCoprocessor.html deleted file mode 100644 index e83a425..000 --- a/testdevapidocs/org/apache/hadoop/hbase/client/class-use/TestHCM.SleepCoprocessor.html +++ /dev/null @@ -1,125 +0,0 @@ -http://www.w3.org/TR/html4/loose.dtd;> - - - - - -Uses of Class org.apache.hadoop.hbase.client.TestHCM.SleepCoprocessor (Apache HBase 3.0.0-SNAPSHOT Test API) - - - - - - - -JavaScript is disabled on your browser. - - - - - -Skip navigation links - - - - -Overview -Package -Class -Use -Tree -Deprecated -Index -Help - - - - -Prev -Next - - -Frames -NoFrames - - -AllClasses - - - - - - - - - - -Uses of Classorg.apache.hadoop.hbase.client.TestHCM.SleepCoprocessor - -No usage of org.apache.hadoop.hbase.client.TestHCM.SleepCoprocessor - - - - -Skip navigation links - - - - -Overview -Package -Class -Use -Tree -Deprecated -Index -Help - - - - -Prev -Next - - -Frames -NoFrames - - -AllClasses - - - - - - - - - -Copyright 20072018 https://www.apache.org/;>The Apache Software Foundation. All rights reserved. - - http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0b638133/testdevapidocs/org/apache/hadoop/hbase/client/class-use/TestHCM.html -- diff --git a/testdevapidocs/org/apache/hadoop/hbase/client/class-use/TestHCM.html b/testdevapidocs/org/apache/hadoop/hbase/client/class-use/TestHCM.html deleted file mode 100644 index f44cef5..000 --- a/testdevapidocs/org/apache/hadoop/hbase/client/class-use/TestHCM.html +++ /dev/null @@ -1,125 +0,0 @@ -http://www.w3.org/TR/html4/loose.dtd;> - - - - - -Uses of Class org.apache.hadoop.hbase.client.TestHCM (Apache HBase 3.0.0-SNAPSHOT Test API) - - - - - - - -JavaScript is disabled on your browser. 
- - - - - -Skip navigation links - - - - -Overview -Package -Class -Use -Tree -Deprecated -Index -Help - - - - -Prev -Next - - -Frames -NoFrames - - -AllClasses - - - - - - - - - - -Uses of Classorg.apache.hadoop.hbase.client.TestHCM - -No usage of org.apache.hadoop.hbase.client.TestHCM - - - - -Skip navigation links - - - - -Overview -Package -Class -Use -Tree -Deprecated -Index -Help - - - - -Prev -Next - - -Frames -NoFrames - - -AllClasses - - - - - - - - - -Copyright 20072018 https://www.apache.org/;>The Apache Software Foundation. All rights reserved. - - http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0b638133/testdevapidocs/org/apache/hadoop/hbase/client/package-frame.html -- diff --git a/testdevapidocs/org/apache/hadoop/hbase/client/package-frame.html b/testdevapidocs/org/apache/hadoop/hbase/client/package-frame.html index 51cba7a..4e2f3d0 100644 --- a/testdevapidocs/org/apache/hadoop/hbase/client/package-frame.html +++ b/testdevapidocs/org/apache/hadoop/hbase/client/package-frame.html @@ -20,6 +20,11 @@ Classes AbstractTestAsyncTableScan +AbstractTestCIOperationTimeout +AbstractTestCIRpcTimeout +AbstractTestCITimeout +AbstractTestCITimeout.SleepAndFailFirstTime +AbstractTestCITimeout.SleepCoprocessor AbstractTestResultScannerCursor AbstractTestScanCursor AbstractTestScanCursor.SparseFilter @@ -107,6 +112,15 @@ TestBufferedMutatorParams
[32/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0b638133/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.html index 163ade0..802fc2f 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.html @@ -254,3512 +254,3505 @@ 246 protected MemStoreFlusher cacheFlusher; 247 248 protected HeapMemoryManager hMemManager; -249 protected CountDownLatch initLatch = null; -250 -251 /** -252 * Cluster connection to be shared by services. -253 * Initialized at server startup and closed when server shuts down. -254 * Clients must never close it explicitly. -255 */ -256 protected ClusterConnection clusterConnection; -257 -258 /* -259 * Long-living meta table locator, which is created when the server is started and stopped -260 * when server shuts down. References to this locator shall be used to perform according -261 * operations in EventHandlers. Primary reason for this decision is to make it mockable -262 * for tests. -263 */ -264 protected MetaTableLocator metaTableLocator; -265 -266 /** -267 * Go here to get table descriptors. -268 */ -269 protected TableDescriptors tableDescriptors; -270 -271 // Replication services. If no replication, this handler will be null. -272 protected ReplicationSourceService replicationSourceHandler; -273 protected ReplicationSinkService replicationSinkHandler; -274 -275 // Compactions -276 public CompactSplit compactSplitThread; -277 -278 /** -279 * Map of regions currently being served by this region server. Key is the -280 * encoded region name. All access should be synchronized. 
-281 */ -282 protected final MapString, HRegion onlineRegions = new ConcurrentHashMap(); -283 -284 /** -285 * Map of encoded region names to the DataNode locations they should be hosted on -286 * We store the value as InetSocketAddress since this is used only in HDFS -287 * API (create() that takes favored nodes as hints for placing file blocks). -288 * We could have used ServerName here as the value class, but we'd need to -289 * convert it to InetSocketAddress at some point before the HDFS API call, and -290 * it seems a bit weird to store ServerName since ServerName refers to RegionServers -291 * and here we really mean DataNode locations. -292 */ -293 protected final MapString, InetSocketAddress[] regionFavoredNodesMap = -294 new ConcurrentHashMap(); -295 -296 // Leases -297 protected Leases leases; -298 -299 // Instance of the hbase executor executorService. -300 protected ExecutorService executorService; -301 -302 // If false, the file system has become unavailable -303 protected volatile boolean fsOk; -304 protected HFileSystem fs; -305 protected HFileSystem walFs; -306 -307 // Set when a report to the master comes back with a message asking us to -308 // shutdown. Also set by call to stop when debugging or running unit tests -309 // of HRegionServer in isolation. -310 private volatile boolean stopped = false; -311 -312 // Go down hard. Used if file system becomes unavailable and also in -313 // debugging and unit tests. -314 private volatile boolean abortRequested; -315 -316 ConcurrentMapString, Integer rowlocks = new ConcurrentHashMap(); -317 -318 // A state before we go into stopped state. At this stage we're closing user -319 // space regions. 
-320 private boolean stopping = false; -321 -322 volatile boolean killed = false; -323 -324 protected final Configuration conf; -325 -326 private Path rootDir; -327 private Path walRootDir; -328 -329 protected final ReentrantReadWriteLock lock = new ReentrantReadWriteLock(); -330 -331 final int numRetries; -332 protected final int threadWakeFrequency; -333 protected final int msgInterval; -334 -335 protected final int numRegionsToReport; -336 -337 // Stub to do region server status calls against the master. -338 private volatile RegionServerStatusService.BlockingInterface rssStub; -339 private volatile LockService.BlockingInterface lockStub; -340 // RPC client. Used to make the stub above that does region server status checking. -341 RpcClient rpcClient; -342 -343 private RpcRetryingCallerFactory rpcRetryingCallerFactory; -344 private RpcControllerFactory rpcControllerFactory; -345 -346 private UncaughtExceptionHandler uncaughtExceptionHandler; -347 -348 // Info server. Default access so can be used by unit tests. REGIONSERVER -349 // is name of the webapp and the attribute name used stuffing this instance -350 // into web context. -351 protected InfoServer infoServer; -352 private JvmPauseMonitor pauseMonitor; -353 -354 /**
hbase-site git commit: INFRA-10751 Empty commit
Repository: hbase-site Updated Branches: refs/heads/asf-site 0b638133a -> 27d54dda8 INFRA-10751 Empty commit Project: http://git-wip-us.apache.org/repos/asf/hbase-site/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase-site/commit/27d54dda Tree: http://git-wip-us.apache.org/repos/asf/hbase-site/tree/27d54dda Diff: http://git-wip-us.apache.org/repos/asf/hbase-site/diff/27d54dda Branch: refs/heads/asf-site Commit: 27d54dda822ed6f0d895cffd2701f64adfb8fab9 Parents: 0b63813 Author: jenkinsAuthored: Fri Jan 12 15:31:50 2018 + Committer: jenkins Committed: Fri Jan 12 15:31:50 2018 + -- --
[37/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0b638133/devapidocs/src-html/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.html index 928cae1..0dbf815 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.html @@ -140,216 +140,226 @@ 132checkPeerConfig(peerConfig); 133ReplicationPeerDescription desc = checkPeerExists(peerId); 134ReplicationPeerConfig oldPeerConfig = desc.getPeerConfig(); -135if (!StringUtils.isBlank(peerConfig.getClusterKey()) -136 !peerConfig.getClusterKey().equals(oldPeerConfig.getClusterKey())) { -137 throw new DoNotRetryIOException( -138 "Changing the cluster key on an existing peer is not allowed. Existing key '" + -139oldPeerConfig.getClusterKey() + "' for peer " + peerId + " does not match new key '" + -140peerConfig.getClusterKey() + "'"); -141} -142 -143if (!StringUtils.isBlank(peerConfig.getReplicationEndpointImpl()) -144 !peerConfig.getReplicationEndpointImpl().equals(oldPeerConfig.getReplicationEndpointImpl())) { -145 throw new DoNotRetryIOException("Changing the replication endpoint implementation class " + -146"on an existing peer is not allowed. 
Existing class '" + -147 oldPeerConfig.getReplicationEndpointImpl() + "' for peer " + peerId + -148" does not match new class '" + peerConfig.getReplicationEndpointImpl() + "'"); -149} -150 } -151 -152 public void addPeer(String peerId, ReplicationPeerConfig peerConfig, boolean enabled) -153 throws ReplicationException { -154if (peers.containsKey(peerId)) { -155 // this should be a retry, just return -156 return; -157} -158ReplicationPeerConfig copiedPeerConfig = ReplicationPeerConfig.newBuilder(peerConfig).build(); -159peerStorage.addPeer(peerId, copiedPeerConfig, enabled); -160peers.put(peerId, new ReplicationPeerDescription(peerId, enabled, copiedPeerConfig)); -161 } -162 -163 public void removePeer(String peerId) throws ReplicationException { -164if (!peers.containsKey(peerId)) { -165 // this should be a retry, just return -166 return; -167} -168peerStorage.removePeer(peerId); -169peers.remove(peerId); -170 } -171 -172 private void setPeerState(String peerId, boolean enabled) throws ReplicationException { -173ReplicationPeerDescription desc = peers.get(peerId); -174if (desc.isEnabled() == enabled) { -175 // this should be a retry, just return -176 return; -177} -178peerStorage.setPeerState(peerId, enabled); -179peers.put(peerId, new ReplicationPeerDescription(peerId, enabled, desc.getPeerConfig())); -180 } -181 -182 public void enablePeer(String peerId) throws ReplicationException { -183setPeerState(peerId, true); -184 } -185 -186 public void disablePeer(String peerId) throws ReplicationException { -187setPeerState(peerId, false); -188 } -189 -190 public void updatePeerConfig(String peerId, ReplicationPeerConfig peerConfig) -191 throws ReplicationException { -192// the checking rules are too complicated here so we give up checking whether this is a retry. 
-193ReplicationPeerDescription desc = peers.get(peerId); -194ReplicationPeerConfig oldPeerConfig = desc.getPeerConfig(); -195ReplicationPeerConfigBuilder newPeerConfigBuilder = -196 ReplicationPeerConfig.newBuilder(peerConfig); -197// we need to use the new conf to overwrite the old one. -198 newPeerConfigBuilder.putAllConfiguration(oldPeerConfig.getConfiguration()); -199 newPeerConfigBuilder.putAllConfiguration(peerConfig.getConfiguration()); -200 newPeerConfigBuilder.putAllConfiguration(oldPeerConfig.getConfiguration()); -201 newPeerConfigBuilder.putAllConfiguration(peerConfig.getConfiguration()); -202ReplicationPeerConfig newPeerConfig = newPeerConfigBuilder.build(); -203peerStorage.updatePeerConfig(peerId, newPeerConfig); -204peers.put(peerId, new ReplicationPeerDescription(peerId, desc.isEnabled(), newPeerConfig)); -205 } -206 -207 public ListReplicationPeerDescription listPeers(Pattern pattern) { -208if (pattern == null) { -209 return new ArrayList(peers.values()); -210} -211return peers.values().stream().filter(r - pattern.matcher(r.getPeerId()).matches()) -212.collect(Collectors.toList()); -213 } -214 -215 public OptionalReplicationPeerConfig getPeerConfig(String peerId) { -216ReplicationPeerDescription desc = peers.get(peerId); -217return desc != null ?
[02/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0b638133/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestConnectionImplementation.html -- diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestConnectionImplementation.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestConnectionImplementation.html index 3068847..a1ce1bf 100644 --- a/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestConnectionImplementation.html +++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestConnectionImplementation.html @@ -23,83 +23,1033 @@ 015 * See the License for the specific language governing permissions and 016 * limitations under the License. 017 */ -018 -019package org.apache.hadoop.hbase.client; -020 -021import static org.junit.Assert.fail; -022 -023import org.apache.hadoop.hbase.HBaseTestingUtility; -024import org.apache.hadoop.hbase.HConstants; -025import org.apache.hadoop.hbase.ServerName; -026import org.apache.hadoop.hbase.testclassification.ClientTests; -027import org.apache.hadoop.hbase.testclassification.MediumTests; -028import org.junit.AfterClass; -029import org.junit.BeforeClass; -030import org.junit.Test; -031import org.junit.experimental.categories.Category; -032 -033import java.net.UnknownHostException; -034 -035/** -036 * Tests that we fail fast when hostname resolution is not working and do not cache -037 * unresolved InetSocketAddresses. 
-038 */ -039@Category({MediumTests.class, ClientTests.class}) -040public class TestConnectionImplementation { -041 private static HBaseTestingUtility testUtil; -042 private static ConnectionImplementation conn; -043 -044 @BeforeClass -045 public static void setupBeforeClass() throws Exception { -046testUtil = HBaseTestingUtility.createLocalHTU(); -047testUtil.startMiniCluster(); -048conn = (ConnectionImplementation) testUtil.getConnection(); -049 } -050 -051 @AfterClass -052 public static void teardownAfterClass() throws Exception { -053conn.close(); -054testUtil.shutdownMiniCluster(); -055 } -056 -057 @Test(expected = UnknownHostException.class) -058 public void testGetAdminBadHostname() throws Exception { -059// verify that we can get an instance with the cluster hostname -060ServerName master = testUtil.getHBaseCluster().getMaster().getServerName(); -061try { -062 conn.getAdmin(master); -063} catch (UnknownHostException uhe) { -064 fail("Obtaining admin to the cluster master should have succeeded"); -065} -066 -067// test that we fail to get a client to an unresolvable hostname, which -068// means it won't be cached -069ServerName badHost = -070 ServerName.valueOf("unknownhost.invalid:" + HConstants.DEFAULT_MASTER_PORT, -071System.currentTimeMillis()); -072conn.getAdmin(badHost); -073fail("Obtaining admin to unresolvable hostname should have failed"); -074 } -075 -076 @Test(expected = UnknownHostException.class) -077 public void testGetClientBadHostname() throws Exception { -078// verify that we can get an instance with the cluster hostname -079ServerName rs = testUtil.getHBaseCluster().getRegionServer(0).getServerName(); -080try { -081 conn.getClient(rs); -082} catch (UnknownHostException uhe) { -083 fail("Obtaining client to the cluster regionserver should have succeeded"); -084} -085 -086// test that we fail to get a client to an unresolvable hostname, which -087// means it won't be cached -088ServerName badHost = -089 
ServerName.valueOf("unknownhost.invalid:" + HConstants.DEFAULT_REGIONSERVER_PORT, -090System.currentTimeMillis()); -091conn.getAdmin(badHost); -092fail("Obtaining client to unresolvable hostname should have failed"); -093 } -094} +018package org.apache.hadoop.hbase.client; +019 +020import static org.junit.Assert.assertEquals; +021import static org.junit.Assert.assertFalse; +022import static org.junit.Assert.assertNotNull; +023import static org.junit.Assert.assertNull; +024import static org.junit.Assert.assertTrue; +025 +026import java.io.IOException; +027import java.lang.reflect.Field; +028import java.lang.reflect.Modifier; +029import java.net.SocketTimeoutException; +030import java.util.ArrayList; +031import java.util.List; +032import java.util.concurrent.ExecutorService; +033import java.util.concurrent.SynchronousQueue; +034import java.util.concurrent.ThreadLocalRandom; +035import java.util.concurrent.ThreadPoolExecutor; +036import java.util.concurrent.TimeUnit; +037import java.util.concurrent.atomic.AtomicBoolean; +038import java.util.concurrent.atomic.AtomicInteger; +039import java.util.concurrent.atomic.AtomicReference; +040import org.apache.hadoop.conf.Configuration; +041import org.apache.hadoop.hbase.CategoryBasedTimeout; +042import org.apache.hadoop.hbase.Cell; +043import
[30/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0b638133/devapidocs/src-html/org/apache/hadoop/hbase/zookeeper/ReadOnlyZKClient.ZKTask.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/zookeeper/ReadOnlyZKClient.ZKTask.html b/devapidocs/src-html/org/apache/hadoop/hbase/zookeeper/ReadOnlyZKClient.ZKTask.html index 5b66298..ea864e9 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/zookeeper/ReadOnlyZKClient.ZKTask.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/zookeeper/ReadOnlyZKClient.ZKTask.html @@ -39,319 +39,329 @@ 031import java.util.concurrent.atomic.AtomicBoolean; 032 033import org.apache.hadoop.conf.Configuration; -034import org.apache.yetus.audience.InterfaceAudience; -035import org.apache.zookeeper.KeeperException; -036import org.apache.zookeeper.KeeperException.Code; -037import org.apache.zookeeper.ZooKeeper; -038import org.apache.zookeeper.data.Stat; -039import org.slf4j.Logger; -040import org.slf4j.LoggerFactory; -041import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; -042 -043/** -044 * A very simple read only zookeeper implementation without watcher support. 
-045 */ -046@InterfaceAudience.Private -047public final class ReadOnlyZKClient implements Closeable { -048 -049 private static final Logger LOG = LoggerFactory.getLogger(ReadOnlyZKClient.class); -050 -051 public static final String RECOVERY_RETRY = "zookeeper.recovery.retry"; -052 -053 private static final int DEFAULT_RECOVERY_RETRY = 30; -054 -055 public static final String RECOVERY_RETRY_INTERVAL_MILLIS = -056 "zookeeper.recovery.retry.intervalmill"; -057 -058 private static final int DEFAULT_RECOVERY_RETRY_INTERVAL_MILLIS = 1000; -059 -060 public static final String KEEPALIVE_MILLIS = "zookeeper.keep-alive.time"; -061 -062 private static final int DEFAULT_KEEPALIVE_MILLIS = 6; -063 -064 private static final EnumSetCode FAIL_FAST_CODES = EnumSet.of(Code.NOAUTH, Code.AUTHFAILED); -065 -066 private final String connectString; -067 -068 private final int sessionTimeoutMs; -069 -070 private final int maxRetries; -071 -072 private final int retryIntervalMs; -073 -074 private final int keepAliveTimeMs; -075 -076 private static abstract class Task implements Delayed { -077 -078protected long time = System.nanoTime(); -079 -080public boolean needZk() { -081 return false; -082} -083 -084public void exec(ZooKeeper zk) { -085} -086 -087public void connectFailed(IOException e) { -088} -089 -090public void closed(IOException e) { -091} -092 -093@Override -094public int compareTo(Delayed o) { -095 Task that = (Task) o; -096 int c = Long.compare(time, that.time); -097 if (c != 0) { -098return c; -099 } -100 return Integer.compare(System.identityHashCode(this), System.identityHashCode(that)); -101} -102 -103@Override -104public long getDelay(TimeUnit unit) { -105 return unit.convert(time - System.nanoTime(), TimeUnit.NANOSECONDS); -106} -107 } -108 -109 private static final Task CLOSE = new Task() { -110 }; -111 -112 private final DelayQueueTask tasks = new DelayQueue(); -113 -114 private final AtomicBoolean closed = new AtomicBoolean(false); -115 -116 private ZooKeeper 
zookeeper; -117 -118 private String getId() { -119return String.format("0x%08x", System.identityHashCode(this)); -120 } -121 -122 public ReadOnlyZKClient(Configuration conf) { -123this.connectString = ZKConfig.getZKQuorumServersString(conf); -124this.sessionTimeoutMs = conf.getInt(ZK_SESSION_TIMEOUT, DEFAULT_ZK_SESSION_TIMEOUT); -125this.maxRetries = conf.getInt(RECOVERY_RETRY, DEFAULT_RECOVERY_RETRY); -126this.retryIntervalMs = -127 conf.getInt(RECOVERY_RETRY_INTERVAL_MILLIS, DEFAULT_RECOVERY_RETRY_INTERVAL_MILLIS); -128this.keepAliveTimeMs = conf.getInt(KEEPALIVE_MILLIS, DEFAULT_KEEPALIVE_MILLIS); -129LOG.info("Start read only zookeeper connection " + getId() + " to " + connectString + -130", session timeout " + sessionTimeoutMs + " ms, retries " + maxRetries + -131", retry interval " + retryIntervalMs + " ms, keep alive " + keepAliveTimeMs + " ms"); -132Thread t = new Thread(this::run, "ReadOnlyZKClient"); -133t.setDaemon(true); -134t.start(); -135 } -136 -137 private abstract class ZKTaskT extends Task { -138 -139protected final String path; -140 -141private final CompletableFutureT future; -142 -143private final String operationType; -144 -145private int retries; -146 -147protected ZKTask(String path, CompletableFutureT future, String operationType) { -148 this.path = path; -149 this.future = future; -150 this.operationType = operationType; -151} -152 -153protected final void onComplete(ZooKeeper zk, int rc, T ret, boolean errorIfNoNode) { -154
[10/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0b638133/testdevapidocs/src-html/org/apache/hadoop/hbase/TestCellUtil.NonExtendedCell.html -- diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/TestCellUtil.NonExtendedCell.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/TestCellUtil.NonExtendedCell.html index 232ef56..bc3a6d0 100644 --- a/testdevapidocs/src-html/org/apache/hadoop/hbase/TestCellUtil.NonExtendedCell.html +++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/TestCellUtil.NonExtendedCell.html @@ -29,610 +29,626 @@ 021import static org.junit.Assert.assertEquals; 022import static org.junit.Assert.assertFalse; 023import static org.junit.Assert.assertTrue; -024 -025import java.io.ByteArrayOutputStream; -026import java.io.IOException; -027import java.math.BigDecimal; -028import java.nio.ByteBuffer; -029import java.util.ArrayList; -030import java.util.List; -031import java.util.NavigableMap; -032import java.util.TreeMap; -033import org.apache.hadoop.hbase.testclassification.MiscTests; -034import org.apache.hadoop.hbase.testclassification.SmallTests; -035import org.apache.hadoop.hbase.util.Bytes; -036import org.junit.Assert; -037import org.junit.Test; -038import org.junit.experimental.categories.Category; -039 -040@Category({MiscTests.class, SmallTests.class}) -041public class TestCellUtil { -042 /** -043 * CellScannable used in test. Returns a {@link TestCellScanner} -044 */ -045 private static class TestCellScannable implements CellScannable { -046private final int cellsCount; -047TestCellScannable(final int cellsCount) { -048 this.cellsCount = cellsCount; -049} -050@Override -051public CellScanner cellScanner() { -052 return new TestCellScanner(this.cellsCount); -053} -054 } -055 -056 /** -057 * CellScanner used in test. 
-058 */ -059 private static class TestCellScanner implements CellScanner { -060private int count = 0; -061private Cell current = null; -062private final int cellsCount; -063 -064TestCellScanner(final int cellsCount) { -065 this.cellsCount = cellsCount; -066} -067 -068@Override -069public Cell current() { -070 return this.current; -071} -072 -073@Override -074public boolean advance() throws IOException { -075 if (this.count cellsCount) { -076this.current = new TestCell(this.count); -077this.count++; -078return true; -079 } -080 return false; -081} -082 } -083 -084 /** -085 * Cell used in test. Has row only. -086 */ -087 private static class TestCell implements Cell { -088private final byte [] row; -089 -090TestCell(final int i) { -091 this.row = Bytes.toBytes(i); -092} -093 -094@Override -095public byte[] getRowArray() { -096 return this.row; -097} -098 -099@Override -100public int getRowOffset() { -101 return 0; -102} -103 -104@Override -105public short getRowLength() { -106 return (short)this.row.length; -107} -108 -109@Override -110public byte[] getFamilyArray() { -111 // TODO Auto-generated method stub -112 return null; -113} -114 -115@Override -116public int getFamilyOffset() { -117 // TODO Auto-generated method stub -118 return 0; -119} -120 -121@Override -122public byte getFamilyLength() { -123 // TODO Auto-generated method stub -124 return 0; -125} -126 -127@Override -128public byte[] getQualifierArray() { -129 // TODO Auto-generated method stub -130 return null; -131} -132 -133@Override -134public int getQualifierOffset() { -135 // TODO Auto-generated method stub -136 return 0; -137} -138 -139@Override -140public int getQualifierLength() { -141 // TODO Auto-generated method stub -142 return 0; -143} -144 -145@Override -146public long getTimestamp() { -147 // TODO Auto-generated method stub -148 return 0; -149} -150 -151@Override -152public byte getTypeByte() { -153 // TODO Auto-generated method stub -154 return 0; -155} -156 -157@Override -158public byte[] 
getValueArray() { -159 // TODO Auto-generated method stub -160 return null; -161} -162 -163@Override -164public int getValueOffset() { -165 // TODO Auto-generated method stub -166 return 0; -167} -168 -169@Override -170public int getValueLength() { -171 // TODO Auto-generated method stub -172 return 0; -173} -174 -175@Override -176public byte[] getTagsArray() { -177 // TODO Auto-generated method stub -178 return null; -179} -180 -181@Override -182public int getTagsOffset() { -183 // TODO Auto-generated method stub -184 return 0; -185} -186 -187@Override -188public long getSequenceId() { -189
[05/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0b638133/testdevapidocs/src-html/org/apache/hadoop/hbase/TestKeyValue.MockKeyValue.html -- diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/TestKeyValue.MockKeyValue.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/TestKeyValue.MockKeyValue.html index a4b20ad..e46ae3f 100644 --- a/testdevapidocs/src-html/org/apache/hadoop/hbase/TestKeyValue.MockKeyValue.html +++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/TestKeyValue.MockKeyValue.html @@ -745,13 +745,8 @@ 737public byte[] getTagsArray() { 738 return this.kv.getTagsArray(); 739} -740 -741@Override -742public Type getType() { -743 return PrivateCellUtil.toType(getTypeByte()); -744} -745 } -746} +740 } +741} http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0b638133/testdevapidocs/src-html/org/apache/hadoop/hbase/TestKeyValue.html -- diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/TestKeyValue.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/TestKeyValue.html index a4b20ad..e46ae3f 100644 --- a/testdevapidocs/src-html/org/apache/hadoop/hbase/TestKeyValue.html +++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/TestKeyValue.html @@ -745,13 +745,8 @@ 737public byte[] getTagsArray() { 738 return this.kv.getTagsArray(); 739} -740 -741@Override -742public Type getType() { -743 return PrivateCellUtil.toType(getTypeByte()); -744} -745 } -746} +740 } +741} http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0b638133/testdevapidocs/src-html/org/apache/hadoop/hbase/client/AbstractTestCIOperationTimeout.html -- diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/client/AbstractTestCIOperationTimeout.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/AbstractTestCIOperationTimeout.html new file mode 100644 index 000..68404b4 --- /dev/null +++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/AbstractTestCIOperationTimeout.html @@ -0,0 +1,155 @@ +http://www.w3.org/TR/html4/loose.dtd;> + + +Source 
code + + + + +001/** +002 * Licensed to the Apache Software Foundation (ASF) under one +003 * or more contributor license agreements. See the NOTICE file +004 * distributed with this work for additional information +005 * regarding copyright ownership. The ASF licenses this file +006 * to you under the Apache License, Version 2.0 (the +007 * "License"); you may not use this file except in compliance +008 * with the License. You may obtain a copy of the License at +009 * +010 * http://www.apache.org/licenses/LICENSE-2.0 +011 * +012 * Unless required by applicable law or agreed to in writing, software +013 * distributed under the License is distributed on an "AS IS" BASIS, +014 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +015 * See the License for the specific language governing permissions and +016 * limitations under the License. +017 */ +018package org.apache.hadoop.hbase.client; +019 +020import static org.junit.Assert.fail; +021 +022import java.io.IOException; +023import java.net.SocketTimeoutException; +024import org.apache.hadoop.hbase.TableName; +025import org.junit.Before; +026import org.junit.Test; +027import org.slf4j.Logger; +028import org.slf4j.LoggerFactory; +029 +030/** +031 * Based class for testing operation timeout logic for {@link ConnectionImplementation}. 
+032 */ +033public abstract class AbstractTestCIOperationTimeout extends AbstractTestCITimeout { +034 +035 private static final Logger LOG = LoggerFactory.getLogger(AbstractTestCIOperationTimeout.class); +036 +037 private TableName tableName; +038 +039 @Before +040 public void setUp() throws IOException { +041tableName = TableName.valueOf(name.getMethodName()); +042TableDescriptor htd = TableDescriptorBuilder.newBuilder(tableName) +043 .addCoprocessor(SleepAndFailFirstTime.class.getName()) +044 .addColumnFamily(ColumnFamilyDescriptorBuilder.of(FAM_NAM)).build(); +045 TEST_UTIL.getAdmin().createTable(htd); +046 } +047 +048 protected abstract void execute(Table table) throws IOException; +049 +050 /** +051 * Test that an operation can fail if we read the global operation timeout, even if the individual +052 * timeout is fine. We do that with: +053 * ul +054 * liclient side: an operation timeout of 30 seconds/li +055 * liserver side: we sleep 20 second at each attempt. The first work fails, the second one +056 * succeeds. But the client won't wait that much, because 20 + 20 30, so the client timed out +057 * when the server answers./li +058 * /ul +059 */ +060 @Test +061 public void testOperationTimeout() throws IOException { +062TableBuilder builder = +063
[34/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0b638133/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionsCleaner.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionsCleaner.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionsCleaner.html index 163ade0..802fc2f 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionsCleaner.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionsCleaner.html @@ -254,3512 +254,3505 @@ 246 protected MemStoreFlusher cacheFlusher; 247 248 protected HeapMemoryManager hMemManager; -249 protected CountDownLatch initLatch = null; -250 -251 /** -252 * Cluster connection to be shared by services. -253 * Initialized at server startup and closed when server shuts down. -254 * Clients must never close it explicitly. -255 */ -256 protected ClusterConnection clusterConnection; -257 -258 /* -259 * Long-living meta table locator, which is created when the server is started and stopped -260 * when server shuts down. References to this locator shall be used to perform according -261 * operations in EventHandlers. Primary reason for this decision is to make it mockable -262 * for tests. -263 */ -264 protected MetaTableLocator metaTableLocator; -265 -266 /** -267 * Go here to get table descriptors. -268 */ -269 protected TableDescriptors tableDescriptors; -270 -271 // Replication services. If no replication, this handler will be null. -272 protected ReplicationSourceService replicationSourceHandler; -273 protected ReplicationSinkService replicationSinkHandler; -274 -275 // Compactions -276 public CompactSplit compactSplitThread; -277 -278 /** -279 * Map of regions currently being served by this region server. Key is the -280 * encoded region name. All access should be synchronized. 
-281 */ -282 protected final MapString, HRegion onlineRegions = new ConcurrentHashMap(); -283 -284 /** -285 * Map of encoded region names to the DataNode locations they should be hosted on -286 * We store the value as InetSocketAddress since this is used only in HDFS -287 * API (create() that takes favored nodes as hints for placing file blocks). -288 * We could have used ServerName here as the value class, but we'd need to -289 * convert it to InetSocketAddress at some point before the HDFS API call, and -290 * it seems a bit weird to store ServerName since ServerName refers to RegionServers -291 * and here we really mean DataNode locations. -292 */ -293 protected final MapString, InetSocketAddress[] regionFavoredNodesMap = -294 new ConcurrentHashMap(); -295 -296 // Leases -297 protected Leases leases; -298 -299 // Instance of the hbase executor executorService. -300 protected ExecutorService executorService; -301 -302 // If false, the file system has become unavailable -303 protected volatile boolean fsOk; -304 protected HFileSystem fs; -305 protected HFileSystem walFs; -306 -307 // Set when a report to the master comes back with a message asking us to -308 // shutdown. Also set by call to stop when debugging or running unit tests -309 // of HRegionServer in isolation. -310 private volatile boolean stopped = false; -311 -312 // Go down hard. Used if file system becomes unavailable and also in -313 // debugging and unit tests. -314 private volatile boolean abortRequested; -315 -316 ConcurrentMapString, Integer rowlocks = new ConcurrentHashMap(); -317 -318 // A state before we go into stopped state. At this stage we're closing user -319 // space regions. 
-320 private boolean stopping = false; -321 -322 volatile boolean killed = false; -323 -324 protected final Configuration conf; -325 -326 private Path rootDir; -327 private Path walRootDir; -328 -329 protected final ReentrantReadWriteLock lock = new ReentrantReadWriteLock(); -330 -331 final int numRetries; -332 protected final int threadWakeFrequency; -333 protected final int msgInterval; -334 -335 protected final int numRegionsToReport; -336 -337 // Stub to do region server status calls against the master. -338 private volatile RegionServerStatusService.BlockingInterface rssStub; -339 private volatile LockService.BlockingInterface lockStub; -340 // RPC client. Used to make the stub above that does region server status checking. -341 RpcClient rpcClient; -342 -343 private RpcRetryingCallerFactory rpcRetryingCallerFactory; -344 private RpcControllerFactory rpcControllerFactory; -345 -346 private UncaughtExceptionHandler uncaughtExceptionHandler; -347 -348 // Info server. Default access so can be used by unit tests. REGIONSERVER -349 // is name of the webapp and the attribute name used stuffing this instance -350 // into web context.
[39/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0b638133/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html index fa489c5..1fc3ca7 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html @@ -47,3488 +47,3492 @@ 039import java.util.Map.Entry; 040import java.util.Objects; 041import java.util.Set; -042import java.util.concurrent.CountDownLatch; -043import java.util.concurrent.ExecutionException; -044import java.util.concurrent.Future; -045import java.util.concurrent.TimeUnit; -046import java.util.concurrent.TimeoutException; -047import java.util.concurrent.atomic.AtomicInteger; -048import java.util.concurrent.atomic.AtomicReference; -049import java.util.function.Function; -050import java.util.regex.Pattern; -051import java.util.stream.Collectors; -052import javax.servlet.ServletException; -053import javax.servlet.http.HttpServlet; -054import javax.servlet.http.HttpServletRequest; -055import javax.servlet.http.HttpServletResponse; -056import org.apache.hadoop.conf.Configuration; -057import org.apache.hadoop.fs.Path; -058import org.apache.hadoop.hbase.ClusterMetrics; -059import org.apache.hadoop.hbase.ClusterMetrics.Option; -060import org.apache.hadoop.hbase.ClusterMetricsBuilder; -061import org.apache.hadoop.hbase.CoordinatedStateException; -062import org.apache.hadoop.hbase.DoNotRetryIOException; -063import org.apache.hadoop.hbase.HBaseIOException; -064import org.apache.hadoop.hbase.HBaseInterfaceAudience; -065import org.apache.hadoop.hbase.HConstants; -066import org.apache.hadoop.hbase.InvalidFamilyOperationException; -067import org.apache.hadoop.hbase.MasterNotRunningException; -068import org.apache.hadoop.hbase.MetaTableAccessor; 
-069import org.apache.hadoop.hbase.NamespaceDescriptor; -070import org.apache.hadoop.hbase.PleaseHoldException; -071import org.apache.hadoop.hbase.ReplicationPeerNotFoundException; -072import org.apache.hadoop.hbase.ServerLoad; -073import org.apache.hadoop.hbase.ServerMetricsBuilder; -074import org.apache.hadoop.hbase.ServerName; -075import org.apache.hadoop.hbase.TableDescriptors; -076import org.apache.hadoop.hbase.TableName; -077import org.apache.hadoop.hbase.TableNotDisabledException; -078import org.apache.hadoop.hbase.TableNotFoundException; -079import org.apache.hadoop.hbase.UnknownRegionException; -080import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; -081import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; -082import org.apache.hadoop.hbase.client.MasterSwitchType; -083import org.apache.hadoop.hbase.client.RegionInfo; -084import org.apache.hadoop.hbase.client.Result; -085import org.apache.hadoop.hbase.client.TableDescriptor; -086import org.apache.hadoop.hbase.client.TableDescriptorBuilder; -087import org.apache.hadoop.hbase.client.TableState; -088import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; -089import org.apache.hadoop.hbase.exceptions.DeserializationException; -090import org.apache.hadoop.hbase.exceptions.MergeRegionException; -091import org.apache.hadoop.hbase.executor.ExecutorType; -092import org.apache.hadoop.hbase.favored.FavoredNodesManager; -093import org.apache.hadoop.hbase.favored.FavoredNodesPromoter; -094import org.apache.hadoop.hbase.http.InfoServer; -095import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils; -096import org.apache.hadoop.hbase.ipc.RpcServer; -097import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException; -098import org.apache.hadoop.hbase.log.HBaseMarkers; -099import org.apache.hadoop.hbase.master.MasterRpcServices.BalanceSwitchMode; -100import org.apache.hadoop.hbase.master.assignment.AssignmentManager; -101import org.apache.hadoop.hbase.master.assignment.MergeTableRegionsProcedure; 
-102import org.apache.hadoop.hbase.master.assignment.RegionStates; -103import org.apache.hadoop.hbase.master.assignment.RegionStates.RegionStateNode; -104import org.apache.hadoop.hbase.master.balancer.BalancerChore; -105import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer; -106import org.apache.hadoop.hbase.master.balancer.ClusterStatusChore; -107import org.apache.hadoop.hbase.master.balancer.LoadBalancerFactory; -108import org.apache.hadoop.hbase.master.cleaner.HFileCleaner; -109import org.apache.hadoop.hbase.master.cleaner.LogCleaner; -110import org.apache.hadoop.hbase.master.cleaner.ReplicationMetaCleaner; -111import org.apache.hadoop.hbase.master.locking.LockManager; -112import org.apache.hadoop.hbase.master.normalizer.NormalizationPlan; -113import org.apache.hadoop.hbase.master.normalizer.NormalizationPlan.PlanType; -114import
[04/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0b638133/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestCIDeleteRpcTimeout.html -- diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestCIDeleteRpcTimeout.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestCIDeleteRpcTimeout.html new file mode 100644 index 000..22bbba1 --- /dev/null +++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestCIDeleteRpcTimeout.html @@ -0,0 +1,104 @@ +http://www.w3.org/TR/html4/loose.dtd;> + + +Source code + + + + +001/** +002 * Licensed to the Apache Software Foundation (ASF) under one +003 * or more contributor license agreements. See the NOTICE file +004 * distributed with this work for additional information +005 * regarding copyright ownership. The ASF licenses this file +006 * to you under the Apache License, Version 2.0 (the +007 * "License"); you may not use this file except in compliance +008 * with the License. You may obtain a copy of the License at +009 * +010 * http://www.apache.org/licenses/LICENSE-2.0 +011 * +012 * Unless required by applicable law or agreed to in writing, software +013 * distributed under the License is distributed on an "AS IS" BASIS, +014 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +015 * See the License for the specific language governing permissions and +016 * limitations under the License. 
+017 */ +018package org.apache.hadoop.hbase.client; +019 +020import java.io.IOException; +021import org.apache.hadoop.hbase.testclassification.ClientTests; +022import org.apache.hadoop.hbase.testclassification.MediumTests; +023import org.junit.experimental.categories.Category; +024 +025@Category({ ClientTests.class, MediumTests.class }) +026public class TestCIDeleteRpcTimeout extends AbstractTestCIRpcTimeout { +027 +028 @Override +029 protected void execute(Table table) throws IOException { +030table.delete(new Delete(FAM_NAM)); +031 } +032} + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0b638133/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestCIGetOperationTimeout.html -- diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestCIGetOperationTimeout.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestCIGetOperationTimeout.html new file mode 100644 index 000..7f4d982 --- /dev/null +++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestCIGetOperationTimeout.html @@ -0,0 +1,104 @@ +http://www.w3.org/TR/html4/loose.dtd;> + + +Source code + + + + +001/** +002 * Licensed to the Apache Software Foundation (ASF) under one +003 * or more contributor license agreements. See the NOTICE file +004 * distributed with this work for additional information +005 * regarding copyright ownership. The ASF licenses this file +006 * to you under the Apache License, Version 2.0 (the +007 * "License"); you may not use this file except in compliance +008 * with the License. You may obtain a copy of the License at +009 * +010 * http://www.apache.org/licenses/LICENSE-2.0 +011 * +012 * Unless required by applicable law or agreed to in writing, software +013 * distributed under the License is distributed on an "AS IS" BASIS, +014 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+015 * See the License for the specific language governing permissions and +016 * limitations under the License. +017 */ +018package org.apache.hadoop.hbase.client; +019 +020import java.io.IOException; +021import org.apache.hadoop.hbase.testclassification.ClientTests; +022import org.apache.hadoop.hbase.testclassification.LargeTests; +023import org.junit.experimental.categories.Category; +024 +025@Category({ ClientTests.class, LargeTests.class }) +026public class TestCIGetOperationTimeout extends AbstractTestCIOperationTimeout { +027 +028 @Override +029 protected void execute(Table table) throws IOException { +030table.get(new Get(FAM_NAM)); +031 } +032} + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0b638133/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestCIGetRpcTimeout.html -- diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestCIGetRpcTimeout.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestCIGetRpcTimeout.html new file mode 100644 index 000..9c835ce --- /dev/null +++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestCIGetRpcTimeout.html @@ -0,0 +1,104 @@
[49/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0b638133/checkstyle.rss -- diff --git a/checkstyle.rss b/checkstyle.rss index d63eae9..a9a5588 100644 --- a/checkstyle.rss +++ b/checkstyle.rss @@ -25,8 +25,8 @@ under the License. en-us 2007 - 2018 The Apache Software Foundation - File: 3487, - Errors: 18149, + File: 3499, + Errors: 18148, Warnings: 0, Infos: 0 @@ -368,6 +368,20 @@ under the License. + http://hbase.apache.org/checkstyle.html#org.apache.hadoop.hbase.client.TestCIPutRpcTimeout.java;>org/apache/hadoop/hbase/client/TestCIPutRpcTimeout.java + + + 0 + + + 0 + + + 0 + + + + http://hbase.apache.org/checkstyle.html#org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint.java;>org/apache/hadoop/hbase/coprocessor/MultiRowMutationEndpoint.java @@ -1026,6 +1040,20 @@ under the License. + http://hbase.apache.org/checkstyle.html#org.apache.hadoop.hbase.client.TestCIGetRpcTimeout.java;>org/apache/hadoop/hbase/client/TestCIGetRpcTimeout.java + + + 0 + + + 0 + + + 0 + + + + http://hbase.apache.org/checkstyle.html#org.apache.hadoop.hbase.backup.BackupMergeJob.java;>org/apache/hadoop/hbase/backup/BackupMergeJob.java @@ -6626,6 +6654,20 @@ under the License. + http://hbase.apache.org/checkstyle.html#org.apache.hadoop.hbase.client.TestCIBadHostname.java;>org/apache/hadoop/hbase/client/TestCIBadHostname.java + + + 0 + + + 0 + + + 1 + + + + http://hbase.apache.org/checkstyle.html#org.apache.hadoop.hbase.mob.TestCachedMobFile.java;>org/apache/hadoop/hbase/mob/TestCachedMobFile.java @@ -8922,6 +8964,20 @@ under the License. + http://hbase.apache.org/checkstyle.html#org.apache.hadoop.hbase.client.TestCIPutOperationTimeout.java;>org/apache/hadoop/hbase/client/TestCIPutOperationTimeout.java + + + 0 + + + 0 + + + 0 + + + + http://hbase.apache.org/checkstyle.html#org.apache.hadoop.hbase.util.TestLoadTestKVGenerator.java;>org/apache/hadoop/hbase/util/TestLoadTestKVGenerator.java @@ -10252,6 +10308,20 @@ under the License. 
+ http://hbase.apache.org/checkstyle.html#org.apache.hadoop.hbase.zookeeper.ZooKeeperHelper.java;>org/apache/hadoop/hbase/zookeeper/ZooKeeperHelper.java + + + 0 + + + 0 + + + 1 + + + + http://hbase.apache.org/checkstyle.html#org.apache.hadoop.hbase.master.normalizer.SplitNormalizationPlan.java;>org/apache/hadoop/hbase/master/normalizer/SplitNormalizationPlan.java @@ -11190,6 +11260,20 @@ under the License. + http://hbase.apache.org/checkstyle.html#org.apache.hadoop.hbase.client.TestCISleep.java;>org/apache/hadoop/hbase/client/TestCISleep.java + + + 0 + + + 0 + + + 0 + + + + http://hbase.apache.org/checkstyle.html#org.apache.hadoop.hbase.http.HttpServerUtil.java;>org/apache/hadoop/hbase/http/HttpServerUtil.java @@ -11288,6 +11372,20 @@ under the License.
[48/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0b638133/devapidocs/org/apache/hadoop/hbase/Cell.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/Cell.html b/devapidocs/org/apache/hadoop/hbase/Cell.html index 1966c4e..6c178f3 100644 --- a/devapidocs/org/apache/hadoop/hbase/Cell.html +++ b/devapidocs/org/apache/hadoop/hbase/Cell.html @@ -18,8 +18,8 @@ catch(err) { } //--> -var methods = {"i0":6,"i1":6,"i2":6,"i3":6,"i4":6,"i5":6,"i6":6,"i7":6,"i8":6,"i9":38,"i10":38,"i11":38,"i12":38,"i13":6,"i14":6,"i15":38,"i16":6,"i17":6,"i18":6}; -var tabs = {65535:["t0","All Methods"],2:["t2","Instance Methods"],4:["t3","Abstract Methods"],32:["t6","Deprecated Methods"]}; +var methods = {"i0":6,"i1":6,"i2":6,"i3":6,"i4":6,"i5":6,"i6":6,"i7":6,"i8":6,"i9":38,"i10":38,"i11":38,"i12":38,"i13":6,"i14":18,"i15":38,"i16":6,"i17":6,"i18":6}; +var tabs = {65535:["t0","All Methods"],2:["t2","Instance Methods"],4:["t3","Abstract Methods"],16:["t5","Default Methods"],32:["t6","Deprecated Methods"]}; var altColor = "altColor"; var rowColor = "rowColor"; var tableTab = "tableTab"; @@ -180,7 +180,7 @@ public interface -All MethodsInstance MethodsAbstract MethodsDeprecated Methods +All MethodsInstance MethodsAbstract MethodsDefault MethodsDeprecated Methods Modifier and Type Method and Description @@ -265,9 +265,9 @@ public interface getTimestamp() -Cell.Type +default Cell.Type getType() -Returns the type of cell in a human readable format using Cell.Type +Returns the type of cell in a human readable format using Cell.Type. @@ -576,8 +576,10 @@ int getType -Cell.TypegetType() -Returns the type of cell in a human readable format using Cell.Type +defaultCell.TypegetType() +Returns the type of cell in a human readable format using Cell.Type. 
+ Note : This does not expose the internal types of Cells like KeyValue.Type.Maximum and + KeyValue.Type.Minimum Returns: The data type this cell: one of Put, Delete, etc http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0b638133/devapidocs/org/apache/hadoop/hbase/ExtendedCell.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/ExtendedCell.html b/devapidocs/org/apache/hadoop/hbase/ExtendedCell.html index 11b8675..b89e7cc 100644 --- a/devapidocs/org/apache/hadoop/hbase/ExtendedCell.html +++ b/devapidocs/org/apache/hadoop/hbase/ExtendedCell.html @@ -18,7 +18,7 @@ catch(err) { } //--> -var methods = {"i0":18,"i1":18,"i2":6,"i3":18,"i4":6,"i5":6,"i6":6,"i7":18,"i8":6,"i9":6,"i10":6,"i11":6,"i12":18,"i13":18}; +var methods = {"i0":18,"i1":18,"i2":6,"i3":18,"i4":6,"i5":6,"i6":6,"i7":6,"i8":6,"i9":6,"i10":6,"i11":18,"i12":18}; var tabs = {65535:["t0","All Methods"],2:["t2","Instance Methods"],4:["t3","Abstract Methods"],16:["t5","Default Methods"]}; var altColor = "altColor"; var rowColor = "rowColor"; @@ -213,41 +213,35 @@ extends getTagsOffset() -default Cell.Type -getType() -Returns the type of cell in a human readable format using Cell.Type - - - byte getTypeByte() - + void setSequenceId(longseqId) Sets with the given seqId. - + void setTimestamp(byte[]ts) Sets with the given timestamp. - + void setTimestamp(longts) Sets with the given timestamp. - + default void write(http://docs.oracle.com/javase/8/docs/api/java/nio/ByteBuffer.html?is-external=true; title="class or interface in java.nio">ByteBufferbuf, intoffset) Write this Cell into the given buf's offset in a KeyValue format. 
- + default int write(http://docs.oracle.com/javase/8/docs/api/java/io/OutputStream.html?is-external=true; title="class or interface in java.io">OutputStreamout, booleanwithTags) @@ -267,7 +261,7 @@ extends Cell -getFamilyArray, getFamilyLength, getFamilyOffset, getQualifierArray, getQualifierLength, getQualifierOffset, getRowArray, getRowLength, getRowOffset, getTimestamp, getValueArray, getValueLength, getValueOffset +getFamilyArray, getFamilyLength, getFamilyOffset, getQualifierArray, getQualifierLength, getQualifierOffset, getRowArray, getRowLength, getRowOffset, getTimestamp, getType, getValueArray, getValueLength, getValueOffset @@ -523,32 +517,13 @@ extends - - - - -getType -defaultCell.TypegetType() -Returns the type of cell in a human readable format using Cell.Type - - Note : This does not expose the internal types of Cells like KeyValue.Type.Maximum and - KeyValue.Type.Minimum - -Specified by: -getTypein interfaceCell -Returns: -The data type this cell: one of Put, Delete, etc - - - getTypeByte -bytegetTypeByte() +bytegetTypeByte() Specified by: getTypeBytein interfaceCell http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0b638133/devapidocs/org/apache/hadoop/hbase/IndividualBytesFieldCell.html -- diff --git
[47/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0b638133/devapidocs/org/apache/hadoop/hbase/class-use/Cell.Type.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/class-use/Cell.Type.html b/devapidocs/org/apache/hadoop/hbase/class-use/Cell.Type.html index 0cfd150..63a5d79 100644 --- a/devapidocs/org/apache/hadoop/hbase/class-use/Cell.Type.html +++ b/devapidocs/org/apache/hadoop/hbase/class-use/Cell.Type.html @@ -102,6 +102,19 @@ Uses of Cell.Type in org.apache.hadoop.hbase + +Fields in org.apache.hadoop.hbase declared as Cell.Type + +Modifier and Type +Field and Description + + + +private static Cell.Type[] +Cell.Type.CODE_ARRAY + + + Methods in org.apache.hadoop.hbase that return Cell.Type @@ -110,41 +123,31 @@ -Cell.Type -ByteBufferKeyOnlyKeyValue.getType() - - -Cell.Type +default Cell.Type Cell.getType() -Returns the type of cell in a human readable format using Cell.Type +Returns the type of cell in a human readable format using Cell.Type. - + Cell.Type PrivateCellUtil.FirstOnRowCell.getType() - + Cell.Type PrivateCellUtil.FirstOnRowByteBufferExtendedCell.getType() - + Cell.Type PrivateCellUtil.LastOnRowByteBufferExtendedCell.getType() - + Cell.Type PrivateCellUtil.LastOnRowCell.getType() - + Cell.Type PrivateCellUtil.FirstOnRowDeleteFamilyCell.getType() - -default Cell.Type -ExtendedCell.getType() -Returns the type of cell in a human readable format using Cell.Type - - static Cell.Type PrivateCellUtil.toType(bytetype) http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0b638133/devapidocs/org/apache/hadoop/hbase/class-use/CellBuilder.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/class-use/CellBuilder.html b/devapidocs/org/apache/hadoop/hbase/class-use/CellBuilder.html index d66df48..3dfa0a4 100644 --- a/devapidocs/org/apache/hadoop/hbase/class-use/CellBuilder.html +++ b/devapidocs/org/apache/hadoop/hbase/class-use/CellBuilder.html @@ -115,7 +115,8 @@ interface RawCellBuilder -Allows creating a cell with Tag +Allows creating a cell with 
Tag + An instance of this type can be acquired using RegionCoprocessorEnvironment#getCellBuilder. http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0b638133/devapidocs/org/apache/hadoop/hbase/class-use/ZooKeeperConnectionException.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/class-use/ZooKeeperConnectionException.html b/devapidocs/org/apache/hadoop/hbase/class-use/ZooKeeperConnectionException.html index bcc708f..d6fa9cb 100644 --- a/devapidocs/org/apache/hadoop/hbase/class-use/ZooKeeperConnectionException.html +++ b/devapidocs/org/apache/hadoop/hbase/class-use/ZooKeeperConnectionException.html @@ -250,6 +250,13 @@ private void ZKWatcher.createBaseZNodes() + +static org.apache.zookeeper.ZooKeeper +ZooKeeperHelper.ensureConnectedZooKeeper(org.apache.zookeeper.ZooKeeperzookeeper, +inttimeout) +Ensure passed zookeeper is connected. + + http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0b638133/devapidocs/org/apache/hadoop/hbase/client/Mutation.CellWrapper.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/client/Mutation.CellWrapper.html b/devapidocs/org/apache/hadoop/hbase/client/Mutation.CellWrapper.html index 1c9afb8..da5398c 100644 --- a/devapidocs/org/apache/hadoop/hbase/client/Mutation.CellWrapper.html +++ b/devapidocs/org/apache/hadoop/hbase/client/Mutation.CellWrapper.html @@ -359,7 +359,7 @@ implements Methods inherited from interfaceorg.apache.hadoop.hbase.ExtendedCell -deepClone, getChunkId, getSerializedSize, getType, write, write +deepClone, getChunkId, getSerializedSize, write, write @@ -368,6 +368,13 @@ implements Methods inherited from interfaceorg.apache.hadoop.hbase.RawCell checkForTagsLength + + + + +Methods inherited from interfaceorg.apache.hadoop.hbase.Cell +getType + http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0b638133/devapidocs/org/apache/hadoop/hbase/client/package-tree.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/client/package-tree.html 
b/devapidocs/org/apache/hadoop/hbase/client/package-tree.html index 395534e..15c3744 100644 --- a/devapidocs/org/apache/hadoop/hbase/client/package-tree.html +++ b/devapidocs/org/apache/hadoop/hbase/client/package-tree.html @@ -548,23 +548,23 @@ java.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true; title="class or interface in java.lang">EnumE (implements java.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true; title="class or interface in java.lang">ComparableT,
[44/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0b638133/devapidocs/org/apache/hadoop/hbase/quotas/package-tree.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/quotas/package-tree.html b/devapidocs/org/apache/hadoop/hbase/quotas/package-tree.html index e7a6a06..74a1674 100644 --- a/devapidocs/org/apache/hadoop/hbase/quotas/package-tree.html +++ b/devapidocs/org/apache/hadoop/hbase/quotas/package-tree.html @@ -207,11 +207,11 @@ java.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true; title="class or interface in java.lang">EnumE (implements java.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true; title="class or interface in java.lang">ComparableT, java.io.http://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true; title="class or interface in java.io">Serializable) org.apache.hadoop.hbase.quotas.OperationQuota.OperationType -org.apache.hadoop.hbase.quotas.ThrottleType -org.apache.hadoop.hbase.quotas.QuotaType org.apache.hadoop.hbase.quotas.SpaceViolationPolicy -org.apache.hadoop.hbase.quotas.ThrottlingException.Type org.apache.hadoop.hbase.quotas.QuotaScope +org.apache.hadoop.hbase.quotas.QuotaType +org.apache.hadoop.hbase.quotas.ThrottleType +org.apache.hadoop.hbase.quotas.ThrottlingException.Type http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0b638133/devapidocs/org/apache/hadoop/hbase/regionserver/ByteBufferChunkKeyValue.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/ByteBufferChunkKeyValue.html b/devapidocs/org/apache/hadoop/hbase/regionserver/ByteBufferChunkKeyValue.html index 30e601c..87b6822 100644 --- a/devapidocs/org/apache/hadoop/hbase/regionserver/ByteBufferChunkKeyValue.html +++ b/devapidocs/org/apache/hadoop/hbase/regionserver/ByteBufferChunkKeyValue.html @@ -241,18 +241,18 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#clone--; title="class or interface in 
java.lang">clone, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#finalize--; title="class or interface in java.lang">finalize, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#getClass--; title="class or interface in java.lang">getClass, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#notify--; title="class or interface in java.lang">notify, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#notifyAll--; title="class or interface in java.lang">notifyAll, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#wait--; title="class or interface in java.lang">wait, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-ex ternal=true#wait-long-" title="class or interface in java.lang">wait, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#wait-long-int-; title="class or interface in java.lang">wait - + -Methods inherited from interfaceorg.apache.hadoop.hbase.ExtendedCell -getType +Methods inherited from interfaceorg.apache.hadoop.hbase.RawCell +checkForTagsLength, cloneTags, getTag, getTags - + -Methods inherited from interfaceorg.apache.hadoop.hbase.RawCell -checkForTagsLength, cloneTags, getTag, getTags +Methods inherited from interfaceorg.apache.hadoop.hbase.Cell +getType http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0b638133/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.CompactionChecker.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.CompactionChecker.html b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.CompactionChecker.html index 4d85e5b..88061a5 100644 --- a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.CompactionChecker.html +++ b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.CompactionChecker.html @@ -122,7 +122,7 @@ var activeTableTab = "activeTableTab"; -private 
static class HRegionServer.CompactionChecker +private static class HRegionServer.CompactionChecker extends ScheduledChore @@ -233,7 +233,7 @@ extends instance -private finalHRegionServer instance +private finalHRegionServer instance @@ -242,7 +242,7 @@ extends majorCompactPriority -private finalint majorCompactPriority +private finalint majorCompactPriority @@ -251,7 +251,7 @@ extends DEFAULT_PRIORITY -private static finalint DEFAULT_PRIORITY +private static finalint DEFAULT_PRIORITY See Also: Constant Field Values @@ -264,7 +264,7 @@ extends iteration -privatelong iteration +privatelong iteration @@ -281,7 +281,7 @@ extends CompactionChecker -CompactionChecker(HRegionServerh,
[31/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0b638133/devapidocs/src-html/org/apache/hadoop/hbase/zookeeper/ReadOnlyZKClient.Task.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/zookeeper/ReadOnlyZKClient.Task.html b/devapidocs/src-html/org/apache/hadoop/hbase/zookeeper/ReadOnlyZKClient.Task.html index 5b66298..ea864e9 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/zookeeper/ReadOnlyZKClient.Task.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/zookeeper/ReadOnlyZKClient.Task.html @@ -39,319 +39,329 @@ 031import java.util.concurrent.atomic.AtomicBoolean; 032 033import org.apache.hadoop.conf.Configuration; -034import org.apache.yetus.audience.InterfaceAudience; -035import org.apache.zookeeper.KeeperException; -036import org.apache.zookeeper.KeeperException.Code; -037import org.apache.zookeeper.ZooKeeper; -038import org.apache.zookeeper.data.Stat; -039import org.slf4j.Logger; -040import org.slf4j.LoggerFactory; -041import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; -042 -043/** -044 * A very simple read only zookeeper implementation without watcher support. 
-045 */ -046@InterfaceAudience.Private -047public final class ReadOnlyZKClient implements Closeable { -048 -049 private static final Logger LOG = LoggerFactory.getLogger(ReadOnlyZKClient.class); -050 -051 public static final String RECOVERY_RETRY = "zookeeper.recovery.retry"; -052 -053 private static final int DEFAULT_RECOVERY_RETRY = 30; -054 -055 public static final String RECOVERY_RETRY_INTERVAL_MILLIS = -056 "zookeeper.recovery.retry.intervalmill"; -057 -058 private static final int DEFAULT_RECOVERY_RETRY_INTERVAL_MILLIS = 1000; -059 -060 public static final String KEEPALIVE_MILLIS = "zookeeper.keep-alive.time"; -061 -062 private static final int DEFAULT_KEEPALIVE_MILLIS = 6; -063 -064 private static final EnumSetCode FAIL_FAST_CODES = EnumSet.of(Code.NOAUTH, Code.AUTHFAILED); -065 -066 private final String connectString; -067 -068 private final int sessionTimeoutMs; -069 -070 private final int maxRetries; -071 -072 private final int retryIntervalMs; -073 -074 private final int keepAliveTimeMs; -075 -076 private static abstract class Task implements Delayed { -077 -078protected long time = System.nanoTime(); -079 -080public boolean needZk() { -081 return false; -082} -083 -084public void exec(ZooKeeper zk) { -085} -086 -087public void connectFailed(IOException e) { -088} -089 -090public void closed(IOException e) { -091} -092 -093@Override -094public int compareTo(Delayed o) { -095 Task that = (Task) o; -096 int c = Long.compare(time, that.time); -097 if (c != 0) { -098return c; -099 } -100 return Integer.compare(System.identityHashCode(this), System.identityHashCode(that)); -101} -102 -103@Override -104public long getDelay(TimeUnit unit) { -105 return unit.convert(time - System.nanoTime(), TimeUnit.NANOSECONDS); -106} -107 } -108 -109 private static final Task CLOSE = new Task() { -110 }; -111 -112 private final DelayQueueTask tasks = new DelayQueue(); -113 -114 private final AtomicBoolean closed = new AtomicBoolean(false); -115 -116 private ZooKeeper 
zookeeper; -117 -118 private String getId() { -119return String.format("0x%08x", System.identityHashCode(this)); -120 } -121 -122 public ReadOnlyZKClient(Configuration conf) { -123this.connectString = ZKConfig.getZKQuorumServersString(conf); -124this.sessionTimeoutMs = conf.getInt(ZK_SESSION_TIMEOUT, DEFAULT_ZK_SESSION_TIMEOUT); -125this.maxRetries = conf.getInt(RECOVERY_RETRY, DEFAULT_RECOVERY_RETRY); -126this.retryIntervalMs = -127 conf.getInt(RECOVERY_RETRY_INTERVAL_MILLIS, DEFAULT_RECOVERY_RETRY_INTERVAL_MILLIS); -128this.keepAliveTimeMs = conf.getInt(KEEPALIVE_MILLIS, DEFAULT_KEEPALIVE_MILLIS); -129LOG.info("Start read only zookeeper connection " + getId() + " to " + connectString + -130", session timeout " + sessionTimeoutMs + " ms, retries " + maxRetries + -131", retry interval " + retryIntervalMs + " ms, keep alive " + keepAliveTimeMs + " ms"); -132Thread t = new Thread(this::run, "ReadOnlyZKClient"); -133t.setDaemon(true); -134t.start(); -135 } -136 -137 private abstract class ZKTaskT extends Task { -138 -139protected final String path; -140 -141private final CompletableFutureT future; -142 -143private final String operationType; -144 -145private int retries; -146 -147protected ZKTask(String path, CompletableFutureT future, String operationType) { -148 this.path = path; -149 this.future = future; -150 this.operationType = operationType; -151} -152 -153protected final void onComplete(ZooKeeper zk, int rc, T ret, boolean errorIfNoNode) { -154 tasks.add(new
[50/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0b638133/checkstyle-aggregate.html -- diff --git a/checkstyle-aggregate.html b/checkstyle-aggregate.html index 527ae34..a34f1c8 100644 --- a/checkstyle-aggregate.html +++ b/checkstyle-aggregate.html @@ -7,7 +7,7 @@ - + Apache HBase Checkstyle Results @@ -286,10 +286,10 @@ Warnings Errors -3487 +3499 0 0 -18149 +18148 Files @@ -2039,120 +2039,120 @@ 0 1 +org/apache/hadoop/hbase/client/TestCIBadHostname.java +0 +0 +1 + org/apache/hadoop/hbase/client/TestCheckAndMutate.java 0 0 3 - + org/apache/hadoop/hbase/client/TestClientExponentialBackoff.java 0 0 4 - + org/apache/hadoop/hbase/client/TestClientNoCluster.java 0 0 33 - + org/apache/hadoop/hbase/client/TestClientOperationInterrupt.java 0 0 1 - + org/apache/hadoop/hbase/client/TestClientPushback.java 0 0 9 - + org/apache/hadoop/hbase/client/TestClientScanner.java 0 0 11 - + org/apache/hadoop/hbase/client/TestClientScannerRPCTimeout.java 0 0 2 - + org/apache/hadoop/hbase/client/TestClientTimeouts.java 0 0 4 - + org/apache/hadoop/hbase/client/TestCloneSnapshotFromClient.java 0 0 6 - + org/apache/hadoop/hbase/client/TestColumnFamilyDescriptorBuilder.java 0 0 1 - + org/apache/hadoop/hbase/client/TestConnectionImplementation.java 0 0 -1 - +15 + org/apache/hadoop/hbase/client/TestConnectionUtils.java 0 0 2 - + org/apache/hadoop/hbase/client/TestDropTimeoutRequest.java 0 0 2 - + org/apache/hadoop/hbase/client/TestEnableTable.java 0 0 5 - + org/apache/hadoop/hbase/client/TestFastFail.java 0 0 7 - + org/apache/hadoop/hbase/client/TestFromClientSide.java 0 0 155 - + org/apache/hadoop/hbase/client/TestFromClientSide3.java 0 0 24 - + org/apache/hadoop/hbase/client/TestFromClientSideNoCodec.java 0 0 2 - + org/apache/hadoop/hbase/client/TestFromClientSideScanExcpetion.java 0 0 2 - + org/apache/hadoop/hbase/client/TestFromClientSideWithCoprocessor.java 0 0 1 - + org/apache/hadoop/hbase/client/TestGet.java 0 0 5 - + org/apache/hadoop/hbase/client/TestHBaseAdminNoCluster.java 0 0 
11 - -org/apache/hadoop/hbase/client/TestHCM.java -0 -0 -18 org/apache/hadoop/hbase/client/TestHTableMultiplexer.java 0 @@ -11309,40 +11309,50 @@ 0 15 -org/apache/hadoop/hbase/zookeeper/TestMetricsZooKeeperSourceImpl.java +org/apache/hadoop/hbase/zookeeper/ReadOnlyZKClient.java 0 0 1 -org/apache/hadoop/hbase/zookeeper/TestZKLeaderManager.java +org/apache/hadoop/hbase/zookeeper/TestMetricsZooKeeperSourceImpl.java 0 0 1 +org/apache/hadoop/hbase/zookeeper/TestZKLeaderManager.java +0 +0 +1 + org/apache/hadoop/hbase/zookeeper/ZKConfig.java 0 0 3 - + org/apache/hadoop/hbase/zookeeper/ZKMainServer.java 0 0 3 - + org/apache/hadoop/hbase/zookeeper/ZKMetadata.java 0 0 1 - + org/apache/hadoop/hbase/zookeeper/ZKWatcher.java 0 0 5 - + org/apache/hadoop/hbase/zookeeper/ZNodePaths.java 0 0 26 + +org/apache/hadoop/hbase/zookeeper/ZooKeeperHelper.java +0 +0 +1 org/apache/hadoop/metrics2/impl/JmxCacheBuster.java 0 @@ -11505,7 +11515,7 @@ caseIndent: 2 basicOffset: 2 lineWrappingIndentation: 2 -5335 +5336 Error javadoc @@ -11535,7 +11545,7 @@ max: 100 ignorePattern: ^package.*|^import.*|a href|href|http://|https://|ftp://|org.apache.thrift.|com.google.protobuf.|hbase.protobuf.generated -1710 +1708 Error @@ -33718,6 +33728,21 @@ Line is longer than 100 characters (found 103). 53 +org/apache/hadoop/hbase/client/TestCIBadHostname.java + + +Severity +Category +Rule +Message +Line + +Error +imports +ImportOrder +Wrong order for 'java.net.UnknownHostException' import. +32 + org/apache/hadoop/hbase/client/TestCheckAndMutate.java @@ -34262,10 +34287,94 @@ Line Error -imports -ImportOrder -Wrong order for 'java.net.UnknownHostException' import. -33 +blocks +NeedBraces +'if' construct must use '{}'s. +315 + +Error +javadoc +NonEmptyAtclauseDescription +At-clause should have a non-empty description. +427 + +Error +javadoc +NonEmptyAtclauseDescription +At-clause should have a non-empty description. +524 + +Error +sizes +MethodLength +Method length is 189 lines (max allowed is 150). 
+526 + +Error +whitespace +ParenPad +'(' is followed by whitespace. +591 + +Error +blocks +EmptyBlock +Must have at least one statement. +698 + +Error +javadoc +NonEmptyAtclauseDescription +At-clause should have a non-empty description. +719 + +Error +indentation +Indentation +'for' have incorrect indentation level 7, expected level should be 6. +898 + +Error +indentation +Indentation +'if' have incorrect indentation level 9, expected level should be 8. +899 + +Error +sizes +LineLength +Line is longer than 100 characters (found 101). +899 + +Error +indentation +Indentation +'if' child have incorrect indentation level 11, expected level should be 10. +901 + +Error +indentation +Indentation +'if' child have incorrect
[21/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0b638133/testdevapidocs/org/apache/hadoop/hbase/client/TestCIBadHostname.html -- diff --git a/testdevapidocs/org/apache/hadoop/hbase/client/TestCIBadHostname.html b/testdevapidocs/org/apache/hadoop/hbase/client/TestCIBadHostname.html new file mode 100644 index 000..866db4a --- /dev/null +++ b/testdevapidocs/org/apache/hadoop/hbase/client/TestCIBadHostname.html @@ -0,0 +1,382 @@ +http://www.w3.org/TR/html4/loose.dtd;> + + + + + +TestCIBadHostname (Apache HBase 3.0.0-SNAPSHOT Test API) + + + + + +var methods = {"i0":9,"i1":9,"i2":10,"i3":10}; +var tabs = {65535:["t0","All Methods"],1:["t1","Static Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]}; +var altColor = "altColor"; +var rowColor = "rowColor"; +var tableTab = "tableTab"; +var activeTableTab = "activeTableTab"; + + +JavaScript is disabled on your browser. + + + + + +Skip navigation links + + + + +Overview +Package +Class +Use +Tree +Deprecated +Index +Help + + + + +PrevClass +NextClass + + +Frames +NoFrames + + +AllClasses + + + + + + + +Summary: +Nested| +Field| +Constr| +Method + + +Detail: +Field| +Constr| +Method + + + + + + + + +org.apache.hadoop.hbase.client +Class TestCIBadHostname + + + +http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true; title="class or interface in java.lang">java.lang.Object + + +org.apache.hadoop.hbase.client.TestCIBadHostname + + + + + + + + +public class TestCIBadHostname +extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true; title="class or interface in java.lang">Object +Tests that we fail fast when hostname resolution is not working and do not cache + unresolved InetSocketAddresses. 
+ + + + + + + + + + + +Field Summary + +Fields + +Modifier and Type +Field and Description + + +private static org.apache.hadoop.hbase.client.ConnectionImplementation +conn + + +private static HBaseTestingUtility +testUtil + + + + + + + + + +Constructor Summary + +Constructors + +Constructor and Description + + +TestCIBadHostname() + + + + + + + + + +Method Summary + +All MethodsStatic MethodsInstance MethodsConcrete Methods + +Modifier and Type +Method and Description + + +static void +setupBeforeClass() + + +static void +teardownAfterClass() + + +void +testGetAdminBadHostname() + + +void +testGetClientBadHostname() + + + + + + +Methods inherited from classjava.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true; title="class or interface in java.lang">Object +http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#clone--; title="class or interface in java.lang">clone, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#equals-java.lang.Object-; title="class or interface in java.lang">equals, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#finalize--; title="class or interface in java.lang">finalize, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#getClass--; title="class or interface in java.lang">getClass, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#hashCode--; title="class or interface in java.lang">hashCode, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#notify--; title="class or interface in java.lang">notify, http://docs.oracle.com/javase/8/docs/api/java/lang /Object.html?is-external=true#notifyAll--" title="class or interface in java.lang">notifyAll, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#toString--; title="class or interface in java.lang">toString, 
http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#wait--; title="class or interface in java.lang">wait, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#wait-long-; title="class or interface in java.lang">wait, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#wait-long-int-; title="class or interface in java.lang">wait + + + + + + + + + + + + + + +Field Detail + + + + + +testUtil +private staticHBaseTestingUtility testUtil + + + + + + + +conn +private staticorg.apache.hadoop.hbase.client.ConnectionImplementation conn + + + + + + + + + +Constructor Detail + + + + + +TestCIBadHostname +publicTestCIBadHostname() + + + + + + + + + +Method Detail + + + + + +setupBeforeClass +public
[36/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0b638133/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.CompactionChecker.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.CompactionChecker.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.CompactionChecker.html index 163ade0..802fc2f 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.CompactionChecker.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.CompactionChecker.html @@ -254,3512 +254,3505 @@ 246 protected MemStoreFlusher cacheFlusher; 247 248 protected HeapMemoryManager hMemManager; -249 protected CountDownLatch initLatch = null; -250 -251 /** -252 * Cluster connection to be shared by services. -253 * Initialized at server startup and closed when server shuts down. -254 * Clients must never close it explicitly. -255 */ -256 protected ClusterConnection clusterConnection; -257 -258 /* -259 * Long-living meta table locator, which is created when the server is started and stopped -260 * when server shuts down. References to this locator shall be used to perform according -261 * operations in EventHandlers. Primary reason for this decision is to make it mockable -262 * for tests. -263 */ -264 protected MetaTableLocator metaTableLocator; -265 -266 /** -267 * Go here to get table descriptors. -268 */ -269 protected TableDescriptors tableDescriptors; -270 -271 // Replication services. If no replication, this handler will be null. -272 protected ReplicationSourceService replicationSourceHandler; -273 protected ReplicationSinkService replicationSinkHandler; -274 -275 // Compactions -276 public CompactSplit compactSplitThread; -277 -278 /** -279 * Map of regions currently being served by this region server. Key is the -280 * encoded region name. All access should be synchronized. 
-281 */ -282 protected final MapString, HRegion onlineRegions = new ConcurrentHashMap(); -283 -284 /** -285 * Map of encoded region names to the DataNode locations they should be hosted on -286 * We store the value as InetSocketAddress since this is used only in HDFS -287 * API (create() that takes favored nodes as hints for placing file blocks). -288 * We could have used ServerName here as the value class, but we'd need to -289 * convert it to InetSocketAddress at some point before the HDFS API call, and -290 * it seems a bit weird to store ServerName since ServerName refers to RegionServers -291 * and here we really mean DataNode locations. -292 */ -293 protected final MapString, InetSocketAddress[] regionFavoredNodesMap = -294 new ConcurrentHashMap(); -295 -296 // Leases -297 protected Leases leases; -298 -299 // Instance of the hbase executor executorService. -300 protected ExecutorService executorService; -301 -302 // If false, the file system has become unavailable -303 protected volatile boolean fsOk; -304 protected HFileSystem fs; -305 protected HFileSystem walFs; -306 -307 // Set when a report to the master comes back with a message asking us to -308 // shutdown. Also set by call to stop when debugging or running unit tests -309 // of HRegionServer in isolation. -310 private volatile boolean stopped = false; -311 -312 // Go down hard. Used if file system becomes unavailable and also in -313 // debugging and unit tests. -314 private volatile boolean abortRequested; -315 -316 ConcurrentMapString, Integer rowlocks = new ConcurrentHashMap(); -317 -318 // A state before we go into stopped state. At this stage we're closing user -319 // space regions. 
-320 private boolean stopping = false; -321 -322 volatile boolean killed = false; -323 -324 protected final Configuration conf; -325 -326 private Path rootDir; -327 private Path walRootDir; -328 -329 protected final ReentrantReadWriteLock lock = new ReentrantReadWriteLock(); -330 -331 final int numRetries; -332 protected final int threadWakeFrequency; -333 protected final int msgInterval; -334 -335 protected final int numRegionsToReport; -336 -337 // Stub to do region server status calls against the master. -338 private volatile RegionServerStatusService.BlockingInterface rssStub; -339 private volatile LockService.BlockingInterface lockStub; -340 // RPC client. Used to make the stub above that does region server status checking. -341 RpcClient rpcClient; -342 -343 private RpcRetryingCallerFactory rpcRetryingCallerFactory; -344 private RpcControllerFactory rpcControllerFactory; -345 -346 private UncaughtExceptionHandler uncaughtExceptionHandler; -347 -348 // Info server. Default access so can be used by unit tests. REGIONSERVER -349 // is name of the webapp and the attribute name used stuffing this instance -350 // into web context. -351
[40/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0b638133/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html index fa489c5..1fc3ca7 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html @@ -47,3488 +47,3492 @@ 039import java.util.Map.Entry; 040import java.util.Objects; 041import java.util.Set; -042import java.util.concurrent.CountDownLatch; -043import java.util.concurrent.ExecutionException; -044import java.util.concurrent.Future; -045import java.util.concurrent.TimeUnit; -046import java.util.concurrent.TimeoutException; -047import java.util.concurrent.atomic.AtomicInteger; -048import java.util.concurrent.atomic.AtomicReference; -049import java.util.function.Function; -050import java.util.regex.Pattern; -051import java.util.stream.Collectors; -052import javax.servlet.ServletException; -053import javax.servlet.http.HttpServlet; -054import javax.servlet.http.HttpServletRequest; -055import javax.servlet.http.HttpServletResponse; -056import org.apache.hadoop.conf.Configuration; -057import org.apache.hadoop.fs.Path; -058import org.apache.hadoop.hbase.ClusterMetrics; -059import org.apache.hadoop.hbase.ClusterMetrics.Option; -060import org.apache.hadoop.hbase.ClusterMetricsBuilder; -061import org.apache.hadoop.hbase.CoordinatedStateException; -062import org.apache.hadoop.hbase.DoNotRetryIOException; -063import org.apache.hadoop.hbase.HBaseIOException; -064import org.apache.hadoop.hbase.HBaseInterfaceAudience; -065import org.apache.hadoop.hbase.HConstants; -066import org.apache.hadoop.hbase.InvalidFamilyOperationException; -067import org.apache.hadoop.hbase.MasterNotRunningException; -068import 
org.apache.hadoop.hbase.MetaTableAccessor; -069import org.apache.hadoop.hbase.NamespaceDescriptor; -070import org.apache.hadoop.hbase.PleaseHoldException; -071import org.apache.hadoop.hbase.ReplicationPeerNotFoundException; -072import org.apache.hadoop.hbase.ServerLoad; -073import org.apache.hadoop.hbase.ServerMetricsBuilder; -074import org.apache.hadoop.hbase.ServerName; -075import org.apache.hadoop.hbase.TableDescriptors; -076import org.apache.hadoop.hbase.TableName; -077import org.apache.hadoop.hbase.TableNotDisabledException; -078import org.apache.hadoop.hbase.TableNotFoundException; -079import org.apache.hadoop.hbase.UnknownRegionException; -080import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; -081import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; -082import org.apache.hadoop.hbase.client.MasterSwitchType; -083import org.apache.hadoop.hbase.client.RegionInfo; -084import org.apache.hadoop.hbase.client.Result; -085import org.apache.hadoop.hbase.client.TableDescriptor; -086import org.apache.hadoop.hbase.client.TableDescriptorBuilder; -087import org.apache.hadoop.hbase.client.TableState; -088import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; -089import org.apache.hadoop.hbase.exceptions.DeserializationException; -090import org.apache.hadoop.hbase.exceptions.MergeRegionException; -091import org.apache.hadoop.hbase.executor.ExecutorType; -092import org.apache.hadoop.hbase.favored.FavoredNodesManager; -093import org.apache.hadoop.hbase.favored.FavoredNodesPromoter; -094import org.apache.hadoop.hbase.http.InfoServer; -095import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils; -096import org.apache.hadoop.hbase.ipc.RpcServer; -097import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException; -098import org.apache.hadoop.hbase.log.HBaseMarkers; -099import org.apache.hadoop.hbase.master.MasterRpcServices.BalanceSwitchMode; -100import org.apache.hadoop.hbase.master.assignment.AssignmentManager; -101import 
org.apache.hadoop.hbase.master.assignment.MergeTableRegionsProcedure; -102import org.apache.hadoop.hbase.master.assignment.RegionStates; -103import org.apache.hadoop.hbase.master.assignment.RegionStates.RegionStateNode; -104import org.apache.hadoop.hbase.master.balancer.BalancerChore; -105import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer; -106import org.apache.hadoop.hbase.master.balancer.ClusterStatusChore; -107import org.apache.hadoop.hbase.master.balancer.LoadBalancerFactory; -108import org.apache.hadoop.hbase.master.cleaner.HFileCleaner; -109import org.apache.hadoop.hbase.master.cleaner.LogCleaner; -110import org.apache.hadoop.hbase.master.cleaner.ReplicationMetaCleaner; -111import org.apache.hadoop.hbase.master.locking.LockManager; -112import org.apache.hadoop.hbase.master.normalizer.NormalizationPlan; -113import
[18/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0b638133/testdevapidocs/org/apache/hadoop/hbase/client/TestConnectionImplementation.html -- diff --git a/testdevapidocs/org/apache/hadoop/hbase/client/TestConnectionImplementation.html b/testdevapidocs/org/apache/hadoop/hbase/client/TestConnectionImplementation.html index d79305e..197a966 100644 --- a/testdevapidocs/org/apache/hadoop/hbase/client/TestConnectionImplementation.html +++ b/testdevapidocs/org/apache/hadoop/hbase/client/TestConnectionImplementation.html @@ -18,7 +18,7 @@ catch(err) { } //--> -var methods = {"i0":9,"i1":9,"i2":10,"i3":10}; +var methods = {"i0":9,"i1":9,"i2":10,"i3":9,"i4":9,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10}; var tabs = {65535:["t0","All Methods"],1:["t1","Static Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]}; var altColor = "altColor"; var rowColor = "rowColor"; @@ -50,7 +50,7 @@ var activeTableTab = "activeTableTab"; PrevClass -NextClass +NextClass Frames @@ -74,7 +74,7 @@ var activeTableTab = "activeTableTab"; Summary: -Nested| +Nested| Field| Constr| Method @@ -109,16 +109,34 @@ var activeTableTab = "activeTableTab"; -public class TestConnectionImplementation +public class TestConnectionImplementation extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true; title="class or interface in java.lang">Object -Tests that we fail fast when hostname resolution is not working and do not cache - unresolved InetSocketAddresses. +This class is for testing HBaseConnectionManager features + + + + + +Nested Class Summary + +Nested Classes + +Modifier and Type +Class and Description + + +static class +TestConnectionImplementation.BlockingFilter + + + + @@ -132,12 +150,56 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html? 
Field and Description -private static org.apache.hadoop.hbase.client.ConnectionImplementation -conn +private static byte[] +FAM_NAM + + +private static org.slf4j.Logger +LOG + + +org.junit.rules.TestName +name + + +private static byte[] +ROW + + +private static byte[] +ROW_X + + +private static int +RPC_RETRY + + +protected static http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/AtomicBoolean.html?is-external=true; title="class or interface in java.util.concurrent.atomic">AtomicBoolean +syncBlockingFilter + + +private static org.apache.hadoop.hbase.TableName +TABLE_NAME + + +private static org.apache.hadoop.hbase.TableName +TABLE_NAME1 + + +private static org.apache.hadoop.hbase.TableName +TABLE_NAME2 + + +private static org.apache.hadoop.hbase.TableName +TABLE_NAME3 private static HBaseTestingUtility -testUtil +TEST_UTIL + + +org.junit.rules.TestRule +timeout @@ -172,20 +234,118 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html? Method and Description -static void -setupBeforeClass() +private static void +assertEqualsWithJitter(longexpected, + longactual) -static void -teardownAfterClass() +private static void +assertEqualsWithJitter(longexpected, + longactual, + longjitterBase) -void -testGetAdminBadHostname() +private int +setNumTries(org.apache.hadoop.hbase.client.ConnectionImplementationhci, + intnewVal) +static void +setUpBeforeClass() + + +static void +tearDownAfterClass() + + +void +testAdminFactory() +Naive test to check that Connection#getAdmin returns a properly constructed HBaseAdmin object + + + +void +testCacheSeqNums() +Test that stale cache updates don't override newer cached values. + + + +void +testClosing() + + +void +testClusterConnection() + + +void +testClusterStatus() + + +void +testConnection() +This test checks that one can connect to the cluster with only the + ZooKeeper quorum set. 
+ + + +private void +testConnectionClose(booleanallowsInterrupt) + + +void +testConnectionCloseAllowsInterrupt() +Test that we can handle connection close: it will trigger a retry, but the calls will finish. + + + +void +testConnectionCut() +Test that the connection to the dead server is cut immediately when we receive the + notification. + + + +void +testConnectionIdle() +Test that connection can become idle without breaking everything. + + + void -testGetClientBadHostname() +testConnectionManagement() +Test that Connection or Pool are not closed when managed externally + + + +void +testConnectionNotAllowsInterrupt() + + +void +testConnectionRideOverClusterRestart() + + +void +testCreateConnection() +Trivial test to verify that nobody messes with + ConnectionFactory.createConnection(Configuration) + + + +void +testErrorBackoffTimeCalculation() + + +void +testMulti() + + +void +testRegionCaching() +Test that when we delete a location
[07/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0b638133/testdevapidocs/src-html/org/apache/hadoop/hbase/TestCellUtil.TestCellScanner.html -- diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/TestCellUtil.TestCellScanner.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/TestCellUtil.TestCellScanner.html index 232ef56..bc3a6d0 100644 --- a/testdevapidocs/src-html/org/apache/hadoop/hbase/TestCellUtil.TestCellScanner.html +++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/TestCellUtil.TestCellScanner.html @@ -29,610 +29,626 @@ 021import static org.junit.Assert.assertEquals; 022import static org.junit.Assert.assertFalse; 023import static org.junit.Assert.assertTrue; -024 -025import java.io.ByteArrayOutputStream; -026import java.io.IOException; -027import java.math.BigDecimal; -028import java.nio.ByteBuffer; -029import java.util.ArrayList; -030import java.util.List; -031import java.util.NavigableMap; -032import java.util.TreeMap; -033import org.apache.hadoop.hbase.testclassification.MiscTests; -034import org.apache.hadoop.hbase.testclassification.SmallTests; -035import org.apache.hadoop.hbase.util.Bytes; -036import org.junit.Assert; -037import org.junit.Test; -038import org.junit.experimental.categories.Category; -039 -040@Category({MiscTests.class, SmallTests.class}) -041public class TestCellUtil { -042 /** -043 * CellScannable used in test. Returns a {@link TestCellScanner} -044 */ -045 private static class TestCellScannable implements CellScannable { -046private final int cellsCount; -047TestCellScannable(final int cellsCount) { -048 this.cellsCount = cellsCount; -049} -050@Override -051public CellScanner cellScanner() { -052 return new TestCellScanner(this.cellsCount); -053} -054 } -055 -056 /** -057 * CellScanner used in test. 
-058 */ -059 private static class TestCellScanner implements CellScanner { -060private int count = 0; -061private Cell current = null; -062private final int cellsCount; -063 -064TestCellScanner(final int cellsCount) { -065 this.cellsCount = cellsCount; -066} -067 -068@Override -069public Cell current() { -070 return this.current; -071} -072 -073@Override -074public boolean advance() throws IOException { -075 if (this.count cellsCount) { -076this.current = new TestCell(this.count); -077this.count++; -078return true; -079 } -080 return false; -081} -082 } -083 -084 /** -085 * Cell used in test. Has row only. -086 */ -087 private static class TestCell implements Cell { -088private final byte [] row; -089 -090TestCell(final int i) { -091 this.row = Bytes.toBytes(i); -092} -093 -094@Override -095public byte[] getRowArray() { -096 return this.row; -097} -098 -099@Override -100public int getRowOffset() { -101 return 0; -102} -103 -104@Override -105public short getRowLength() { -106 return (short)this.row.length; -107} -108 -109@Override -110public byte[] getFamilyArray() { -111 // TODO Auto-generated method stub -112 return null; -113} -114 -115@Override -116public int getFamilyOffset() { -117 // TODO Auto-generated method stub -118 return 0; -119} -120 -121@Override -122public byte getFamilyLength() { -123 // TODO Auto-generated method stub -124 return 0; -125} -126 -127@Override -128public byte[] getQualifierArray() { -129 // TODO Auto-generated method stub -130 return null; -131} -132 -133@Override -134public int getQualifierOffset() { -135 // TODO Auto-generated method stub -136 return 0; -137} -138 -139@Override -140public int getQualifierLength() { -141 // TODO Auto-generated method stub -142 return 0; -143} -144 -145@Override -146public long getTimestamp() { -147 // TODO Auto-generated method stub -148 return 0; -149} -150 -151@Override -152public byte getTypeByte() { -153 // TODO Auto-generated method stub -154 return 0; -155} -156 -157@Override -158public byte[] 
getValueArray() { -159 // TODO Auto-generated method stub -160 return null; -161} -162 -163@Override -164public int getValueOffset() { -165 // TODO Auto-generated method stub -166 return 0; -167} -168 -169@Override -170public int getValueLength() { -171 // TODO Auto-generated method stub -172 return 0; -173} -174 -175@Override -176public byte[] getTagsArray() { -177 // TODO Auto-generated method stub -178 return null; -179} -180 -181@Override -182public int getTagsOffset() { -183 // TODO Auto-generated method stub -184 return 0; -185} -186 -187@Override -188public long getSequenceId() { -189
[41/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0b638133/devapidocs/org/apache/hadoop/hbase/zookeeper/ZooKeeperHelper.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/zookeeper/ZooKeeperHelper.html b/devapidocs/org/apache/hadoop/hbase/zookeeper/ZooKeeperHelper.html new file mode 100644 index 000..6d0efdd --- /dev/null +++ b/devapidocs/org/apache/hadoop/hbase/zookeeper/ZooKeeperHelper.html @@ -0,0 +1,314 @@ +http://www.w3.org/TR/html4/loose.dtd;> + + + + + +ZooKeeperHelper (Apache HBase 3.0.0-SNAPSHOT API) + + + + + +var methods = {"i0":9,"i1":9}; +var tabs = {65535:["t0","All Methods"],1:["t1","Static Methods"],8:["t4","Concrete Methods"]}; +var altColor = "altColor"; +var rowColor = "rowColor"; +var tableTab = "tableTab"; +var activeTableTab = "activeTableTab"; + + +JavaScript is disabled on your browser. + + + + + +Skip navigation links + + + + +Overview +Package +Class +Use +Tree +Deprecated +Index +Help + + + + +PrevClass +NextClass + + +Frames +NoFrames + + +AllClasses + + + + + + + +Summary: +Nested| +Field| +Constr| +Method + + +Detail: +Field| +Constr| +Method + + + + + + + + +org.apache.hadoop.hbase.zookeeper +Class ZooKeeperHelper + + + +http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true; title="class or interface in java.lang">java.lang.Object + + +org.apache.hadoop.hbase.zookeeper.ZooKeeperHelper + + + + + + + + +@InterfaceAudience.Private +public final class ZooKeeperHelper +extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true; title="class or interface in java.lang">Object +Methods that help working with ZooKeeper + + + + + + + + + + + +Constructor Summary + +Constructors + +Modifier +Constructor and Description + + +private +ZooKeeperHelper() + + + + + + + + + +Method Summary + +All MethodsStatic MethodsConcrete Methods + +Modifier and Type +Method and Description + + +static org.apache.zookeeper.ZooKeeper +ensureConnectedZooKeeper(org.apache.zookeeper.ZooKeeperzookeeper, 
+inttimeout) +Ensure passed zookeeper is connected. + + + +static org.apache.zookeeper.ZooKeeper +getConnectedZooKeeper(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">StringconnectString, + intsessionTimeoutMs) +Get a ZooKeeper instance and wait until it connected before returning. + + + + + + + +Methods inherited from classjava.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true; title="class or interface in java.lang">Object +http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#clone--; title="class or interface in java.lang">clone, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#equals-java.lang.Object-; title="class or interface in java.lang">equals, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#finalize--; title="class or interface in java.lang">finalize, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#getClass--; title="class or interface in java.lang">getClass, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#hashCode--; title="class or interface in java.lang">hashCode, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#notify--; title="class or interface in java.lang">notify, http://docs.oracle.com/javase/8/docs/api/java/lang /Object.html?is-external=true#notifyAll--" title="class or interface in java.lang">notifyAll, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#toString--; title="class or interface in java.lang">toString, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#wait--; title="class or interface in java.lang">wait, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#wait-long-; title="class or interface in java.lang">wait, 
http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#wait-long-int-; title="class or interface in java.lang">wait + + + + + + + + + + + + + + +Constructor Detail + + + + + +ZooKeeperHelper +privateZooKeeperHelper() + + + + + + + + + +Method Detail + + + + + +getConnectedZooKeeper +public staticorg.apache.zookeeper.ZooKeepergetConnectedZooKeeper(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface
[09/51] [partial] hbase-site git commit: Published site at .
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0b638133/testdevapidocs/src-html/org/apache/hadoop/hbase/TestCellUtil.TestCell.html -- diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/TestCellUtil.TestCell.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/TestCellUtil.TestCell.html index 232ef56..bc3a6d0 100644 --- a/testdevapidocs/src-html/org/apache/hadoop/hbase/TestCellUtil.TestCell.html +++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/TestCellUtil.TestCell.html @@ -29,610 +29,626 @@ 021import static org.junit.Assert.assertEquals; 022import static org.junit.Assert.assertFalse; 023import static org.junit.Assert.assertTrue; -024 -025import java.io.ByteArrayOutputStream; -026import java.io.IOException; -027import java.math.BigDecimal; -028import java.nio.ByteBuffer; -029import java.util.ArrayList; -030import java.util.List; -031import java.util.NavigableMap; -032import java.util.TreeMap; -033import org.apache.hadoop.hbase.testclassification.MiscTests; -034import org.apache.hadoop.hbase.testclassification.SmallTests; -035import org.apache.hadoop.hbase.util.Bytes; -036import org.junit.Assert; -037import org.junit.Test; -038import org.junit.experimental.categories.Category; -039 -040@Category({MiscTests.class, SmallTests.class}) -041public class TestCellUtil { -042 /** -043 * CellScannable used in test. Returns a {@link TestCellScanner} -044 */ -045 private static class TestCellScannable implements CellScannable { -046private final int cellsCount; -047TestCellScannable(final int cellsCount) { -048 this.cellsCount = cellsCount; -049} -050@Override -051public CellScanner cellScanner() { -052 return new TestCellScanner(this.cellsCount); -053} -054 } -055 -056 /** -057 * CellScanner used in test. 
-058 */ -059 private static class TestCellScanner implements CellScanner { -060private int count = 0; -061private Cell current = null; -062private final int cellsCount; -063 -064TestCellScanner(final int cellsCount) { -065 this.cellsCount = cellsCount; -066} -067 -068@Override -069public Cell current() { -070 return this.current; -071} -072 -073@Override -074public boolean advance() throws IOException { -075 if (this.count cellsCount) { -076this.current = new TestCell(this.count); -077this.count++; -078return true; -079 } -080 return false; -081} -082 } -083 -084 /** -085 * Cell used in test. Has row only. -086 */ -087 private static class TestCell implements Cell { -088private final byte [] row; -089 -090TestCell(final int i) { -091 this.row = Bytes.toBytes(i); -092} -093 -094@Override -095public byte[] getRowArray() { -096 return this.row; -097} -098 -099@Override -100public int getRowOffset() { -101 return 0; -102} -103 -104@Override -105public short getRowLength() { -106 return (short)this.row.length; -107} -108 -109@Override -110public byte[] getFamilyArray() { -111 // TODO Auto-generated method stub -112 return null; -113} -114 -115@Override -116public int getFamilyOffset() { -117 // TODO Auto-generated method stub -118 return 0; -119} -120 -121@Override -122public byte getFamilyLength() { -123 // TODO Auto-generated method stub -124 return 0; -125} -126 -127@Override -128public byte[] getQualifierArray() { -129 // TODO Auto-generated method stub -130 return null; -131} -132 -133@Override -134public int getQualifierOffset() { -135 // TODO Auto-generated method stub -136 return 0; -137} -138 -139@Override -140public int getQualifierLength() { -141 // TODO Auto-generated method stub -142 return 0; -143} -144 -145@Override -146public long getTimestamp() { -147 // TODO Auto-generated method stub -148 return 0; -149} -150 -151@Override -152public byte getTypeByte() { -153 // TODO Auto-generated method stub -154 return 0; -155} -156 -157@Override -158public byte[] 
getValueArray() { -159 // TODO Auto-generated method stub -160 return null; -161} -162 -163@Override -164public int getValueOffset() { -165 // TODO Auto-generated method stub -166 return 0; -167} -168 -169@Override -170public int getValueLength() { -171 // TODO Auto-generated method stub -172 return 0; -173} -174 -175@Override -176public byte[] getTagsArray() { -177 // TODO Auto-generated method stub -178 return null; -179} -180 -181@Override -182public int getTagsOffset() { -183 // TODO Auto-generated method stub -184 return 0; -185} -186 -187@Override -188public long getSequenceId() { -189 // TODO Auto-generated method
[20/37] hbase git commit: HBASE-19642 Fix locking for peer modification procedure
HBASE-19642 Fix locking for peer modification procedure Project: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/fe03c7c0 Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/fe03c7c0 Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/fe03c7c0 Branch: refs/heads/HBASE-19397-branch-2 Commit: fe03c7c0720189c1dbdec376ae79e35198d0bb38 Parents: e9ca451 Author: zhangduoAuthored: Wed Dec 27 18:27:13 2017 +0800 Committer: zhangduo Committed: Fri Jan 12 21:46:18 2018 +0800 -- .../procedure/MasterProcedureScheduler.java | 14 + .../master/replication/ModifyPeerProcedure.java | 21 +--- 2 files changed, 32 insertions(+), 3 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hbase/blob/fe03c7c0/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java -- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java index a25217c..4ecb3b1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java @@ -610,6 +610,20 @@ public class MasterProcedureScheduler extends AbstractProcedureScheduler { public boolean requireExclusiveLock(Procedure proc) { return requirePeerExclusiveLock((PeerProcedureInterface) proc); } + +@Override +public boolean isAvailable() { + if (isEmpty()) { +return false; + } + if (getLockStatus().hasExclusiveLock()) { +// if we have an exclusive lock already taken +// only child of the lock owner can be executed +Procedure nextProc = peek(); +return nextProc != null && getLockStatus().hasLockAccess(nextProc); + } + return true; +} } // 
http://git-wip-us.apache.org/repos/asf/hbase/blob/fe03c7c0/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ModifyPeerProcedure.java -- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ModifyPeerProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ModifyPeerProcedure.java index 279fbc7..a682606 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ModifyPeerProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ModifyPeerProcedure.java @@ -46,6 +46,8 @@ public abstract class ModifyPeerProcedure protected String peerId; + private volatile boolean locked; + // used to keep compatible with old client where we can only returns after updateStorage. protected ProcedurePrepareLatch latch; @@ -145,17 +147,30 @@ public abstract class ModifyPeerProcedure @Override protected LockState acquireLock(MasterProcedureEnv env) { -return env.getProcedureScheduler().waitPeerExclusiveLock(this, peerId) - ? LockState.LOCK_EVENT_WAIT - : LockState.LOCK_ACQUIRED; +if (env.getProcedureScheduler().waitPeerExclusiveLock(this, peerId)) { + return LockState.LOCK_EVENT_WAIT; +} +locked = true; +return LockState.LOCK_ACQUIRED; } @Override protected void releaseLock(MasterProcedureEnv env) { +locked = false; env.getProcedureScheduler().wakePeerExclusiveLock(this, peerId); } @Override + protected boolean holdLock(MasterProcedureEnv env) { +return true; + } + + @Override + protected boolean hasLock(MasterProcedureEnv env) { +return locked; + } + + @Override protected void rollbackState(MasterProcedureEnv env, PeerModificationState state) throws IOException, InterruptedException { if (state == PeerModificationState.PRE_PEER_MODIFICATION) {
[16/37] hbase git commit: HBASE-19573 Rewrite ReplicationPeer with the new replication storage interface
HBASE-19573 Rewrite ReplicationPeer with the new replication storage interface Project: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/659204ad Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/659204ad Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/659204ad Branch: refs/heads/HBASE-19397-branch-2 Commit: 659204ad4c58b4c77a14734ff720edc160ef3d9d Parents: a412814 Author: Guanghao ZhangAuthored: Tue Dec 26 11:39:34 2017 +0800 Committer: zhangduo Committed: Fri Jan 12 21:46:18 2018 +0800 -- .../replication/VerifyReplication.java | 5 - .../hbase/replication/ReplicationPeer.java | 42 ++-- .../hbase/replication/ReplicationPeerImpl.java | 169 ++ .../replication/ReplicationPeerZKImpl.java | 233 --- .../hbase/replication/ReplicationPeers.java | 4 +- .../replication/ReplicationPeersZKImpl.java | 23 +- .../replication/TestReplicationStateBasic.java | 7 +- .../regionserver/PeerProcedureHandlerImpl.java | 29 +-- 8 files changed, 216 insertions(+), 296 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hbase/blob/659204ad/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java -- diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java index 9065f4e..09d4b4b 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java @@ -51,7 +51,6 @@ import org.apache.hadoop.hbase.mapreduce.TableSplit; import org.apache.hadoop.hbase.replication.ReplicationException; import org.apache.hadoop.hbase.replication.ReplicationFactory; import org.apache.hadoop.hbase.replication.ReplicationPeerConfig; -import org.apache.hadoop.hbase.replication.ReplicationPeerZKImpl; import 
org.apache.hadoop.hbase.replication.ReplicationPeers; import org.apache.hadoop.hbase.snapshot.RestoreSnapshotHelper; import org.apache.hadoop.hbase.util.Bytes; @@ -333,7 +332,6 @@ public class VerifyReplication extends Configured implements Tool { private static Pair getPeerQuorumConfig( final Configuration conf, String peerId) throws IOException { ZKWatcher localZKW = null; -ReplicationPeerZKImpl peer = null; try { localZKW = new ZKWatcher(conf, "VerifyReplication", new Abortable() { @@ -354,9 +352,6 @@ public class VerifyReplication extends Configured implements Tool { throw new IOException( "An error occurred while trying to connect to the remove peer cluster", e); } finally { - if (peer != null) { -peer.close(); - } if (localZKW != null) { localZKW.close(); } http://git-wip-us.apache.org/repos/asf/hbase/blob/659204ad/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java -- diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java index b66d76d..4846018 100644 --- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java +++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java @@ -26,7 +26,6 @@ import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.TableName; import org.apache.yetus.audience.InterfaceAudience; - /** * ReplicationPeer manages enabled / disabled state for the peer. */ @@ -49,65 +48,52 @@ public interface ReplicationPeer { String getId(); /** - * Get the peer config object - * @return the ReplicationPeerConfig for this peer - */ - public ReplicationPeerConfig getPeerConfig(); - - /** - * Get the peer config object. if loadFromBackingStore is true, it will load from backing store - * directly and update its load peer config. otherwise, just return the local cached peer config. 
- * @return the ReplicationPeerConfig for this peer - */ - public ReplicationPeerConfig getPeerConfig(boolean loadFromBackingStore) - throws ReplicationException; - - /** * Returns the state of the peer by reading local cache. * @return the enabled state */ PeerState getPeerState(); /** - * Returns the state of peer, if loadFromBackingStore is true, it will load from backing
[08/37] hbase git commit: HBASE-19525 RS side changes for moving peer modification from zk watcher to procedure
HBASE-19525 RS side changes for moving peer modification from zk watcher to procedure Project: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/c6564a23 Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/c6564a23 Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/c6564a23 Branch: refs/heads/HBASE-19397-branch-2 Commit: c6564a23595bc11b1138bdd7ec5403f2b1d17ac8 Parents: c7127cb Author: huzhengAuthored: Wed Dec 20 10:47:18 2017 +0800 Committer: zhangduo Committed: Fri Jan 12 21:42:57 2018 +0800 -- .../hadoop/hbase/protobuf/ProtobufUtil.java | 11 +- .../hbase/shaded/protobuf/ProtobufUtil.java | 13 +- .../hbase/replication/ReplicationListener.java | 14 -- .../hbase/replication/ReplicationPeer.java | 28 ++- .../replication/ReplicationPeerZKImpl.java | 186 --- .../replication/ReplicationPeersZKImpl.java | 19 +- .../replication/ReplicationTrackerZKImpl.java | 73 +- .../regionserver/ReplicationSourceService.java | 9 +- .../handler/RSProcedureHandler.java | 3 + .../replication/BaseReplicationEndpoint.java| 2 +- .../regionserver/PeerProcedureHandler.java | 38 .../regionserver/PeerProcedureHandlerImpl.java | 81 +++ .../regionserver/RefreshPeerCallable.java | 39 +++- .../replication/regionserver/Replication.java | 10 + .../regionserver/ReplicationSource.java | 9 +- .../regionserver/ReplicationSourceManager.java | 37 ++- .../TestReplicationAdminUsingProcedure.java | 226 +++ .../replication/DummyModifyPeerProcedure.java | 48 .../TestDummyModifyPeerProcedure.java | 80 --- .../TestReplicationTrackerZKImpl.java | 51 - .../TestReplicationSourceManager.java | 32 ++- 21 files changed, 535 insertions(+), 474 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hbase/blob/c6564a23/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java -- diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java index 7b63cd6..7d83687 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java @@ -17,6 +17,8 @@ */ package org.apache.hadoop.hbase.protobuf; +import static org.apache.hadoop.hbase.protobuf.ProtobufMagic.PB_MAGIC; + import com.google.protobuf.ByteString; import com.google.protobuf.CodedInputStream; import com.google.protobuf.InvalidProtocolBufferException; @@ -200,7 +202,7 @@ public final class ProtobufUtil { * byte array that is bytes.length plus {@link ProtobufMagic#PB_MAGIC}.length. */ public static byte [] prependPBMagic(final byte [] bytes) { -return Bytes.add(ProtobufMagic.PB_MAGIC, bytes); +return Bytes.add(PB_MAGIC, bytes); } /** @@ -225,10 +227,11 @@ public final class ProtobufUtil { * @param bytes bytes to check * @throws DeserializationException if we are missing the pb magic prefix */ - public static void expectPBMagicPrefix(final byte [] bytes) throws DeserializationException { + public static void expectPBMagicPrefix(final byte[] bytes) throws DeserializationException { if (!isPBMagicPrefix(bytes)) { - throw new DeserializationException("Missing pb magic " + - Bytes.toString(ProtobufMagic.PB_MAGIC) + " prefix"); + String bytesPrefix = bytes == null ? 
"null" : Bytes.toStringBinary(bytes, 0, PB_MAGIC.length); + throw new DeserializationException( + "Missing pb magic " + Bytes.toString(PB_MAGIC) + " prefix, bytes: " + bytesPrefix); } } http://git-wip-us.apache.org/repos/asf/hbase/blob/c6564a23/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java -- diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java index b26802f..5e6b3db 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java @@ -17,6 +17,8 @@ */ package org.apache.hadoop.hbase.shaded.protobuf; +import static org.apache.hadoop.hbase.protobuf.ProtobufMagic.PB_MAGIC; + import java.io.ByteArrayOutputStream; import java.io.IOException; import java.io.InputStream; @@ -280,7 +282,7 @@ public final class ProtobufUtil { * byte array
[06/37] hbase git commit: HBASE-19536 Client side changes for moving peer modification from zk watcher to procedure
HBASE-19536 Client side changes for moving peer modification from zk watcher to procedure Signed-off-by: zhangduoProject: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/df411c4e Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/df411c4e Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/df411c4e Branch: refs/heads/HBASE-19397-branch-2 Commit: df411c4eb3962a665806e1d3209ec2264bf10963 Parents: 8c9bf6f Author: Guanghao Zhang Authored: Tue Dec 19 15:50:57 2017 +0800 Committer: zhangduo Committed: Fri Jan 12 21:42:57 2018 +0800 -- .../org/apache/hadoop/hbase/client/Admin.java | 87 ++- .../apache/hadoop/hbase/client/HBaseAdmin.java | 149 ++- .../hadoop/hbase/client/RawAsyncHBaseAdmin.java | 82 +- 3 files changed, 238 insertions(+), 80 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hbase/blob/df411c4e/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java -- diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java index f61b32e..6729473 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java @@ -2466,7 +2466,7 @@ public interface Admin extends Abortable, Closeable { /** * Add a new replication peer for replicating data to slave cluster. * @param peerId a short name that identifies the peer - * @param peerConfig configuration for the replication slave cluster + * @param peerConfig configuration for the replication peer * @throws IOException if a remote or network exception occurs */ default void addReplicationPeer(String peerId, ReplicationPeerConfig peerConfig) @@ -2477,7 +2477,7 @@ public interface Admin extends Abortable, Closeable { /** * Add a new replication peer for replicating data to slave cluster. 
* @param peerId a short name that identifies the peer - * @param peerConfig configuration for the replication slave cluster + * @param peerConfig configuration for the replication peer * @param enabled peer state, true if ENABLED and false if DISABLED * @throws IOException if a remote or network exception occurs */ @@ -2485,6 +2485,37 @@ public interface Admin extends Abortable, Closeable { throws IOException; /** + * Add a new replication peer but does not block and wait for it. + * + * You can use Future.get(long, TimeUnit) to wait on the operation to complete. It may throw + * ExecutionException if there was an error while executing the operation or TimeoutException in + * case the wait timeout was not long enough to allow the operation to complete. + * @param peerId a short name that identifies the peer + * @param peerConfig configuration for the replication peer + * @return the result of the async operation + * @throws IOException IOException if a remote or network exception occurs + */ + default Future addReplicationPeerAsync(String peerId, ReplicationPeerConfig peerConfig) + throws IOException { +return addReplicationPeerAsync(peerId, peerConfig, true); + } + + /** + * Add a new replication peer but does not block and wait for it. + * + * You can use Future.get(long, TimeUnit) to wait on the operation to complete. It may throw + * ExecutionException if there was an error while executing the operation or TimeoutException in + * case the wait timeout was not long enough to allow the operation to complete. 
+ * @param peerId a short name that identifies the peer + * @param peerConfig configuration for the replication peer + * @param enabled peer state, true if ENABLED and false if DISABLED + * @return the result of the async operation + * @throws IOException IOException if a remote or network exception occurs + */ + Future addReplicationPeerAsync(String peerId, ReplicationPeerConfig peerConfig, + boolean enabled) throws IOException; + + /** * Remove a peer and stop the replication. * @param peerId a short name that identifies the peer * @throws IOException if a remote or network exception occurs @@ -2492,6 +2523,18 @@ public interface Admin extends Abortable, Closeable { void removeReplicationPeer(String peerId) throws IOException; /** + * Remove a replication peer but does not block and wait for it. + * + * You can use Future.get(long, TimeUnit) to wait on the operation to complete. It may throw + * ExecutionException if there was an error while executing the operation or TimeoutException in + * case the wait timeout was not long enough to
[29/37] hbase git commit: HBASE-19636 All rs should already start work with the new peer change when replication peer procedure is finished
HBASE-19636 All rs should already start work with the new peer change when replication peer procedure is finished Signed-off-by: zhangduoProject: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/bce92be7 Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/bce92be7 Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/bce92be7 Branch: refs/heads/HBASE-19397-branch-2 Commit: bce92be70a4db42ad653f9ee6e1f40f219ec79cc Parents: 0e71ff1 Author: Guanghao Zhang Authored: Thu Jan 4 16:58:01 2018 +0800 Committer: zhangduo Committed: Fri Jan 12 21:46:35 2018 +0800 -- .../replication/ReplicationPeerConfig.java | 1 - .../hbase/replication/ReplicationPeerImpl.java | 4 +- .../hbase/replication/ReplicationQueueInfo.java | 23 +- .../hbase/replication/ReplicationUtils.java | 56 ++ .../replication/TestReplicationStateZKImpl.java | 22 - .../regionserver/ReplicationSourceService.java | 3 +- .../regionserver/PeerProcedureHandler.java | 3 + .../regionserver/PeerProcedureHandlerImpl.java | 50 +- .../RecoveredReplicationSource.java | 6 +- .../RecoveredReplicationSourceShipper.java | 8 +- .../replication/regionserver/Replication.java | 15 +- .../regionserver/ReplicationSource.java | 34 +- .../regionserver/ReplicationSourceFactory.java | 4 +- .../ReplicationSourceInterface.java | 8 +- .../regionserver/ReplicationSourceManager.java | 895 ++- .../regionserver/ReplicationSourceShipper.java | 6 +- .../ReplicationSourceWALReader.java | 2 +- .../replication/ReplicationSourceDummy.java | 2 +- .../replication/TestNamespaceReplication.java | 57 +- .../TestReplicationSourceManager.java | 11 +- .../TestReplicationSourceManagerZkImpl.java | 1 - 21 files changed, 659 insertions(+), 552 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hbase/blob/bce92be7/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java -- diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java index fdae288..bf8d030 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java @@ -25,7 +25,6 @@ import java.util.List; import java.util.Map; import java.util.Set; import java.util.TreeMap; - import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.util.Bytes; import org.apache.yetus.audience.InterfaceAudience; http://git-wip-us.apache.org/repos/asf/hbase/blob/bce92be7/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerImpl.java -- diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerImpl.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerImpl.java index 3e17025..604e0bb 100644 --- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerImpl.java +++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerImpl.java @@ -1,5 +1,4 @@ -/* - * +/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -28,6 +27,7 @@ import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private public class ReplicationPeerImpl implements ReplicationPeer { + private final Configuration conf; private final String id; http://git-wip-us.apache.org/repos/asf/hbase/blob/bce92be7/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueueInfo.java -- diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueueInfo.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueueInfo.java index ecd888f..cd65f9b 100644 --- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueueInfo.java +++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueueInfo.java @@ -29,7 +29,7 @@ import org.slf4j.LoggerFactory; import org.apache.hadoop.hbase.ServerName; /** - * This class is responsible for the parsing logic for a znode representing a queue. + * This class is
[30/37] hbase git commit: HBASE-19687 Move the logic in ReplicationZKNodeCleaner to ReplicationChecker and remove ReplicationZKNodeCleanerChore
HBASE-19687 Move the logic in ReplicationZKNodeCleaner to ReplicationChecker and remove ReplicationZKNodeCleanerChore Project: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/4027b869 Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/4027b869 Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/4027b869 Branch: refs/heads/HBASE-19397-branch-2 Commit: 4027b869bc91c5fb5ccc8e62c7f67994ac02a1e4 Parents: 97d8b99 Author: zhangduoAuthored: Wed Jan 3 09:39:44 2018 +0800 Committer: zhangduo Committed: Fri Jan 12 21:46:35 2018 +0800 -- .../replication/VerifyReplication.java | 6 +- .../hbase/replication/ReplicationPeers.java | 26 +-- .../hbase/replication/ReplicationUtils.java | 69 +++ .../replication/TestReplicationStateBasic.java | 2 +- .../org/apache/hadoop/hbase/master/HMaster.java | 13 -- .../cleaner/ReplicationZKNodeCleaner.java | 192 --- .../cleaner/ReplicationZKNodeCleanerChore.java | 54 -- .../replication/ReplicationPeerManager.java | 18 +- .../org/apache/hadoop/hbase/util/HBaseFsck.java | 13 +- .../hbase/util/hbck/ReplicationChecker.java | 109 +++ .../cleaner/TestReplicationZKNodeCleaner.java | 109 --- .../hbase/util/TestHBaseFsckReplication.java| 101 ++ .../hadoop/hbase/util/hbck/HbckTestingUtil.java | 6 +- 13 files changed, 259 insertions(+), 459 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hbase/blob/4027b869/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java -- diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java index fe45762..fac4875 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java @@ -50,8 +50,8 @@ import 
org.apache.hadoop.hbase.mapreduce.TableSplit; import org.apache.hadoop.hbase.replication.ReplicationException; import org.apache.hadoop.hbase.replication.ReplicationPeerConfig; import org.apache.hadoop.hbase.replication.ReplicationPeerStorage; -import org.apache.hadoop.hbase.replication.ReplicationPeers; import org.apache.hadoop.hbase.replication.ReplicationStorageFactory; +import org.apache.hadoop.hbase.replication.ReplicationUtils; import org.apache.hadoop.hbase.snapshot.RestoreSnapshotHelper; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.FSUtils; @@ -345,10 +345,10 @@ public class VerifyReplication extends Configured implements Tool { } }); ReplicationPeerStorage storage = - ReplicationStorageFactory.getReplicationPeerStorage(localZKW, conf); +ReplicationStorageFactory.getReplicationPeerStorage(localZKW, conf); ReplicationPeerConfig peerConfig = storage.getPeerConfig(peerId); return Pair.newPair(peerConfig, -ReplicationPeers.getPeerClusterConfiguration(peerConfig, conf)); +ReplicationUtils.getPeerClusterConfiguration(peerConfig, conf)); } catch (ReplicationException e) { throw new IOException("An error occurred while trying to connect to the remove peer cluster", e); http://git-wip-us.apache.org/repos/asf/hbase/blob/4027b869/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeers.java -- diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeers.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeers.java index 45940a5..fcbc350 100644 --- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeers.java +++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeers.java @@ -17,14 +17,11 @@ */ package org.apache.hadoop.hbase.replication; -import java.io.IOException; import java.util.Collections; import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import 
java.util.concurrent.ConcurrentMap; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.CompoundConfiguration; -import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.replication.ReplicationPeer.PeerState; import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.yetus.audience.InterfaceAudience; @@ -106,25 +103,6 @@ public class ReplicationPeers { return
[25/37] hbase git commit: HBASE-19711 TestReplicationAdmin.testConcurrentPeerOperations hangs
HBASE-19711 TestReplicationAdmin.testConcurrentPeerOperations hangs Signed-off-by: zhangduoProject: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/7ca9840b Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/7ca9840b Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/7ca9840b Branch: refs/heads/HBASE-19397-branch-2 Commit: 7ca9840bc6e652d92e8205cd6c462a990b223d25 Parents: a3cdfb6 Author: Guanghao Zhang Authored: Fri Jan 5 15:39:06 2018 +0800 Committer: zhangduo Committed: Fri Jan 12 21:46:35 2018 +0800 -- .../procedure/MasterProcedureScheduler.java | 23 1 file changed, 19 insertions(+), 4 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hbase/blob/7ca9840b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java -- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java index 4ecb3b1..0400de4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java @@ -402,7 +402,7 @@ public class MasterProcedureScheduler extends AbstractProcedureScheduler { @Override public void completionCleanup(final Procedure proc) { if (proc instanceof TableProcedureInterface) { - TableProcedureInterface iProcTable = (TableProcedureInterface)proc; + TableProcedureInterface iProcTable = (TableProcedureInterface) proc; boolean tableDeleted; if (proc.hasException()) { Exception procEx = proc.getException().unwrapRemoteException(); @@ -423,9 +423,7 @@ public class MasterProcedureScheduler extends AbstractProcedureScheduler { } } else if (proc instanceof PeerProcedureInterface) { PeerProcedureInterface iProcPeer = (PeerProcedureInterface) proc; - if 
(iProcPeer.getPeerOperationType() == PeerOperationType.REMOVE) { -removePeerQueue(iProcPeer.getPeerId()); - } + tryCleanupPeerQueue(iProcPeer.getPeerId(), proc); } else { // No cleanup for ServerProcedureInterface types, yet. return; @@ -514,6 +512,23 @@ public class MasterProcedureScheduler extends AbstractProcedureScheduler { locking.removePeerLock(peerId); } + private void tryCleanupPeerQueue(String peerId, Procedure procedure) { +schedLock(); +try { + PeerQueue queue = AvlTree.get(peerMap, peerId, PEER_QUEUE_KEY_COMPARATOR); + if (queue == null) { +return; + } + + final LockAndQueue lock = locking.getPeerLock(peerId); + if (queue.isEmpty() && lock.tryExclusiveLock(procedure)) { +removeFromRunQueue(peerRunQueue, queue); +removePeerQueue(peerId); + } +} finally { + schedUnlock(); +} + } private static boolean isPeerProcedure(Procedure proc) { return proc instanceof PeerProcedureInterface;
[12/37] hbase git commit: HBASE-19630 Add peer cluster key check when add new replication peer
HBASE-19630 Add peer cluster key check when add new replication peer Signed-off-by: zhangduoProject: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/404f5e3a Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/404f5e3a Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/404f5e3a Branch: refs/heads/HBASE-19397-branch-2 Commit: 404f5e3aceee8c13f6aa67b00ec4e92df136a46e Parents: 659204a Author: Guanghao Zhang Authored: Tue Dec 26 21:10:00 2017 +0800 Committer: zhangduo Committed: Fri Jan 12 21:46:18 2018 +0800 -- .../replication/ReplicationPeerManager.java | 54 .../replication/TestReplicationAdmin.java | 22 2 files changed, 54 insertions(+), 22 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hbase/blob/404f5e3a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java -- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java index 84abfeb..b78cbce 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.hbase.master.replication; +import java.io.IOException; import java.util.ArrayList; import java.util.Collection; import java.util.List; @@ -42,6 +43,7 @@ import org.apache.hadoop.hbase.replication.ReplicationPeerStorage; import org.apache.hadoop.hbase.replication.ReplicationQueueInfo; import org.apache.hadoop.hbase.replication.ReplicationQueueStorage; import org.apache.hadoop.hbase.replication.ReplicationStorageFactory; +import org.apache.hadoop.hbase.zookeeper.ZKConfig; import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.yetus.audience.InterfaceAudience; @@ -216,36 +218,36 @@ public 
final class ReplicationPeerManager { return desc != null ? Optional.of(desc.getPeerConfig()) : Optional.empty(); } - /** - * If replicate_all flag is true, it means all user tables will be replicated to peer cluster. - * Then allow config exclude namespaces or exclude table-cfs which can't be replicated to peer - * cluster. - * - * If replicate_all flag is false, it means all user tables can't be replicated to peer cluster. - * Then allow to config namespaces or table-cfs which will be replicated to peer cluster. - */ - private static void checkPeerConfig(ReplicationPeerConfig peerConfig) - throws DoNotRetryIOException { + private void checkPeerConfig(ReplicationPeerConfig peerConfig) throws DoNotRetryIOException { +checkClusterKey(peerConfig.getClusterKey()); + if (peerConfig.replicateAllUserTables()) { - if ((peerConfig.getNamespaces() != null && !peerConfig.getNamespaces().isEmpty()) || -(peerConfig.getTableCFsMap() != null && !peerConfig.getTableCFsMap().isEmpty())) { -throw new DoNotRetryIOException("Need clean namespaces or table-cfs config firstly " + - "when you want replicate all cluster"); + // If replicate_all flag is true, it means all user tables will be replicated to peer cluster. + // Then allow config exclude namespaces or exclude table-cfs which can't be replicated to peer + // cluster. 
+ if ((peerConfig.getNamespaces() != null && !peerConfig.getNamespaces().isEmpty()) + || (peerConfig.getTableCFsMap() != null && !peerConfig.getTableCFsMap().isEmpty())) { +throw new DoNotRetryIOException("Need clean namespaces or table-cfs config firstly " ++ "when you want replicate all cluster"); } checkNamespacesAndTableCfsConfigConflict(peerConfig.getExcludeNamespaces(), peerConfig.getExcludeTableCFsMap()); } else { - if ((peerConfig.getExcludeNamespaces() != null && -!peerConfig.getExcludeNamespaces().isEmpty()) || -(peerConfig.getExcludeTableCFsMap() != null && - !peerConfig.getExcludeTableCFsMap().isEmpty())) { + // If replicate_all flag is false, it means all user tables can't be replicated to peer + // cluster. Then allow to config namespaces or table-cfs which will be replicated to peer + // cluster. + if ((peerConfig.getExcludeNamespaces() != null + && !peerConfig.getExcludeNamespaces().isEmpty()) + || (peerConfig.getExcludeTableCFsMap() != null + && !peerConfig.getExcludeTableCFsMap().isEmpty())) { throw new DoNotRetryIOException( -"Need
[36/37] hbase git commit: HBASE-19633 Clean up the replication queues in the postPeerModification stage when removing a peer
HBASE-19633 Clean up the replication queues in the postPeerModification stage when removing a peer Project: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/3fd65444 Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/3fd65444 Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/3fd65444 Branch: refs/heads/HBASE-19397-branch-2 Commit: 3fd65444852aba985fc26b650ebd840045cab71f Parents: 808f61d Author: zhangduoAuthored: Tue Jan 2 09:57:23 2018 +0800 Committer: zhangduo Committed: Fri Jan 12 21:46:35 2018 +0800 -- .../replication/ReplicationPeerConfig.java | 2 +- .../replication/VerifyReplication.java | 34 ++--- .../hbase/replication/ReplicationPeers.java | 32 ++-- .../replication/ZKReplicationQueueStorage.java | 3 +- .../replication/ZKReplicationStorageBase.java | 4 +- .../replication/TestReplicationStateBasic.java | 10 + .../org/apache/hadoop/hbase/master/HMaster.java | 4 +- .../master/replication/AddPeerProcedure.java| 5 +-- .../replication/DisablePeerProcedure.java | 3 +- .../master/replication/EnablePeerProcedure.java | 3 +- .../master/replication/ModifyPeerProcedure.java | 34 + .../replication/RefreshPeerProcedure.java | 17 - .../master/replication/RemovePeerProcedure.java | 7 ++-- .../replication/ReplicationPeerManager.java | 31 +++- .../replication/UpdatePeerConfigProcedure.java | 3 +- .../hbase/regionserver/HRegionServer.java | 18 - .../RemoteProcedureResultReporter.java | 3 +- .../regionserver/RefreshPeerCallable.java | 5 ++- .../regionserver/ReplicationSourceManager.java | 39 +++- .../TestReplicationAdminUsingProcedure.java | 7 ++-- 20 files changed, 134 insertions(+), 130 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hbase/blob/3fd65444/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java -- diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java index b80ee16..fdae288 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java @@ -27,8 +27,8 @@ import java.util.Set; import java.util.TreeMap; import org.apache.hadoop.hbase.TableName; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; /** * A configuration for the replication peer cluster. http://git-wip-us.apache.org/repos/asf/hbase/blob/3fd65444/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java -- diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java index f0070f0..fe45762 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java @@ -21,7 +21,6 @@ package org.apache.hadoop.hbase.mapreduce.replication; import java.io.IOException; import java.util.Arrays; import java.util.UUID; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; import org.apache.hadoop.fs.FileSystem; @@ -45,13 +44,14 @@ import org.apache.hadoop.hbase.filter.PrefixFilter; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.mapreduce.TableInputFormat; import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil; -import org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormat; import org.apache.hadoop.hbase.mapreduce.TableMapper; +import org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormat; import 
org.apache.hadoop.hbase.mapreduce.TableSplit; import org.apache.hadoop.hbase.replication.ReplicationException; -import org.apache.hadoop.hbase.replication.ReplicationFactory; import org.apache.hadoop.hbase.replication.ReplicationPeerConfig; +import org.apache.hadoop.hbase.replication.ReplicationPeerStorage; import org.apache.hadoop.hbase.replication.ReplicationPeers; +import org.apache.hadoop.hbase.replication.ReplicationStorageFactory; import org.apache.hadoop.hbase.snapshot.RestoreSnapshotHelper; import
[32/37] hbase git commit: HBASE-19661 Replace ReplicationStateZKBase with ZKReplicationStorageBase
HBASE-19661 Replace ReplicationStateZKBase with ZKReplicationStorageBase Project: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/80130fa8 Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/80130fa8 Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/80130fa8 Branch: refs/heads/HBASE-19397-branch-2 Commit: 80130fa8f24fabd724260e32b976034ab1e244b5 Parents: 4027b86 Author: huzhengAuthored: Fri Dec 29 15:55:28 2017 +0800 Committer: zhangduo Committed: Fri Jan 12 21:46:35 2018 +0800 -- .../hbase/replication/ReplicationFactory.java | 5 +- .../replication/ReplicationStateZKBase.java | 153 --- .../replication/ReplicationTrackerZKImpl.java | 21 +-- .../replication/ZKReplicationPeerStorage.java | 24 ++- .../replication/ZKReplicationStorageBase.java | 13 +- .../org/apache/hadoop/hbase/master/HMaster.java | 4 +- .../master/ReplicationPeerConfigUpgrader.java | 128 .../regionserver/DumpReplicationQueues.java | 18 +-- .../replication/regionserver/Replication.java | 3 +- .../org/apache/hadoop/hbase/util/HBaseFsck.java | 3 +- .../TestReplicationTrackerZKImpl.java | 3 +- .../replication/master/TestTableCFsUpdater.java | 41 ++--- .../TestReplicationSourceManager.java | 6 +- 13 files changed, 136 insertions(+), 286 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hbase/blob/80130fa8/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java -- diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java index 6c66aff..2a970ba 100644 --- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java +++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java @@ -33,9 +33,8 @@ public class ReplicationFactory { return new ReplicationPeers(zk, conf); } - public static 
ReplicationTracker getReplicationTracker(ZKWatcher zookeeper, - final ReplicationPeers replicationPeers, Configuration conf, Abortable abortable, + public static ReplicationTracker getReplicationTracker(ZKWatcher zookeeper, Abortable abortable, Stoppable stopper) { -return new ReplicationTrackerZKImpl(zookeeper, replicationPeers, conf, abortable, stopper); +return new ReplicationTrackerZKImpl(zookeeper, abortable, stopper); } } http://git-wip-us.apache.org/repos/asf/hbase/blob/80130fa8/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationStateZKBase.java -- diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationStateZKBase.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationStateZKBase.java deleted file mode 100644 index f49537c..000 --- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationStateZKBase.java +++ /dev/null @@ -1,153 +0,0 @@ -/* - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.hbase.replication; - -import java.io.ByteArrayOutputStream; -import java.io.IOException; -import java.util.List; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.Abortable; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; -import org.apache.hbase.thirdparty.com.google.protobuf.CodedOutputStream; -import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos; -import org.apache.hadoop.hbase.zookeeper.ZKConfig; -import org.apache.hadoop.hbase.zookeeper.ZKUtil; -import org.apache.hadoop.hbase.zookeeper.ZKWatcher; -import
[26/37] hbase git commit: HBASE-19623 Create replication endpoint asynchronously when adding a replication source
HBASE-19623 Create replication endpoint asynchronously when adding a replication source Project: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/c4e95db1 Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/c4e95db1 Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/c4e95db1 Branch: refs/heads/HBASE-19397-branch-2 Commit: c4e95db113dcdf4705bd3c73c5b6499d9f05a904 Parents: 3fd6544 Author: zhangduoAuthored: Tue Jan 2 13:25:58 2018 +0800 Committer: zhangduo Committed: Fri Jan 12 21:46:35 2018 +0800 -- .../hbase/replication/ReplicationPeer.java | 8 ++ .../hbase/replication/ReplicationPeers.java | 18 +-- .../replication/ZKReplicationPeerStorage.java | 7 +- .../replication/TestReplicationStateBasic.java | 20 +--- .../TestZKReplicationPeerStorage.java | 14 +-- .../HBaseInterClusterReplicationEndpoint.java | 17 ++- .../RecoveredReplicationSource.java | 13 +-- .../regionserver/ReplicationSource.java | 110 +++ .../ReplicationSourceInterface.java | 8 +- .../regionserver/ReplicationSourceManager.java | 47 +--- .../client/TestAsyncReplicationAdminApi.java| 2 - .../replication/TestReplicationAdmin.java | 2 - .../replication/ReplicationSourceDummy.java | 7 +- .../replication/TestReplicationSource.java | 27 +++-- .../TestReplicationSourceManager.java | 8 +- 15 files changed, 127 insertions(+), 181 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hbase/blob/c4e95db1/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java -- diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java index 4846018..2da3cce 100644 --- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java +++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java @@ -54,6 +54,14 @@ public interface 
ReplicationPeer { PeerState getPeerState(); /** + * Test whether the peer is enabled. + * @return {@code true} if enabled, otherwise {@code false}. + */ + default boolean isPeerEnabled() { +return getPeerState() == PeerState.ENABLED; + } + + /** * Get the peer config object * @return the ReplicationPeerConfig for this peer */ http://git-wip-us.apache.org/repos/asf/hbase/blob/c4e95db1/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeers.java -- diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeers.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeers.java index 422801b..45940a5 100644 --- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeers.java +++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeers.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hbase.replication; import java.io.IOException; +import java.util.Collections; import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; @@ -86,21 +87,6 @@ public class ReplicationPeers { } /** - * Get the peer state for the specified connected remote slave cluster. The value might be read - * from cache, so it is recommended to use {@link #peerStorage } to read storage directly if - * reading the state after enabling or disabling it. - * @param peerId a short that identifies the cluster - * @return true if replication is enabled, false otherwise. - */ - public boolean isPeerEnabled(String peerId) { -ReplicationPeer replicationPeer = this.peerCache.get(peerId); -if (replicationPeer == null) { - throw new IllegalArgumentException("Peer with id= " + peerId + " is not cached"); -} -return replicationPeer.getPeerState() == PeerState.ENABLED; - } - - /** * Returns the ReplicationPeerImpl for the specified cached peer. 
This ReplicationPeer will * continue to track changes to the Peer's state and config. This method returns null if no peer * has been cached with the given peerId. @@ -117,7 +103,7 @@ public class ReplicationPeers { * @return a Set of Strings for peerIds */ public Set getAllPeerIds() { -return peerCache.keySet(); +return Collections.unmodifiableSet(peerCache.keySet()); } public static Configuration getPeerClusterConfiguration(ReplicationPeerConfig peerConfig,
[34/37] hbase git commit: HBASE-19544 Add UTs for testing concurrent modifications on replication peer
HBASE-19544 Add UTs for testing concurrent modifications on replication peer Signed-off-by: zhangduoProject: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/97d8b993 Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/97d8b993 Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/97d8b993 Branch: refs/heads/HBASE-19397-branch-2 Commit: 97d8b993664022d57d3dd392b3fe98b420429353 Parents: f0771ea Author: Guanghao Zhang Authored: Tue Jan 2 17:07:41 2018 +0800 Committer: zhangduo Committed: Fri Jan 12 21:46:35 2018 +0800 -- .../replication/TestReplicationAdmin.java | 69 1 file changed, 69 insertions(+) -- http://git-wip-us.apache.org/repos/asf/hbase/blob/97d8b993/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java -- diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java index f9629bd..9edd4a2 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java @@ -31,6 +31,7 @@ import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; +import java.util.concurrent.atomic.AtomicLong; import java.util.regex.Pattern; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseTestingUtility; @@ -56,6 +57,8 @@ import org.junit.Rule; import org.junit.Test; import org.junit.experimental.categories.Category; import org.junit.rules.TestName; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * Unit testing of ReplicationAdmin @@ -63,6 +66,8 @@ import org.junit.rules.TestName; @Category({MediumTests.class, ClientTests.class}) public class TestReplicationAdmin { + private static final Logger LOG = 
LoggerFactory.getLogger(TestReplicationAdmin.class); + private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); @@ -112,6 +117,70 @@ public class TestReplicationAdmin { } @Test + public void testConcurrentPeerOperations() throws Exception { +int threadNum = 5; +AtomicLong successCount = new AtomicLong(0); + +// Test concurrent add peer operation +Thread[] addPeers = new Thread[threadNum]; +for (int i = 0; i < threadNum; i++) { + addPeers[i] = new Thread(() -> { +try { + hbaseAdmin.addReplicationPeer(ID_ONE, +ReplicationPeerConfig.newBuilder().setClusterKey(KEY_ONE).build()); + successCount.incrementAndGet(); +} catch (Exception e) { + LOG.debug("Got exception when add replication peer", e); +} + }); + addPeers[i].start(); +} +for (Thread addPeer : addPeers) { + addPeer.join(); +} +assertEquals(1, successCount.get()); + +// Test concurrent remove peer operation +successCount.set(0); +Thread[] removePeers = new Thread[threadNum]; +for (int i = 0; i < threadNum; i++) { + removePeers[i] = new Thread(() -> { +try { + hbaseAdmin.removeReplicationPeer(ID_ONE); + successCount.incrementAndGet(); +} catch (Exception e) { + LOG.debug("Got exception when remove replication peer", e); +} + }); + removePeers[i].start(); +} +for (Thread removePeer : removePeers) { + removePeer.join(); +} +assertEquals(1, successCount.get()); + +// Test concurrent add peer operation again +successCount.set(0); +addPeers = new Thread[threadNum]; +for (int i = 0; i < threadNum; i++) { + addPeers[i] = new Thread(() -> { +try { + hbaseAdmin.addReplicationPeer(ID_ONE, +ReplicationPeerConfig.newBuilder().setClusterKey(KEY_ONE).build()); + successCount.incrementAndGet(); +} catch (Exception e) { + LOG.debug("Got exception when add replication peer", e); +} + }); + addPeers[i].start(); +} +for (Thread addPeer : addPeers) { + addPeer.join(); +} +assertEquals(1, successCount.get()); + } + + @Test public void testAddInvalidPeer() { ReplicationPeerConfigBuilder builder = 
ReplicationPeerConfig.newBuilder(); builder.setClusterKey(KEY_ONE);
[31/37] hbase git commit: HBASE-19707 Race in start and terminate of a replication source after we async start replication endpoint
HBASE-19707 Race in start and terminate of a replication source after we async start replicatione endpoint Project: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/a3cdfb6f Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/a3cdfb6f Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/a3cdfb6f Branch: refs/heads/HBASE-19397-branch-2 Commit: a3cdfb6f28cc2d482fa906234b9c181cd7e54667 Parents: bce92be Author: zhangduoAuthored: Fri Jan 5 18:28:44 2018 +0800 Committer: zhangduo Committed: Fri Jan 12 21:46:35 2018 +0800 -- .../RecoveredReplicationSource.java | 16 +- .../regionserver/ReplicationSource.java | 202 ++- 2 files changed, 116 insertions(+), 102 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hbase/blob/a3cdfb6f/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RecoveredReplicationSource.java -- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RecoveredReplicationSource.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RecoveredReplicationSource.java index 1be9a88..3cae0f2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RecoveredReplicationSource.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RecoveredReplicationSource.java @@ -68,7 +68,7 @@ public class RecoveredReplicationSource extends ReplicationSource { LOG.debug("Someone has beat us to start a worker thread for wal group " + walGroupId); } else { LOG.debug("Starting up worker for wal group " + walGroupId); - worker.startup(getUncaughtExceptionHandler()); + worker.startup(this::uncaughtException); worker.setWALReader( startNewWALReader(worker.getName(), walGroupId, queue, worker.getStartPosition())); workerThreads.put(walGroupId, worker); @@ -76,13 +76,13 @@ public class RecoveredReplicationSource extends ReplicationSource { } @Override - protected 
ReplicationSourceWALReader startNewWALReader(String threadName, - String walGroupId, PriorityBlockingQueue queue, long startPosition) { -ReplicationSourceWALReader walReader = new RecoveredReplicationSourceWALReader(fs, -conf, queue, startPosition, walEntryFilter, this); -Threads.setDaemonThreadRunning(walReader, threadName -+ ".replicationSource.replicationWALReaderThread." + walGroupId + "," + queueId, - getUncaughtExceptionHandler()); + protected ReplicationSourceWALReader startNewWALReader(String threadName, String walGroupId, + PriorityBlockingQueue queue, long startPosition) { +ReplicationSourceWALReader walReader = + new RecoveredReplicationSourceWALReader(fs, conf, queue, startPosition, walEntryFilter, this); +Threads.setDaemonThreadRunning(walReader, + threadName + ".replicationSource.replicationWALReaderThread." + walGroupId + "," + queueId, + this::uncaughtException); return walReader; } http://git-wip-us.apache.org/repos/asf/hbase/blob/a3cdfb6f/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java -- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java index 6b622ee..923d893 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java @@ -76,7 +76,7 @@ import org.apache.hbase.thirdparty.com.google.common.collect.Lists; * */ @InterfaceAudience.Private -public class ReplicationSource extends Thread implements ReplicationSourceInterface { +public class ReplicationSource implements ReplicationSourceInterface { private static final Logger LOG = LoggerFactory.getLogger(ReplicationSource.class); // Queues of logs to process, entry in format of walGroupId->queue, @@ -115,10 +115,8 @@ public class 
ReplicationSource extends Thread implements ReplicationSourceInterf private MetricsSource metrics; // WARN threshold for the number of queued logs, defaults to 2 private int logQueueWarnThreshold; - // whether the replication endpoint has been initialized - private volatile boolean endpointInitialized = false; // ReplicationEndpoint which will handle the actual replication - private ReplicationEndpoint replicationEndpoint; + private volatile
[27/37] hbase git commit: HBASE-19719 Fix checkstyle issues
HBASE-19719 Fix checkstyle issues Project: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/e70b7541 Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/e70b7541 Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/e70b7541 Branch: refs/heads/HBASE-19397-branch-2 Commit: e70b754107a6aefac98dc908213648592b2f74a3 Parents: 7ca9840 Author: zhangduoAuthored: Sat Jan 6 08:30:55 2018 +0800 Committer: zhangduo Committed: Fri Jan 12 21:46:35 2018 +0800 -- .../hbase/replication/ReplicationStorageFactory.java | 2 +- .../master/assignment/RegionTransitionProcedure.java | 4 ++-- .../hbase/master/procedure/RSProcedureDispatcher.java | 13 ++--- .../master/ReplicationPeerConfigUpgrader.java | 8 4 files changed, 13 insertions(+), 14 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hbase/blob/e70b7541/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationStorageFactory.java -- diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationStorageFactory.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationStorageFactory.java index 60d0749..462cfed 100644 --- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationStorageFactory.java +++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationStorageFactory.java @@ -27,7 +27,7 @@ import org.apache.yetus.audience.InterfaceAudience; * For now we only have zk based implementation. 
*/ @InterfaceAudience.Private -public class ReplicationStorageFactory { +public final class ReplicationStorageFactory { private ReplicationStorageFactory() { } http://git-wip-us.apache.org/repos/asf/hbase/blob/e70b7541/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionTransitionProcedure.java -- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionTransitionProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionTransitionProcedure.java index 1724a38..8277dbe 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionTransitionProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionTransitionProcedure.java @@ -36,11 +36,11 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; + import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RegionTransitionState; import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; - /** * Base class for the Assign and Unassign Procedure. 
* http://git-wip-us.apache.org/repos/asf/hbase/blob/e70b7541/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.java -- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.java index 0f68f31..a6d57d2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.master.procedure; import java.io.IOException; @@ -36,6 +35,12 @@ import org.apache.hadoop.ipc.RemoteException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hbase.thirdparty.com.google.common.collect.ArrayListMultimap; +import org.apache.hbase.thirdparty.com.google.protobuf.ByteString; +import org.apache.hbase.thirdparty.com.google.protobuf.RpcCallback; +import org.apache.hbase.thirdparty.com.google.protobuf.RpcController; +import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService; @@ -47,12 +52,6 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionR
[19/37] hbase git commit: HBASE-19592 Add UTs to test retry on update zk failure
HBASE-19592 Add UTs to test retry on update zk failure Project: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/e9ca451d Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/e9ca451d Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/e9ca451d Branch: refs/heads/HBASE-19397-branch-2 Commit: e9ca451dca2ed3cc32efa580b8d947817c4abb4f Parents: 404f5e3 Author: zhangduoAuthored: Tue Dec 26 20:39:00 2017 +0800 Committer: zhangduo Committed: Fri Jan 12 21:46:18 2018 +0800 -- .../replication/ReplicationPeerManager.java | 5 +- .../TestReplicationProcedureRetry.java | 200 +++ 2 files changed, 202 insertions(+), 3 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hbase/blob/e9ca451d/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java -- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java index b78cbce..f4ccce8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.java @@ -53,7 +53,7 @@ import org.apache.yetus.audience.InterfaceAudience; * Used to add/remove a replication peer. 
*/ @InterfaceAudience.Private -public final class ReplicationPeerManager { +public class ReplicationPeerManager { private final ReplicationPeerStorage peerStorage; @@ -61,8 +61,7 @@ public final class ReplicationPeerManager { private final ConcurrentMap peers; - private ReplicationPeerManager(ReplicationPeerStorage peerStorage, - ReplicationQueueStorage queueStorage, + ReplicationPeerManager(ReplicationPeerStorage peerStorage, ReplicationQueueStorage queueStorage, ConcurrentMap peers) { this.peerStorage = peerStorage; this.queueStorage = queueStorage; http://git-wip-us.apache.org/repos/asf/hbase/blob/e9ca451d/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationProcedureRetry.java -- diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationProcedureRetry.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationProcedureRetry.java new file mode 100644 index 000..ab35b46 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationProcedureRetry.java @@ -0,0 +1,200 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.replication; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyBoolean; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.spy; + +import java.io.IOException; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.master.HMaster; +import org.apache.hadoop.hbase.master.replication.ReplicationPeerManager; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.testclassification.ReplicationTests; +import org.apache.zookeeper.KeeperException; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.mockito.invocation.InvocationOnMock; +
[05/37] hbase git commit: HBASE-19216 Implement a general framework to execute remote procedure on RS
HBASE-19216 Implement a general framework to execute remote procedure on RS Project: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/a92d2226 Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/a92d2226 Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/a92d2226 Branch: refs/heads/HBASE-19397-branch-2 Commit: a92d222677296b1bb0f6bbad748ba3b23c702d3c Parents: 72702ee Author: zhangduoAuthored: Fri Dec 15 21:06:44 2017 +0800 Committer: zhangduo Committed: Fri Jan 12 21:42:57 2018 +0800 -- .../hbase/procedure2/LockedResourceType.java| 4 +- .../procedure2/RemoteProcedureDispatcher.java | 23 +- .../src/main/protobuf/Admin.proto | 9 +- .../src/main/protobuf/MasterProcedure.proto | 30 +++ .../src/main/protobuf/RegionServerStatus.proto | 15 ++ .../apache/hadoop/hbase/executor/EventType.java | 26 ++- .../hadoop/hbase/executor/ExecutorType.java | 3 +- .../org/apache/hadoop/hbase/master/HMaster.java | 33 ++- .../hadoop/hbase/master/MasterRpcServices.java | 13 ++ .../assignment/RegionTransitionProcedure.java | 18 +- .../procedure/MasterProcedureScheduler.java | 224 +-- .../procedure/PeerProcedureInterface.java | 34 +++ .../master/procedure/RSProcedureDispatcher.java | 101 + .../master/replication/ModifyPeerProcedure.java | 127 +++ .../master/replication/RefreshPeerCallable.java | 67 ++ .../replication/RefreshPeerProcedure.java | 197 .../hbase/procedure2/RSProcedureCallable.java | 43 .../hbase/regionserver/HRegionServer.java | 90 ++-- .../hbase/regionserver/RSRpcServices.java | 56 +++-- .../handler/RSProcedureHandler.java | 51 + .../assignment/TestAssignmentManager.java | 20 +- .../replication/DummyModifyPeerProcedure.java | 41 .../TestDummyModifyPeerProcedure.java | 80 +++ .../security/access/TestAccessController.java | 1 + 24 files changed, 1122 insertions(+), 184 deletions(-) -- 
http://git-wip-us.apache.org/repos/asf/hbase/blob/a92d2226/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockedResourceType.java -- diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockedResourceType.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockedResourceType.java index c5fe62b..dc9b5d4 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockedResourceType.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockedResourceType.java @@ -1,4 +1,4 @@ -/* +/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -22,5 +22,5 @@ import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private public enum LockedResourceType { - SERVER, NAMESPACE, TABLE, REGION + SERVER, NAMESPACE, TABLE, REGION, PEER } http://git-wip-us.apache.org/repos/asf/hbase/blob/a92d2226/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java -- diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java index 71932b8..78c49fb 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java @@ -226,13 +226,30 @@ public abstract class RemoteProcedureDispatcher { +/** + * For building the remote operation. + */ RemoteOperation remoteCallBuild(TEnv env, TRemote remote); -void remoteCallCompleted(TEnv env, TRemote remote, RemoteOperation response); + +/** + * Called when the executeProcedure call is failed. 
+ */ void remoteCallFailed(TEnv env, TRemote remote, IOException exception); + +/** + * Called when RS tells the remote procedure is succeeded through the + * {@code reportProcedureDone} method. + */ +void remoteOperationCompleted(TEnv env); + +/** + * Called when RS tells the remote procedure is failed through the {@code reportProcedureDone} + * method. + * @param error the error message + */ +void
[11/37] hbase git commit: HBASE-19543 Abstract a replication storage interface to extract the zk specific code
HBASE-19543 Abstract a replication storage interface to extract the zk specific code Project: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/226e9679 Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/226e9679 Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/226e9679 Branch: refs/heads/HBASE-19397-branch-2 Commit: 226e9679cc96955ca590b80a82ce0b46a95bbbc6 Parents: c6564a2 Author: zhangduoAuthored: Fri Dec 22 14:37:28 2017 +0800 Committer: zhangduo Committed: Fri Jan 12 21:46:18 2018 +0800 -- .../hadoop/hbase/util/CollectionUtils.java | 3 + hbase-replication/pom.xml | 12 + .../replication/ReplicationPeerStorage.java | 74 .../replication/ReplicationQueueStorage.java| 164 +++ .../replication/ReplicationStateZKBase.java | 1 - .../replication/ReplicationStorageFactory.java | 49 +++ .../replication/ZKReplicationPeerStorage.java | 164 +++ .../replication/ZKReplicationQueueStorage.java | 425 +++ .../replication/ZKReplicationStorageBase.java | 75 .../TestZKReplicationPeerStorage.java | 171 .../TestZKReplicationQueueStorage.java | 171 .../org/apache/hadoop/hbase/master/HMaster.java | 36 +- .../hadoop/hbase/master/MasterServices.java | 6 +- .../master/procedure/MasterProcedureEnv.java| 24 +- .../master/replication/AddPeerProcedure.java| 6 +- .../replication/DisablePeerProcedure.java | 7 +- .../master/replication/EnablePeerProcedure.java | 6 +- .../master/replication/ModifyPeerProcedure.java | 41 +- .../master/replication/RemovePeerProcedure.java | 6 +- .../master/replication/ReplicationManager.java | 199 - .../replication/ReplicationPeerManager.java | 331 +++ .../replication/UpdatePeerConfigProcedure.java | 7 +- .../replication/TestReplicationAdmin.java | 63 ++- .../hbase/master/MockNoopMasterServices.java| 12 +- .../hbase/master/TestMasterNoCluster.java | 4 +- .../TestReplicationDisableInactivePeer.java | 6 +- 26 files changed, 1750 insertions(+), 313 deletions(-) -- 
http://git-wip-us.apache.org/repos/asf/hbase/blob/226e9679/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CollectionUtils.java -- diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CollectionUtils.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CollectionUtils.java index 875b124..8bbb6f1 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CollectionUtils.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CollectionUtils.java @@ -107,6 +107,9 @@ public class CollectionUtils { return list.get(list.size() - 1); } + public static List nullToEmpty(List list) { +return list != null ? list : Collections.emptyList(); + } /** * In HBASE-16648 we found that ConcurrentHashMap.get is much faster than computeIfAbsent if the * value already exists. Notice that the implementation does not guarantee that the supplier will http://git-wip-us.apache.org/repos/asf/hbase/blob/226e9679/hbase-replication/pom.xml -- diff --git a/hbase-replication/pom.xml b/hbase-replication/pom.xml index bd593d3..b28e852 100644 --- a/hbase-replication/pom.xml +++ b/hbase-replication/pom.xml @@ -121,6 +121,18 @@ org.apache.hbase hbase-zookeeper + + org.apache.hbase + hbase-common + test-jar + test + + + org.apache.hbase + hbase-zookeeper + test-jar + test + org.apache.commons http://git-wip-us.apache.org/repos/asf/hbase/blob/226e9679/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerStorage.java -- diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerStorage.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerStorage.java new file mode 100644 index 000..e00cd0d --- /dev/null +++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerStorage.java @@ -0,0 +1,74 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You
[15/37] hbase git commit: HBASE-19599 Remove ReplicationQueuesClient, use ReplicationQueueStorage directly
HBASE-19599 Remove ReplicationQueuesClient, use ReplicationQueueStorage directly Project: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/0cce5c5e Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/0cce5c5e Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/0cce5c5e Branch: refs/heads/HBASE-19397-branch-2 Commit: 0cce5c5e7e27d33634af2831a855b45729262760 Parents: 226e967 Author: zhangduoAuthored: Mon Dec 25 18:49:56 2017 +0800 Committer: zhangduo Committed: Fri Jan 12 21:46:18 2018 +0800 -- .../hbase/replication/ReplicationFactory.java | 19 +- .../replication/ReplicationPeersZKImpl.java | 21 +- .../replication/ReplicationQueueStorage.java| 26 +- .../replication/ReplicationQueuesClient.java| 93 - .../ReplicationQueuesClientArguments.java | 40 -- .../ReplicationQueuesClientZKImpl.java | 176 - .../replication/ZKReplicationQueueStorage.java | 90 - .../replication/TestReplicationStateBasic.java | 378 +++ .../replication/TestReplicationStateZKImpl.java | 148 .../TestZKReplicationQueueStorage.java | 74 .../cleaner/ReplicationZKNodeCleaner.java | 71 ++-- .../cleaner/ReplicationZKNodeCleanerChore.java | 5 +- .../replication/ReplicationPeerManager.java | 31 +- .../master/ReplicationHFileCleaner.java | 109 ++ .../master/ReplicationLogCleaner.java | 35 +- .../regionserver/DumpReplicationQueues.java | 78 ++-- .../hbase/util/hbck/ReplicationChecker.java | 14 +- .../client/TestAsyncReplicationAdminApi.java| 31 +- .../replication/TestReplicationAdmin.java | 2 + .../hbase/master/cleaner/TestLogsCleaner.java | 30 +- .../cleaner/TestReplicationHFileCleaner.java| 59 +-- .../cleaner/TestReplicationZKNodeCleaner.java | 12 +- .../replication/TestReplicationStateBasic.java | 378 --- .../replication/TestReplicationStateZKImpl.java | 227 --- .../TestReplicationSourceManagerZkImpl.java | 84 ++--- 25 files changed, 905 insertions(+), 1326 deletions(-) -- 
http://git-wip-us.apache.org/repos/asf/hbase/blob/0cce5c5e/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java -- diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java index 9f4ad18..6c1c213 100644 --- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java +++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java @@ -1,5 +1,4 @@ -/* - * +/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -37,20 +36,14 @@ public class ReplicationFactory { args); } - public static ReplicationQueuesClient - getReplicationQueuesClient(ReplicationQueuesClientArguments args) throws Exception { -return (ReplicationQueuesClient) ConstructorUtils -.invokeConstructor(ReplicationQueuesClientZKImpl.class, args); - } - - public static ReplicationPeers getReplicationPeers(final ZKWatcher zk, Configuration conf, - Abortable abortable) { + public static ReplicationPeers getReplicationPeers(ZKWatcher zk, Configuration conf, + Abortable abortable) { return getReplicationPeers(zk, conf, null, abortable); } - public static ReplicationPeers getReplicationPeers(final ZKWatcher zk, Configuration conf, - final ReplicationQueuesClient queuesClient, Abortable abortable) { -return new ReplicationPeersZKImpl(zk, conf, queuesClient, abortable); + public static ReplicationPeers getReplicationPeers(ZKWatcher zk, Configuration conf, + ReplicationQueueStorage queueStorage, Abortable abortable) { +return new ReplicationPeersZKImpl(zk, conf, queueStorage, abortable); } public static ReplicationTracker getReplicationTracker(ZKWatcher zookeeper, 
http://git-wip-us.apache.org/repos/asf/hbase/blob/0cce5c5e/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java -- diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java index 8ed0150..7de4619 100644 ---
[03/37] hbase git commit: HBASE-19564 Procedure id is missing in the response of peer related operations
HBASE-19564 Procedure id is missing in the response of peer related operations Project: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/b5950cd4 Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/b5950cd4 Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/b5950cd4 Branch: refs/heads/HBASE-19397-branch-2 Commit: b5950cd4af966d20847403c5b50ad983b926a789 Parents: df411c4 Author: zhangduoAuthored: Wed Dec 20 20:57:37 2017 +0800 Committer: zhangduo Committed: Fri Jan 12 21:42:57 2018 +0800 -- .../hadoop/hbase/master/MasterRpcServices.java | 24 ++-- .../master/replication/ModifyPeerProcedure.java | 4 +--- 2 files changed, 13 insertions(+), 15 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hbase/blob/b5950cd4/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java -- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java index 8025a51..72bf2d1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java @@ -1886,10 +1886,10 @@ public class MasterRpcServices extends RSRpcServices public AddReplicationPeerResponse addReplicationPeer(RpcController controller, AddReplicationPeerRequest request) throws ServiceException { try { - master.addReplicationPeer(request.getPeerId(), -ReplicationPeerConfigUtil.convert(request.getPeerConfig()), request.getPeerState() -.getState().equals(ReplicationState.State.ENABLED)); - return AddReplicationPeerResponse.newBuilder().build(); + long procId = master.addReplicationPeer(request.getPeerId(), +ReplicationPeerConfigUtil.convert(request.getPeerConfig()), + request.getPeerState().getState().equals(ReplicationState.State.ENABLED)); + return 
AddReplicationPeerResponse.newBuilder().setProcId(procId).build(); } catch (ReplicationException | IOException e) { throw new ServiceException(e); } @@ -1899,8 +1899,8 @@ public class MasterRpcServices extends RSRpcServices public RemoveReplicationPeerResponse removeReplicationPeer(RpcController controller, RemoveReplicationPeerRequest request) throws ServiceException { try { - master.removeReplicationPeer(request.getPeerId()); - return RemoveReplicationPeerResponse.newBuilder().build(); + long procId = master.removeReplicationPeer(request.getPeerId()); + return RemoveReplicationPeerResponse.newBuilder().setProcId(procId).build(); } catch (ReplicationException | IOException e) { throw new ServiceException(e); } @@ -1910,8 +1910,8 @@ public class MasterRpcServices extends RSRpcServices public EnableReplicationPeerResponse enableReplicationPeer(RpcController controller, EnableReplicationPeerRequest request) throws ServiceException { try { - master.enableReplicationPeer(request.getPeerId()); - return EnableReplicationPeerResponse.newBuilder().build(); + long procId = master.enableReplicationPeer(request.getPeerId()); + return EnableReplicationPeerResponse.newBuilder().setProcId(procId).build(); } catch (ReplicationException | IOException e) { throw new ServiceException(e); } @@ -1921,8 +1921,8 @@ public class MasterRpcServices extends RSRpcServices public DisableReplicationPeerResponse disableReplicationPeer(RpcController controller, DisableReplicationPeerRequest request) throws ServiceException { try { - master.disableReplicationPeer(request.getPeerId()); - return DisableReplicationPeerResponse.newBuilder().build(); + long procId = master.disableReplicationPeer(request.getPeerId()); + return DisableReplicationPeerResponse.newBuilder().setProcId(procId).build(); } catch (ReplicationException | IOException e) { throw new ServiceException(e); } @@ -1948,9 +1948,9 @@ public class MasterRpcServices extends RSRpcServices public UpdateReplicationPeerConfigResponse 
updateReplicationPeerConfig(RpcController controller, UpdateReplicationPeerConfigRequest request) throws ServiceException { try { - master.updateReplicationPeerConfig(request.getPeerId(), + long procId = master.updateReplicationPeerConfig(request.getPeerId(), ReplicationPeerConfigUtil.convert(request.getPeerConfig())); - return UpdateReplicationPeerConfigResponse.newBuilder().build(); + return UpdateReplicationPeerConfigResponse.newBuilder().setProcId(procId).build(); } catch
[33/37] hbase git commit: HBASE-19697 Remove TestReplicationAdminUsingProcedure
HBASE-19697 Remove TestReplicationAdminUsingProcedure Project: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/66993249 Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/66993249 Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/66993249 Branch: refs/heads/HBASE-19397-branch-2 Commit: 669932494bc2e092754fba59d9d9f1a976a9b2b5 Parents: 80130fa Author: zhangduoAuthored: Wed Jan 3 21:13:57 2018 +0800 Committer: zhangduo Committed: Fri Jan 12 21:46:35 2018 +0800 -- .../TestReplicationAdminUsingProcedure.java | 225 --- 1 file changed, 225 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hbase/blob/66993249/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdminUsingProcedure.java -- diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdminUsingProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdminUsingProcedure.java deleted file mode 100644 index 1300376..000 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdminUsingProcedure.java +++ /dev/null @@ -1,225 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.client.replication; - -import java.io.IOException; -import org.apache.hadoop.hbase.client.Put; -import org.apache.hadoop.hbase.client.Result; -import org.apache.hadoop.hbase.client.ResultScanner; -import org.apache.hadoop.hbase.client.Scan; -import org.apache.hadoop.hbase.replication.ReplicationPeerConfig; -import org.apache.hadoop.hbase.replication.TestReplicationBase; -import org.apache.hadoop.hbase.testclassification.ClientTests; -import org.apache.hadoop.hbase.testclassification.MediumTests; -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.log4j.Logger; -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.Test; -import org.junit.experimental.categories.Category; - -import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList; -import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableMap; - -@Category({ MediumTests.class, ClientTests.class }) -public class TestReplicationAdminUsingProcedure extends TestReplicationBase { - - private static final String PEER_ID = "2"; - private static final Logger LOG = Logger.getLogger(TestReplicationAdminUsingProcedure.class); - - @BeforeClass - public static void setUpBeforeClass() throws Exception { -conf1.setInt("hbase.multihconnection.threads.max", 10); - -// Start the master & slave mini cluster. 
-TestReplicationBase.setUpBeforeClass(); - -// Remove the replication peer -hbaseAdmin.removeReplicationPeer(PEER_ID); - } - - private void loadData(int startRowKey, int endRowKey) throws IOException { -for (int i = startRowKey; i < endRowKey; i++) { - byte[] rowKey = Bytes.add(row, Bytes.toBytes(i)); - Put put = new Put(rowKey); - put.addColumn(famName, null, Bytes.toBytes(i)); - htable1.put(put); -} - } - - private void waitForReplication(int expectedRows, int retries) - throws IOException, InterruptedException { -Scan scan; -for (int i = 0; i < retries; i++) { - scan = new Scan(); - if (i == retries - 1) { -throw new IOException("Waited too much time for normal batch replication"); - } - try (ResultScanner scanner = htable2.getScanner(scan)) { -int count = 0; -for (Result res : scanner) { - count++; -} -if (count != expectedRows) { - LOG.info("Only got " + count + " rows, expected rows: " + expectedRows); - Thread.sleep(SLEEP_TIME); -} else { - return; -} - } -} - } - - @Before - public void setUp() throws IOException { -
[10/37] hbase git commit: HBASE-19543 Abstract a replication storage interface to extract the zk specific code
http://git-wip-us.apache.org/repos/asf/hbase/blob/226e9679/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationManager.java -- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationManager.java deleted file mode 100644 index b6f8784..000 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationManager.java +++ /dev/null @@ -1,199 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.hbase.master.replication; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Collection; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.regex.Pattern; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.Abortable; -import org.apache.hadoop.hbase.ReplicationPeerNotFoundException; -import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.replication.BaseReplicationEndpoint; -import org.apache.hadoop.hbase.replication.ReplicationException; -import org.apache.hadoop.hbase.replication.ReplicationFactory; -import org.apache.hadoop.hbase.replication.ReplicationPeerConfig; -import org.apache.hadoop.hbase.replication.ReplicationPeerDescription; -import org.apache.hadoop.hbase.replication.ReplicationPeers; -import org.apache.hadoop.hbase.replication.ReplicationQueuesClient; -import org.apache.hadoop.hbase.replication.ReplicationQueuesClientArguments; -import org.apache.hadoop.hbase.zookeeper.ZKWatcher; -import org.apache.yetus.audience.InterfaceAudience; - -/** - * Manages and performs all replication admin operations. - * - * Used to add/remove a replication peer. 
- */ -@InterfaceAudience.Private -public class ReplicationManager { - private final ReplicationQueuesClient replicationQueuesClient; - private final ReplicationPeers replicationPeers; - - public ReplicationManager(Configuration conf, ZKWatcher zkw, Abortable abortable) - throws IOException { -try { - this.replicationQueuesClient = ReplicationFactory - .getReplicationQueuesClient(new ReplicationQueuesClientArguments(conf, abortable, zkw)); - this.replicationQueuesClient.init(); - this.replicationPeers = ReplicationFactory.getReplicationPeers(zkw, conf, -this.replicationQueuesClient, abortable); - this.replicationPeers.init(); -} catch (Exception e) { - throw new IOException("Failed to construct ReplicationManager", e); -} - } - - public void addReplicationPeer(String peerId, ReplicationPeerConfig peerConfig, boolean enabled) - throws ReplicationException { -checkPeerConfig(peerConfig); -replicationPeers.registerPeer(peerId, peerConfig, enabled); -replicationPeers.peerConnected(peerId); - } - - public void removeReplicationPeer(String peerId) throws ReplicationException { -replicationPeers.peerDisconnected(peerId); -replicationPeers.unregisterPeer(peerId); - } - - public void enableReplicationPeer(String peerId) throws ReplicationException { -this.replicationPeers.enablePeer(peerId); - } - - public void disableReplicationPeer(String peerId) throws ReplicationException { -this.replicationPeers.disablePeer(peerId); - } - - public ReplicationPeerConfig getPeerConfig(String peerId) - throws ReplicationException, ReplicationPeerNotFoundException { -ReplicationPeerConfig peerConfig = replicationPeers.getReplicationPeerConfig(peerId); -if (peerConfig == null) { - throw new ReplicationPeerNotFoundException(peerId); -} -return peerConfig; - } - - public void updatePeerConfig(String peerId, ReplicationPeerConfig peerConfig) - throws ReplicationException, IOException { -checkPeerConfig(peerConfig); -this.replicationPeers.updatePeerConfig(peerId, peerConfig); - } - - public List 
listReplicationPeers(Pattern pattern) - throws ReplicationException { -List peers = new ArrayList<>(); -List peerIds = replicationPeers.getAllPeerIds(); -for (String peerId :
[17/37] hbase git commit: HBASE-19617 Remove ReplicationQueues, use ReplicationQueueStorage directly
http://git-wip-us.apache.org/repos/asf/hbase/blob/930be731/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceInterface.java -- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceInterface.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceInterface.java index b6cf54d..4b9ed74 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceInterface.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceInterface.java @@ -22,7 +22,6 @@ import java.io.IOException; import java.util.List; import java.util.UUID; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -32,9 +31,10 @@ import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.replication.ReplicationEndpoint; import org.apache.hadoop.hbase.replication.ReplicationException; import org.apache.hadoop.hbase.replication.ReplicationPeers; -import org.apache.hadoop.hbase.replication.ReplicationQueues; +import org.apache.hadoop.hbase.replication.ReplicationQueueStorage; import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hbase.wal.WAL.Entry; +import org.apache.yetus.audience.InterfaceAudience; /** * Interface that defines a replication source @@ -47,15 +47,10 @@ public interface ReplicationSourceInterface { * @param conf the configuration to use * @param fs the file system to use * @param manager the manager to use - * @param replicationQueues - * @param replicationPeers * @param server the server for this region server - * @param peerClusterZnode - * @param clusterId - * @throws IOException */ void init(Configuration conf, FileSystem fs, ReplicationSourceManager manager, - ReplicationQueues replicationQueues, ReplicationPeers 
replicationPeers, Server server, + ReplicationQueueStorage queueStorage, ReplicationPeers replicationPeers, Server server, String peerClusterZnode, UUID clusterId, ReplicationEndpoint replicationEndpoint, WALFileLengthProvider walFileLengthProvider, MetricsSource metrics) throws IOException; http://git-wip-us.apache.org/repos/asf/hbase/blob/930be731/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java -- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java index b1d82c8..853bafb 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java @@ -34,19 +34,21 @@ import java.util.TreeSet; import java.util.UUID; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.CopyOnWriteArrayList; +import java.util.concurrent.Future; import java.util.concurrent.LinkedBlockingQueue; import java.util.concurrent.RejectedExecutionException; import java.util.concurrent.ThreadLocalRandom; import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicLong; - +import java.util.stream.Collectors; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.MetaTableAccessor; import org.apache.hadoop.hbase.Server; +import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableDescriptors; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Connection; @@ -60,7 +62,7 @@ import org.apache.hadoop.hbase.replication.ReplicationPeer; import 
org.apache.hadoop.hbase.replication.ReplicationPeerConfig; import org.apache.hadoop.hbase.replication.ReplicationPeers; import org.apache.hadoop.hbase.replication.ReplicationQueueInfo; -import org.apache.hadoop.hbase.replication.ReplicationQueues; +import org.apache.hadoop.hbase.replication.ReplicationQueueStorage; import org.apache.hadoop.hbase.replication.ReplicationTracker; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Pair; @@ -68,6 +70,7 @@ import org.apache.hadoop.hbase.wal.AbstractFSWALProvider; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; import
[35/37] hbase git commit: HBASE-19748 TestRegionReplicaFailover and TestRegionReplicaReplicationEndpoint UT hangs
HBASE-19748 TestRegionReplicaFailover and TestRegionReplicaReplicationEndpoint UT hangs Project: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/69f462df Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/69f462df Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/69f462df Branch: refs/heads/HBASE-19397-branch-2 Commit: 69f462dfd074ab6c62eb0b51ccd8c4f631b13fbb Parents: e70b754 Author: huzhengAuthored: Wed Jan 10 15:00:30 2018 +0800 Committer: zhangduo Committed: Fri Jan 12 21:46:35 2018 +0800 -- .../main/java/org/apache/hadoop/hbase/master/HMaster.java | 9 - 1 file changed, 4 insertions(+), 5 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hbase/blob/69f462df/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java -- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java index 64522c0..52e1c10 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java @@ -38,7 +38,6 @@ import java.util.List; import java.util.Map; import java.util.Map.Entry; import java.util.Objects; -import java.util.Optional; import java.util.Set; import java.util.concurrent.ExecutionException; import java.util.concurrent.Future; @@ -68,6 +67,7 @@ import org.apache.hadoop.hbase.MasterNotRunningException; import org.apache.hadoop.hbase.MetaTableAccessor; import org.apache.hadoop.hbase.NamespaceDescriptor; import org.apache.hadoop.hbase.PleaseHoldException; +import org.apache.hadoop.hbase.ReplicationPeerNotFoundException; import org.apache.hadoop.hbase.ServerLoad; import org.apache.hadoop.hbase.ServerMetricsBuilder; import org.apache.hadoop.hbase.ServerName; @@ -3346,13 +3346,12 @@ public class HMaster extends HRegionServer implements MasterServices { cpHost.preGetReplicationPeerConfig(peerId); } 
LOG.info(getClientIdAuditPrefix() + " get replication peer config, id=" + peerId); -Optional peerConfig = - this.replicationPeerManager.getPeerConfig(peerId); - +ReplicationPeerConfig peerConfig = this.replicationPeerManager.getPeerConfig(peerId) +.orElseThrow(() -> new ReplicationPeerNotFoundException(peerId)); if (cpHost != null) { cpHost.postGetReplicationPeerConfig(peerId); } -return peerConfig.orElse(null); +return peerConfig; } @Override
[14/37] hbase git commit: HBASE-19599 Remove ReplicationQueuesClient, use ReplicationQueueStorage directly
http://git-wip-us.apache.org/repos/asf/hbase/blob/0cce5c5e/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java -- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java index 6e27a21..d8f9625 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java @@ -21,13 +21,13 @@ import java.io.FileNotFoundException; import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; +import java.util.HashSet; import java.util.LinkedList; import java.util.List; import java.util.Map; import java.util.Queue; import java.util.Set; import java.util.stream.Collectors; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; import org.apache.hadoop.fs.FileStatus; @@ -48,17 +48,18 @@ import org.apache.hadoop.hbase.replication.ReplicationPeerConfig; import org.apache.hadoop.hbase.replication.ReplicationPeerDescription; import org.apache.hadoop.hbase.replication.ReplicationPeers; import org.apache.hadoop.hbase.replication.ReplicationQueueInfo; +import org.apache.hadoop.hbase.replication.ReplicationQueueStorage; import org.apache.hadoop.hbase.replication.ReplicationQueues; -import org.apache.hadoop.hbase.replication.ReplicationQueuesClient; -import org.apache.hadoop.hbase.replication.ReplicationQueuesClientArguments; +import org.apache.hadoop.hbase.replication.ReplicationQueuesArguments; +import org.apache.hadoop.hbase.replication.ReplicationStorageFactory; import org.apache.hadoop.hbase.replication.ReplicationTracker; import org.apache.hadoop.hbase.zookeeper.ZKUtil; import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.hadoop.util.Tool; import 
org.apache.hadoop.util.ToolRunner; -import org.apache.zookeeper.KeeperException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.common.util.concurrent.AtomicLongMap; /** @@ -303,57 +304,53 @@ public class DumpReplicationQueues extends Configured implements Tool { } public String dumpQueues(ClusterConnection connection, ZKWatcher zkw, Set peerIds, - boolean hdfs) throws Exception { -ReplicationQueuesClient queuesClient; + boolean hdfs) throws Exception { +ReplicationQueueStorage queueStorage; ReplicationPeers replicationPeers; ReplicationQueues replicationQueues; ReplicationTracker replicationTracker; -ReplicationQueuesClientArguments replicationArgs = -new ReplicationQueuesClientArguments(getConf(), new WarnOnlyAbortable(), zkw); +ReplicationQueuesArguments replicationArgs = +new ReplicationQueuesArguments(getConf(), new WarnOnlyAbortable(), zkw); StringBuilder sb = new StringBuilder(); -queuesClient = ReplicationFactory.getReplicationQueuesClient(replicationArgs); -queuesClient.init(); +queueStorage = ReplicationStorageFactory.getReplicationQueueStorage(zkw, getConf()); replicationQueues = ReplicationFactory.getReplicationQueues(replicationArgs); -replicationPeers = ReplicationFactory.getReplicationPeers(zkw, getConf(), queuesClient, connection); +replicationPeers = +ReplicationFactory.getReplicationPeers(zkw, getConf(), queueStorage, connection); replicationTracker = ReplicationFactory.getReplicationTracker(zkw, replicationPeers, getConf(), new WarnOnlyAbortable(), new WarnOnlyStoppable()); -List liveRegionServers = replicationTracker.getListOfRegionServers(); +Set liveRegionServers = new HashSet<>(replicationTracker.getListOfRegionServers()); // Loops each peer on each RS and dumps the queues -try { - List regionservers = queuesClient.getListOfReplicators(); - if (regionservers == null || regionservers.isEmpty()) { -return sb.toString(); +List regionservers = queueStorage.getListOfReplicators(); +if 
(regionservers == null || regionservers.isEmpty()) { + return sb.toString(); +} +for (ServerName regionserver : regionservers) { + List queueIds = queueStorage.getAllQueues(regionserver); + replicationQueues.init(regionserver.getServerName()); + if (!liveRegionServers.contains(regionserver.getServerName())) { +deadRegionServers.add(regionserver.getServerName()); } - for (String regionserver : regionservers) { -List queueIds = queuesClient.getAllQueues(regionserver); -replicationQueues.init(regionserver); -if (!liveRegionServers.contains(regionserver)) { - deadRegionServers.add(regionserver); -} -for (String