http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d220bc5e/apidocs/org/apache/hadoop/hbase/mapreduce/CellCounter.html
----------------------------------------------------------------------
diff --git a/apidocs/org/apache/hadoop/hbase/mapreduce/CellCounter.html b/apidocs/org/apache/hadoop/hbase/mapreduce/CellCounter.html
index 2fd2324..354e2d3 100644
--- a/apidocs/org/apache/hadoop/hbase/mapreduce/CellCounter.html
+++ b/apidocs/org/apache/hadoop/hbase/mapreduce/CellCounter.html
@@ -286,7 +286,7 @@ implements org.apache.hadoop.util.Tool</pre>
 <ul class="blockListLast">
 <li class="blockList">
 <h4>main</h4>
-<pre>public static&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/mapreduce/CellCounter.html#line.327">main</a>(<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>[]&nbsp;args)
+<pre>public static&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/mapreduce/CellCounter.html#line.332">main</a>(<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>[]&nbsp;args)
                  throws <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Exception.html?is-external=true" title="class or interface in java.lang">Exception</a></pre>
 <div class="block">Main entry point.</div>
 <dl>

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d220bc5e/apidocs/org/apache/hadoop/hbase/mapreduce/WALPlayer.html
----------------------------------------------------------------------
diff --git a/apidocs/org/apache/hadoop/hbase/mapreduce/WALPlayer.html b/apidocs/org/apache/hadoop/hbase/mapreduce/WALPlayer.html
index 9a603e5..0b077d2 100644
--- a/apidocs/org/apache/hadoop/hbase/mapreduce/WALPlayer.html
+++ b/apidocs/org/apache/hadoop/hbase/mapreduce/WALPlayer.html
@@ -416,7 +416,7 @@ implements org.apache.hadoop.util.Tool</pre>
 <ul class="blockList">
 <li class="blockList">
 <h4>main</h4>
-<pre>public static&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/mapreduce/WALPlayer.html#line.377">main</a>(<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>[]&nbsp;args)
+<pre>public static&nbsp;void&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/mapreduce/WALPlayer.html#line.382">main</a>(<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>[]&nbsp;args)
                  throws <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Exception.html?is-external=true" title="class or interface in java.lang">Exception</a></pre>
 <div class="block">Main entry point.</div>
 <dl>
@@ -433,7 +433,7 @@ implements org.apache.hadoop.util.Tool</pre>
 <ul class="blockListLast">
 <li class="blockList">
 <h4>run</h4>
-<pre>public&nbsp;int&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/mapreduce/WALPlayer.html#line.383">run</a>(<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>[]&nbsp;args)
+<pre>public&nbsp;int&nbsp;<a href="../../../../../src-html/org/apache/hadoop/hbase/mapreduce/WALPlayer.html#line.388">run</a>(<a href="https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true" title="class or interface in java.lang">String</a>[]&nbsp;args)
         throws <a href="https://docs.oracle.com/javase/8/docs/api/java/lang/Exception.html?is-external=true" title="class or interface in java.lang">Exception</a></pre>
 <dl>
 <dt><span class="overrideSpecifyLabel">Specified by:</span></dt>

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d220bc5e/apidocs/src-html/org/apache/hadoop/hbase/client/Admin.html
----------------------------------------------------------------------
diff --git a/apidocs/src-html/org/apache/hadoop/hbase/client/Admin.html b/apidocs/src-html/org/apache/hadoop/hbase/client/Admin.html
index 4bdaf23..863532f 100644
--- a/apidocs/src-html/org/apache/hadoop/hbase/client/Admin.html
+++ b/apidocs/src-html/org/apache/hadoop/hbase/client/Admin.html
@@ -2735,7 +2735,18 @@
 <span class="sourceLineNo">2727</span>   * @return List of servers that are not cleared<a name="line.2727"></a>
 <span class="sourceLineNo">2728</span>   */<a name="line.2728"></a>
 <span class="sourceLineNo">2729</span>  List&lt;ServerName&gt; clearDeadServers(final List&lt;ServerName&gt; servers) throws IOException;<a name="line.2729"></a>
-<span class="sourceLineNo">2730</span>}<a name="line.2730"></a>
+<span class="sourceLineNo">2730</span><a name="line.2730"></a>
+<span class="sourceLineNo">2731</span>  /**<a name="line.2731"></a>
+<span class="sourceLineNo">2732</span>   * Create a new table by cloning the existent table schema.<a name="line.2732"></a>
+<span class="sourceLineNo">2733</span>   *<a name="line.2733"></a>
+<span class="sourceLineNo">2734</span>   * @param tableName name of the table to be cloned<a name="line.2734"></a>
+<span class="sourceLineNo">2735</span>   * @param newTableName name of the new table where the table will be created<a name="line.2735"></a>
+<span class="sourceLineNo">2736</span>   * @param preserveSplits True if the splits should be preserved<a name="line.2736"></a>
+<span class="sourceLineNo">2737</span>   * @throws IOException if a remote or network exception occurs<a name="line.2737"></a>
+<span class="sourceLineNo">2738</span>   */<a name="line.2738"></a>
+<span class="sourceLineNo">2739</span>  void cloneTableSchema(final TableName tableName, final TableName newTableName,<a name="line.2739"></a>
+<span class="sourceLineNo">2740</span>      final boolean preserveSplits) throws IOException;<a name="line.2740"></a>
+<span class="sourceLineNo">2741</span>}<a name="line.2741"></a>
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d220bc5e/apidocs/src-html/org/apache/hadoop/hbase/client/AsyncAdmin.html
----------------------------------------------------------------------
diff --git a/apidocs/src-html/org/apache/hadoop/hbase/client/AsyncAdmin.html b/apidocs/src-html/org/apache/hadoop/hbase/client/AsyncAdmin.html
index 5c18b82..27eb0b0 100644
--- a/apidocs/src-html/org/apache/hadoop/hbase/client/AsyncAdmin.html
+++ b/apidocs/src-html/org/apache/hadoop/hbase/client/AsyncAdmin.html
@@ -1238,7 +1238,17 @@
 <span class="sourceLineNo">1230</span>   * @return CacheEvictionStats related to the eviction wrapped by a {@link CompletableFuture}.<a name="line.1230"></a>
 <span class="sourceLineNo">1231</span>   */<a name="line.1231"></a>
 <span class="sourceLineNo">1232</span>  CompletableFuture&lt;CacheEvictionStats&gt; clearBlockCache(final TableName tableName);<a name="line.1232"></a>
-<span class="sourceLineNo">1233</span>}<a name="line.1233"></a>
+<span class="sourceLineNo">1233</span><a name="line.1233"></a>
+<span class="sourceLineNo">1234</span>  /**<a name="line.1234"></a>
+<span class="sourceLineNo">1235</span>   * Create a new table by cloning the existent table schema.<a name="line.1235"></a>
+<span class="sourceLineNo">1236</span>   *<a name="line.1236"></a>
+<span class="sourceLineNo">1237</span>   * @param tableName name of the table to be cloned<a name="line.1237"></a>
+<span class="sourceLineNo">1238</span>   * @param newTableName name of the new table where the table will be created<a name="line.1238"></a>
+<span class="sourceLineNo">1239</span>   * @param preserveSplits True if the splits should be preserved<a name="line.1239"></a>
+<span class="sourceLineNo">1240</span>   */<a name="line.1240"></a>
+<span class="sourceLineNo">1241</span>  CompletableFuture&lt;Void&gt; cloneTableSchema(final TableName tableName,<a name="line.1241"></a>
+<span class="sourceLineNo">1242</span>      final TableName newTableName, final boolean preserveSplits);<a name="line.1242"></a>
+<span class="sourceLineNo">1243</span>}<a name="line.1243"></a>
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d220bc5e/apidocs/src-html/org/apache/hadoop/hbase/mapreduce/CellCounter.html
----------------------------------------------------------------------
diff --git a/apidocs/src-html/org/apache/hadoop/hbase/mapreduce/CellCounter.html b/apidocs/src-html/org/apache/hadoop/hbase/mapreduce/CellCounter.html
index 67249a1..6f1fc1b 100644
--- a/apidocs/src-html/org/apache/hadoop/hbase/mapreduce/CellCounter.html
+++ b/apidocs/src-html/org/apache/hadoop/hbase/mapreduce/CellCounter.html
@@ -300,44 +300,49 @@
 <span class="sourceLineNo">292</span>  @Override<a name="line.292"></a>
 <span class="sourceLineNo">293</span>  public int run(String[] args) throws Exception {<a name="line.293"></a>
 <span class="sourceLineNo">294</span>    if (args.length &lt; 2) {<a name="line.294"></a>
-<span class="sourceLineNo">295</span>      System.err.println("ERROR: Wrong number of parameters: " + args.length);<a name="line.295"></a>
-<span class="sourceLineNo">296</span>      System.err.println("Usage: CellCounter ");<a name="line.296"></a>
-<span class="sourceLineNo">297</span>      System.err.println("       &lt;tablename&gt; &lt;outputDir&gt; &lt;reportSeparator&gt; [^[regex pattern] or " +<a name="line.297"></a>
-<span class="sourceLineNo">298</span>        "[Prefix] for row filter]] --starttime=[starttime] --endtime=[endtime]");<a name="line.298"></a>
-<span class="sourceLineNo">299</span>      System.err.println("  Note: -D properties will be applied to the conf used. ");<a name="line.299"></a>
-<span class="sourceLineNo">300</span>      System.err.println("  Additionally, all of the SCAN properties from TableInputFormat");<a name="line.300"></a>
-<span class="sourceLineNo">301</span>      System.err.println("  can be specified to get fine grained control on what is counted..");<a name="line.301"></a>
-<span class="sourceLineNo">302</span>      System.err.println("   -D " + TableInputFormat.SCAN_ROW_START + "=&lt;rowkey&gt;");<a name="line.302"></a>
-<span class="sourceLineNo">303</span>      System.err.println("   -D " + TableInputFormat.SCAN_ROW_STOP + "=&lt;rowkey&gt;");<a name="line.303"></a>
-<span class="sourceLineNo">304</span>      System.err.println("   -D " + TableInputFormat.SCAN_COLUMNS + "=\"&lt;col1&gt; &lt;col2&gt;...\"");<a name="line.304"></a>
-<span class="sourceLineNo">305</span>      System.err.println("   -D " + TableInputFormat.SCAN_COLUMN_FAMILY + "=&lt;family1&gt;,&lt;family2&gt;, ...");<a name="line.305"></a>
-<span class="sourceLineNo">306</span>      System.err.println("   -D " + TableInputFormat.SCAN_TIMESTAMP + "=&lt;timestamp&gt;");<a name="line.306"></a>
-<span class="sourceLineNo">307</span>      System.err.println("   -D " + TableInputFormat.SCAN_TIMERANGE_START + "=&lt;timestamp&gt;");<a name="line.307"></a>
-<span class="sourceLineNo">308</span>      System.err.println("   -D " + TableInputFormat.SCAN_TIMERANGE_END + "=&lt;timestamp&gt;");<a name="line.308"></a>
-<span class="sourceLineNo">309</span>      System.err.println("   -D " + TableInputFormat.SCAN_MAXVERSIONS + "=&lt;count&gt;");<a name="line.309"></a>
-<span class="sourceLineNo">310</span>      System.err.println("   -D " + TableInputFormat.SCAN_CACHEDROWS + "=&lt;count&gt;");<a name="line.310"></a>
-<span class="sourceLineNo">311</span>      System.err.println("   -D " + TableInputFormat.SCAN_BATCHSIZE + "=&lt;count&gt;");<a name="line.311"></a>
-<span class="sourceLineNo">312</span>      System.err.println(" &lt;reportSeparator&gt; parameter can be used to override the default report separator " +<a name="line.312"></a>
-<span class="sourceLineNo">313</span>          "string : used to separate the rowId/column family name and qualifier name.");<a name="line.313"></a>
-<span class="sourceLineNo">314</span>      System.err.println(" [^[regex pattern] or [Prefix] parameter can be used to limit the cell counter count " +<a name="line.314"></a>
-<span class="sourceLineNo">315</span>          "operation to a limited subset of rows from the table based on regex or prefix pattern.");<a name="line.315"></a>
-<span class="sourceLineNo">316</span>      return -1;<a name="line.316"></a>
-<span class="sourceLineNo">317</span>    }<a name="line.317"></a>
-<span class="sourceLineNo">318</span>    Job job = createSubmittableJob(getConf(), args);<a name="line.318"></a>
-<span class="sourceLineNo">319</span>    return (job.waitForCompletion(true) ? 0 : 1);<a name="line.319"></a>
-<span class="sourceLineNo">320</span>  }<a name="line.320"></a>
-<span class="sourceLineNo">321</span><a name="line.321"></a>
-<span class="sourceLineNo">322</span>  /**<a name="line.322"></a>
-<span class="sourceLineNo">323</span>   * Main entry point.<a name="line.323"></a>
-<span class="sourceLineNo">324</span>   * @param args The command line parameters.<a name="line.324"></a>
-<span class="sourceLineNo">325</span>   * @throws Exception When running the job fails.<a name="line.325"></a>
-<span class="sourceLineNo">326</span>   */<a name="line.326"></a>
-<span class="sourceLineNo">327</span>  public static void main(String[] args) throws Exception {<a name="line.327"></a>
-<span class="sourceLineNo">328</span>    int errCode = ToolRunner.run(HBaseConfiguration.create(), new CellCounter(), args);<a name="line.328"></a>
-<span class="sourceLineNo">329</span>    System.exit(errCode);<a name="line.329"></a>
-<span class="sourceLineNo">330</span>  }<a name="line.330"></a>
-<span class="sourceLineNo">331</span><a name="line.331"></a>
-<span class="sourceLineNo">332</span>}<a name="line.332"></a>
+<span class="sourceLineNo">295</span>      printUsage(args.length);<a name="line.295"></a>
+<span class="sourceLineNo">296</span>      return -1;<a name="line.296"></a>
+<span class="sourceLineNo">297</span>    }<a name="line.297"></a>
+<span class="sourceLineNo">298</span>    Job job = createSubmittableJob(getConf(), args);<a name="line.298"></a>
+<span class="sourceLineNo">299</span>    return (job.waitForCompletion(true) ? 0 : 1);<a name="line.299"></a>
+<span class="sourceLineNo">300</span>  }<a name="line.300"></a>
+<span class="sourceLineNo">301</span><a name="line.301"></a>
+<span class="sourceLineNo">302</span>  private void printUsage(int parameterCount) {<a name="line.302"></a>
+<span class="sourceLineNo">303</span>    System.err.println("ERROR: Wrong number of parameters: " + parameterCount);<a name="line.303"></a>
+<span class="sourceLineNo">304</span>    System.err.println("Usage: hbase cellcounter &lt;tablename&gt; &lt;outputDir&gt; [reportSeparator] "<a name="line.304"></a>
+<span class="sourceLineNo">305</span>        + "[^[regex pattern] or [Prefix]] [--starttime=&lt;starttime&gt; --endtime=&lt;endtime&gt;]");<a name="line.305"></a>
+<span class="sourceLineNo">306</span>    System.err.println("  Note: -D properties will be applied to the conf used.");<a name="line.306"></a>
+<span class="sourceLineNo">307</span>    System.err.println("  Additionally, all of the SCAN properties from TableInputFormat can be "<a name="line.307"></a>
+<span class="sourceLineNo">308</span>        + "specified to get fine grained control on what is counted.");<a name="line.308"></a>
+<span class="sourceLineNo">309</span>    System.err.println("   -D" + TableInputFormat.SCAN_ROW_START + "=&lt;rowkey&gt;");<a name="line.309"></a>
+<span class="sourceLineNo">310</span>    System.err.println("   -D" + TableInputFormat.SCAN_ROW_STOP + "=&lt;rowkey&gt;");<a name="line.310"></a>
+<span class="sourceLineNo">311</span>    System.err.println("   -D" + TableInputFormat.SCAN_COLUMNS + "=\"&lt;col1&gt; &lt;col2&gt;...\"");<a name="line.311"></a>
+<span class="sourceLineNo">312</span>    System.err.println("   -D" + TableInputFormat.SCAN_COLUMN_FAMILY<a name="line.312"></a>
+<span class="sourceLineNo">313</span>        + "=&lt;family1&gt;,&lt;family2&gt;, ...");<a name="line.313"></a>
+<span class="sourceLineNo">314</span>    System.err.println("   -D" + TableInputFormat.SCAN_TIMESTAMP + "=&lt;timestamp&gt;");<a name="line.314"></a>
+<span class="sourceLineNo">315</span>    System.err.println("   -D" + TableInputFormat.SCAN_TIMERANGE_START + "=&lt;timestamp&gt;");<a name="line.315"></a>
+<span class="sourceLineNo">316</span>    System.err.println("   -D" + TableInputFormat.SCAN_TIMERANGE_END + "=&lt;timestamp&gt;");<a name="line.316"></a>
+<span class="sourceLineNo">317</span>    System.err.println("   -D" + TableInputFormat.SCAN_MAXVERSIONS + "=&lt;count&gt;");<a name="line.317"></a>
+<span class="sourceLineNo">318</span>    System.err.println("   -D" + TableInputFormat.SCAN_CACHEDROWS + "=&lt;count&gt;");<a name="line.318"></a>
+<span class="sourceLineNo">319</span>    System.err.println("   -D" + TableInputFormat.SCAN_BATCHSIZE + "=&lt;count&gt;");<a name="line.319"></a>
+<span class="sourceLineNo">320</span>    System.err.println(" &lt;reportSeparator&gt; parameter can be used to override the default report "<a name="line.320"></a>
+<span class="sourceLineNo">321</span>        + "separator string : used to separate the rowId/column family name and qualifier name.");<a name="line.321"></a>
+<span class="sourceLineNo">322</span>    System.err.println(" [^[regex pattern] or [Prefix] parameter can be used to limit the cell "<a name="line.322"></a>
+<span class="sourceLineNo">323</span>        + "counter count operation to a limited subset of rows from the table based on regex or "<a name="line.323"></a>
+<span class="sourceLineNo">324</span>        + "prefix pattern.");<a name="line.324"></a>
+<span class="sourceLineNo">325</span>  }<a name="line.325"></a>
+<span class="sourceLineNo">326</span><a name="line.326"></a>
+<span class="sourceLineNo">327</span>  /**<a name="line.327"></a>
+<span class="sourceLineNo">328</span>   * Main entry point.<a name="line.328"></a>
+<span class="sourceLineNo">329</span>   * @param args The command line parameters.<a name="line.329"></a>
+<span class="sourceLineNo">330</span>   * @throws Exception When running the job fails.<a name="line.330"></a>
+<span class="sourceLineNo">331</span>   */<a name="line.331"></a>
+<span class="sourceLineNo">332</span>  public static void main(String[] args) throws Exception {<a name="line.332"></a>
+<span class="sourceLineNo">333</span>    int errCode = ToolRunner.run(HBaseConfiguration.create(), new CellCounter(), args);<a name="line.333"></a>
+<span class="sourceLineNo">334</span>    System.exit(errCode);<a name="line.334"></a>
+<span class="sourceLineNo">335</span>  }<a name="line.335"></a>
+<span class="sourceLineNo">336</span><a name="line.336"></a>
+<span class="sourceLineNo">337</span>}<a name="line.337"></a>
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d220bc5e/apidocs/src-html/org/apache/hadoop/hbase/mapreduce/RowCounter.html
----------------------------------------------------------------------
diff --git a/apidocs/src-html/org/apache/hadoop/hbase/mapreduce/RowCounter.html b/apidocs/src-html/org/apache/hadoop/hbase/mapreduce/RowCounter.html
index 56ec3ac..8f4cacb 100644
--- a/apidocs/src-html/org/apache/hadoop/hbase/mapreduce/RowCounter.html
+++ b/apidocs/src-html/org/apache/hadoop/hbase/mapreduce/RowCounter.html
@@ -229,9 +229,9 @@
 <span class="sourceLineNo">221</span>   * Note that we don't document --expected-count, because it's intended for test.<a name="line.221"></a>
 <span class="sourceLineNo">222</span>   */<a name="line.222"></a>
 <span class="sourceLineNo">223</span>  private static void printUsage() {<a name="line.223"></a>
-<span class="sourceLineNo">224</span>    System.err.println("Usage: RowCounter [options] &lt;tablename&gt; " +<a name="line.224"></a>
-<span class="sourceLineNo">225</span>        "[--starttime=[start] --endtime=[end] " +<a name="line.225"></a>
-<span class="sourceLineNo">226</span>        "[--range=[startKey],[endKey][;[startKey],[endKey]...]] [&lt;column1&gt; &lt;column2&gt;...]");<a name="line.226"></a>
+<span class="sourceLineNo">224</span>    System.err.println("Usage: hbase rowcounter [options] &lt;tablename&gt; "<a name="line.224"></a>
+<span class="sourceLineNo">225</span>        + "[--starttime=&lt;start&gt; --endtime=&lt;end&gt;] "<a name="line.225"></a>
+<span class="sourceLineNo">226</span>        + "[--range=[startKey],[endKey][;[startKey],[endKey]...]] [&lt;column1&gt; &lt;column2&gt;...]");<a name="line.226"></a>
 <span class="sourceLineNo">227</span>    System.err.println("For performance consider the following options:\n"<a name="line.227"></a>
 <span class="sourceLineNo">228</span>        + "-Dhbase.client.scanner.caching=100\n"<a name="line.228"></a>
 <span class="sourceLineNo">229</span>        + "-Dmapreduce.map.speculative=false");<a name="line.229"></a>

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d220bc5e/apidocs/src-html/org/apache/hadoop/hbase/mapreduce/WALPlayer.html
----------------------------------------------------------------------
diff --git a/apidocs/src-html/org/apache/hadoop/hbase/mapreduce/WALPlayer.html b/apidocs/src-html/org/apache/hadoop/hbase/mapreduce/WALPlayer.html
index a5477ac..9501e97 100644
--- a/apidocs/src-html/org/apache/hadoop/hbase/mapreduce/WALPlayer.html
+++ b/apidocs/src-html/org/apache/hadoop/hbase/mapreduce/WALPlayer.html
@@ -357,46 +357,51 @@
 <span class="sourceLineNo">349</span>      System.err.println("ERROR: " + errorMsg);<a name="line.349"></a>
 <span class="sourceLineNo">350</span>    }<a name="line.350"></a>
 <span class="sourceLineNo">351</span>    System.err.println("Usage: " + NAME + " [options] &lt;wal inputdir&gt; &lt;tables&gt; [&lt;tableMappings&gt;]");<a name="line.351"></a>
-<span class="sourceLineNo">352</span>    System.err.println("Read all WAL entries for &lt;tables&gt;.");<a name="line.352"></a>
-<span class="sourceLineNo">353</span>    System.err.println("If no tables (\"\") are specific, all tables are imported.");<a name="line.353"></a>
-<span class="sourceLineNo">354</span>    System.err.println("(Careful, even hbase:meta entries will be imported" + " in that case.)");<a name="line.354"></a>
-<span class="sourceLineNo">355</span>    System.err.println("Otherwise &lt;tables&gt; is a comma separated list of tables.\n");<a name="line.355"></a>
-<span class="sourceLineNo">356</span>    System.err.println("The WAL entries can be mapped to new set of tables via &lt;tableMapping&gt;.");<a name="line.356"></a>
-<span class="sourceLineNo">357</span>    System.err.println("&lt;tableMapping&gt; is a command separated list of targettables.");<a name="line.357"></a>
+<span class="sourceLineNo">352</span>    System.err.println("Replay all WAL files into HBase.");<a name="line.352"></a>
+<span class="sourceLineNo">353</span>    System.err.println("&lt;tables&gt; is a comma separated list of tables.");<a name="line.353"></a>
+<span class="sourceLineNo">354</span>    System.err.println("If no tables (\"\") are specified, all tables are imported.");<a name="line.354"></a>
+<span class="sourceLineNo">355</span>    System.err.println("(Be careful, hbase:meta entries will be imported in this case.)\n");<a name="line.355"></a>
+<span class="sourceLineNo">356</span>    System.err.println("WAL entries can be mapped to new set of tables via &lt;tableMappings&gt;.");<a name="line.356"></a>
+<span class="sourceLineNo">357</span>    System.err.println("&lt;tableMappings&gt; is a comma separated list of target tables.");<a name="line.357"></a>
 <span class="sourceLineNo">358</span>    System.err.println("If specified, each table in &lt;tables&gt; must have a mapping.\n");<a name="line.358"></a>
 <span class="sourceLineNo">359</span>    System.err.println("By default " + NAME + " will load data directly into HBase.");<a name="line.359"></a>
-<span class="sourceLineNo">360</span>    System.err.println("To generate HFiles for a bulk data load instead, pass the option:");<a name="line.360"></a>
+<span class="sourceLineNo">360</span>    System.err.println("To generate HFiles for a bulk data load instead, pass the following option:");<a name="line.360"></a>
 <span class="sourceLineNo">361</span>    System.err.println("  -D" + BULK_OUTPUT_CONF_KEY + "=/path/for/output");<a name="line.361"></a>
 <span class="sourceLineNo">362</span>    System.err.println("  (Only one table can be specified, and no mapping is allowed!)");<a name="line.362"></a>
-<span class="sourceLineNo">363</span>    System.err.println("Other options: (specify time range to WAL edit to consider)");<a name="line.363"></a>
+<span class="sourceLineNo">363</span>    System.err.println("Time range options:");<a name="line.363"></a>
 <span class="sourceLineNo">364</span>    System.err.println("  -D" + WALInputFormat.START_TIME_KEY + "=[date|ms]");<a name="line.364"></a>
 <span class="sourceLineNo">365</span>    System.err.println("  -D" + WALInputFormat.END_TIME_KEY + "=[date|ms]");<a name="line.365"></a>
-<span class="sourceLineNo">366</span>    System.err.println("   -D " + JOB_NAME_CONF_KEY<a name="line.366"></a>
-<span class="sourceLineNo">367</span>        + "=jobName - use the specified mapreduce job name for the wal player");<a name="line.367"></a>
-<span class="sourceLineNo">368</span>    System.err.println("For performance also consider the following options:\n"<a name="line.368"></a>
-<span class="sourceLineNo">369</span>        + "  -Dmapreduce.map.speculative=false\n" + "  -Dmapreduce.reduce.speculative=false");<a name="line.369"></a>
-<span class="sourceLineNo">370</span>  }<a name="line.370"></a>
-<span class="sourceLineNo">371</span><a name="line.371"></a>
-<span class="sourceLineNo">372</span>  /**<a name="line.372"></a>
-<span class="sourceLineNo">373</span>   * Main entry point.<a name="line.373"></a>
-<span class="sourceLineNo">374</span>   * @param args The command line parameters.<a name="line.374"></a>
-<span class="sourceLineNo">375</span>   * @throws Exception When running the job fails.<a name="line.375"></a>
-<span class="sourceLineNo">376</span>   */<a name="line.376"></a>
-<span class="sourceLineNo">377</span>  public static void main(String[] args) throws Exception {<a name="line.377"></a>
-<span class="sourceLineNo">378</span>    int ret = ToolRunner.run(new WALPlayer(HBaseConfiguration.create()), args);<a name="line.378"></a>
-<span class="sourceLineNo">379</span>    System.exit(ret);<a name="line.379"></a>
-<span class="sourceLineNo">380</span>  }<a name="line.380"></a>
-<span class="sourceLineNo">381</span><a name="line.381"></a>
-<span class="sourceLineNo">382</span>  @Override<a name="line.382"></a>
-<span class="sourceLineNo">383</span>  public int run(String[] args) throws Exception {<a name="line.383"></a>
-<span class="sourceLineNo">384</span>    if (args.length &lt; 2) {<a name="line.384"></a>
-<span class="sourceLineNo">385</span>      usage("Wrong number of arguments: " + args.length);<a name="line.385"></a>
-<span class="sourceLineNo">386</span>      System.exit(-1);<a name="line.386"></a>
-<span class="sourceLineNo">387</span>    }<a name="line.387"></a>
-<span class="sourceLineNo">388</span>    Job job = createSubmittableJob(args);<a name="line.388"></a>
-<span class="sourceLineNo">389</span>    return job.waitForCompletion(true) ? 0 : 1;<a name="line.389"></a>
-<span class="sourceLineNo">390</span>  }<a name="line.390"></a>
-<span class="sourceLineNo">391</span>}<a name="line.391"></a>
+<span class="sourceLineNo">366</span>    System.err.println("  (The start and the end date of timerange. The dates can be expressed");<a name="line.366"></a>
+<span class="sourceLineNo">367</span>    System.err.println("  in milliseconds since epoch or in yyyy-MM-dd'T'HH:mm:ss.SS format.");<a name="line.367"></a>
+<span class="sourceLineNo">368</span>    System.err.println("  E.g. 1234567890120 or 2009-02-13T23:32:30.12)");<a name="line.368"></a>
+<span class="sourceLineNo">369</span>    System.err.println("Other options:");<a name="line.369"></a>
+<span class="sourceLineNo">370</span>    System.err.println("  -D" + JOB_NAME_CONF_KEY + "=jobName");<a name="line.370"></a>
+<span class="sourceLineNo">371</span>    System.err.println("  Use the specified mapreduce job name for the wal player");<a name="line.371"></a>
+<span class="sourceLineNo">372</span>    System.err.println("For performance also consider the following options:\n"<a name="line.372"></a>
+<span class="sourceLineNo">373</span>        + "  -Dmapreduce.map.speculative=false\n"<a name="line.373"></a>
+<span class="sourceLineNo">374</span>        + "  -Dmapreduce.reduce.speculative=false");<a name="line.374"></a>
+<span class="sourceLineNo">375</span>  }<a name="line.375"></a>
+<span class="sourceLineNo">376</span><a name="line.376"></a>
+<span class="sourceLineNo">377</span>  /**<a name="line.377"></a>
+<span class="sourceLineNo">378</span>   * Main entry point.<a name="line.378"></a>
+<span class="sourceLineNo">379</span>   * @param args The command line parameters.<a name="line.379"></a>
+<span class="sourceLineNo">380</span>   * @throws Exception When running the job fails.<a name="line.380"></a>
+<span class="sourceLineNo">381</span>   */<a name="line.381"></a>
+<span class="sourceLineNo">382</span>  public static void main(String[] args) throws Exception {<a name="line.382"></a>
+<span class="sourceLineNo">383</span>    int ret = ToolRunner.run(new WALPlayer(HBaseConfiguration.create()), args);<a name="line.383"></a>
+<span class="sourceLineNo">384</span>    System.exit(ret);<a name="line.384"></a>
+<span class="sourceLineNo">385</span>  }<a name="line.385"></a>
+<span class="sourceLineNo">386</span><a name="line.386"></a>
+<span class="sourceLineNo">387</span>  @Override<a name="line.387"></a>
+<span class="sourceLineNo">388</span>  public int run(String[] args) throws Exception {<a name="line.388"></a>
+<span class="sourceLineNo">389</span>    if (args.length &lt; 2) {<a name="line.389"></a>
+<span class="sourceLineNo">390</span>      usage("Wrong number of arguments: " + args.length);<a name="line.390"></a>
+<span class="sourceLineNo">391</span>      System.exit(-1);<a name="line.391"></a>
+<span class="sourceLineNo">392</span>    }<a name="line.392"></a>
+<span class="sourceLineNo">393</span>    Job job = createSubmittableJob(args);<a name="line.393"></a>
+<span class="sourceLineNo">394</span>    return job.waitForCompletion(true) ? 0 : 1;<a name="line.394"></a>
+<span class="sourceLineNo">395</span>  }<a name="line.395"></a>
+<span class="sourceLineNo">396</span>}<a name="line.396"></a>
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d220bc5e/book.html
----------------------------------------------------------------------
diff --git a/book.html b/book.html
index 8269c24..f963820 100644
--- a/book.html
+++ b/book.html
@@ -1340,7 +1340,7 @@ You can still test the behavior of the primary Master or a RegionServer by killi
 <div class="content">
 This chapter expands upon the <a href="#getting_started">Getting Started</a> chapter to further explain configuration of Apache HBase.
 Please read this chapter carefully, especially the <a href="#basic.prerequisites">Basic Prerequisites</a>
-to ensure that your HBase testing and deployment goes smoothly, and prevent data loss.
+to ensure that your HBase testing and deployment goes smoothly.
 Familiarize yourself with <a href="#hbase_supported_tested_definitions">Support and Testing Expectations</a> as well.
 </div>
 </div>
@@ -1564,10 +1564,10 @@ You must set <code>JAVA_HOME</code> on each node of your cluster. <em>hbase-env.
 <p>For example, assuming that a schema had 3 ColumnFamilies per region with an average of 3 StoreFiles per ColumnFamily, and there are 100 regions per RegionServer, the JVM will open <code>3 * 3 * 100 = 900</code> file descriptors, not counting open JAR files, configuration files, and others. Opening a file does not take many resources, and the risk of allowing a user to open too many files is minimal.</p>
 </div>
 <div class="paragraph">
-<p>Another related setting is the number of processes a user is allowed to run at once. In Linux and Unix, the number of processes is set using the <code>ulimit -u</code> command. This should not be confused with the <code>nproc</code> command, which controls the number of CPUs available to a given user. Under load, a <code>ulimit -u</code> that is too low can cause OutOfMemoryError exceptions. See Jack Levin&#8217;s major HDFS issues thread on the hbase-users mailing list, from 2011.</p>
+<p>Another related setting is the number of processes a user is allowed to run at once. In Linux and Unix, the number of processes is set using the <code>ulimit -u</code> command. This should not be confused with the <code>nproc</code> command, which controls the number of CPUs available to a given user. Under load, a <code>ulimit -u</code> that is too low can cause OutOfMemoryError exceptions.</p>
 </div>
 <div class="paragraph">
-<p>Configuring the maximum number of file descriptors and processes for the user who is running the HBase process is an operating system configuration, rather than an HBase configuration. It is also important to be sure that the settings are changed for the user that actually runs HBase. To see which user started HBase, and that user&#8217;s ulimit configuration, look at the first line of the HBase log for that instance. A useful read setting config on your hadoop cluster is Aaron Kimball&#8217;s Configuration Parameters: What can you just ignore?</p>
+<p>Configuring the maximum number of file descriptors and processes for the user who is running the HBase process is an operating system configuration, rather than an HBase configuration. It is also important to be sure that the settings are changed for the user that actually runs HBase. To see which user started HBase, and that user&#8217;s ulimit configuration, look at the first line of the HBase log for that instance.</p>
 </div>
 <div class="exampleblock">
 <div class="title">Example 5. <code>ulimit</code> Settings on Ubuntu</div>
@@ -1622,7 +1622,8 @@ See <a href="https://wiki.apache.org/hadoop/Distributions%20and%20Commercial%20S
 <td class="content">
 <div class="title">Hadoop 2.x is recommended.</div>
 <div class="paragraph">
-<p>Hadoop 2.x is faster and includes features, such as short-circuit reads, which will help improve your HBase random read profile.
+<p>Hadoop 2.x is faster and includes features, such as short-circuit reads (see <a href="#perf.hdfs.configs.localread">Leveraging local data</a>),
+which will help improve your HBase random read profile.
 Hadoop 2.x also includes important bug fixes that will improve your overall HBase experience. HBase does not support running with
 earlier versions of Hadoop. See the table below for requirements specific to different HBase versions.</p>
 </div>
@@ -1709,12 +1710,18 @@ earlier versions of Hadoop. See the table below for requirements specific to dif
 <td class="tableblock halign-left valign-top"><p class="tableblock">X</p></td>
 </tr>
 <tr>
-<td class="tableblock halign-left valign-top"><p class="tableblock">Hadoop-2.8.2+</p></td>
+<td class="tableblock halign-left valign-top"><p class="tableblock">Hadoop-2.8.2</p></td>
 <td class="tableblock halign-left valign-top"><p class="tableblock">NT</p></td>
 <td class="tableblock halign-left valign-top"><p class="tableblock">NT</p></td>
 <td class="tableblock halign-left valign-top"><p class="tableblock">NT</p></td>
 </tr>
 <tr>
+<td class="tableblock halign-left valign-top"><p class="tableblock">Hadoop-2.8.3+</p></td>
+<td class="tableblock halign-left valign-top"><p class="tableblock">NT</p></td>
+<td class="tableblock halign-left valign-top"><p class="tableblock">NT</p></td>
+<td class="tableblock halign-left valign-top"><p class="tableblock">S</p></td>
+</tr>
+<tr>
 <td class="tableblock halign-left valign-top"><p class="tableblock">Hadoop-2.9.0</p></td>
 <td class="tableblock halign-left valign-top"><p class="tableblock">X</p></td>
 <td class="tableblock halign-left valign-top"><p class="tableblock">X</p></td>
@@ -1773,7 +1780,7 @@ data loss. This patch is present in Apache Hadoop releases 2.6.1+.</p>
 <td class="content">
 <div class="title">Hadoop 2.y.0 Releases</div>
 <div class="paragraph">
-<p>Starting around the time of Hadoop version 2.7.0, the Hadoop PMC got into the habit of calling out new minor releases on their major version 2 release line as not stable / production ready. As such, HBase expressly advises downstream users to avoid running on top of these releases. Note that additionally the 2.8.1 was release was given the same caveat by the Hadoop PMC. For reference, see the release announcements for <a href="https://s.apache.org/hadoop-2.7.0-announcement">Apache Hadoop 2.7.0</a>, <a href="https://s.apache.org/hadoop-2.8.0-announcement">Apache Hadoop 2.8.0</a>, <a href="https://s.apache.org/hadoop-2.8.1-announcement">Apache Hadoop 2.8.1</a>, and <a href="https://s.apache.org/hadoop-2.9.0-announcement">Apache Hadoop 2.9.0</a>.</p>
+<p>Starting around the time of Hadoop version 2.7.0, the Hadoop PMC got into the habit of calling out new minor releases on their major version 2 release line as not stable / production ready. As such, HBase expressly advises downstream users to avoid running on top of these releases. Note that additionally the 2.8.1 release was given the same caveat by the Hadoop PMC. For reference, see the release announcements for <a href="https://s.apache.org/hadoop-2.7.0-announcement">Apache Hadoop 2.7.0</a>, <a href="https://s.apache.org/hadoop-2.8.0-announcement">Apache Hadoop 2.8.0</a>, <a href="https://s.apache.org/hadoop-2.8.1-announcement">Apache Hadoop 2.8.1</a>, and <a href="https://s.apache.org/hadoop-2.9.0-announcement">Apache Hadoop 2.9.0</a>.</p>
 </div>
 </td>
 </tr>
@@ -1788,12 +1795,14 @@ data loss. This patch is present in Apache Hadoop releases 2.6.1+.</p>
 <td class="content">
 <div class="title">Replace the Hadoop Bundled With HBase!</div>
 <div class="paragraph">
-<p>Because HBase depends on Hadoop, it bundles an instance of the Hadoop jar under its <em>lib</em> directory.
-The bundled jar is ONLY for use in standalone mode.
+<p>Because HBase depends on Hadoop, it bundles Hadoop jars under its <em>lib</em> directory.
+The bundled jars are ONLY for use in standalone mode.
 In distributed mode, it is <em>critical</em> that the version of Hadoop that is out on your cluster match what is under HBase.
-Replace the hadoop jar found in the HBase lib directory with the hadoop jar you are running on your cluster to avoid version mismatch issues.
-Make sure you replace the jar in HBase across your whole cluster.
-Hadoop version mismatch issues have various manifestations but often all look like its hung.</p>
+Replace the hadoop jars found in the HBase lib directory with the equivalent hadoop jars from the version you are running
+on your cluster to avoid version mismatch issues.
+Make sure you replace the jars under HBase across your whole cluster.
+Hadoop version mismatch issues have various manifestations. Check for mismatch if
+HBase appears hung.</p>
 </div>
 </td>
 </tr>
@@ -6779,6 +6788,16 @@ Quitting...</code></pre>
 <div class="title">"Distributed Log Replay" feature broken and removed</div>
 <p>The Distributed Log Replay feature was broken and has been removed from HBase 2.y+. As a consequence all related configs, metrics, RPC fields, and logging have also been removed. Note that this feature was found to be unreliable in the run up to HBase 1.0, defaulted to being unused, and was effectively removed in HBase 1.2.0 when we started ignoring the config that turns it on (<a href="https://issues.apache.org/jira/browse/HBASE-14465">HBASE-14465</a>). If you are currently using the feature, be sure to perform a clean shutdown, ensure all DLR work is complete, and disable the feature prior to upgrading.</p>
 </div>
+<div id="upgrade2.0.prefix-tree.removed" class="paragraph">
+<div class="title"><em>prefix-tree</em> encoding removed</div>
+<p>The prefix-tree encoding was removed from HBase 2.0.0 (<a href="https://issues.apache.org/jira/browse/HBASE-19179">HBASE-19179</a>).
+It was (late!) deprecated in hbase-1.2.7, hbase-1.4.0, and hbase-1.3.2.</p>
+</div>
+<div class="paragraph">
+<p>This feature was removed because it was not being actively maintained. If interested in reviving this
+sweet facility, which improved random read latencies at the expense of slower writes,
+write the HBase developers list at <em>dev at hbase dot apache dot org</em>.</p>
+</div>
 <div id="upgrade2.0.metrics" class="paragraph">
 <div class="title">Changed metrics</div>
 <p>The following metrics have changed names:</p>
@@ -6826,6 +6845,19 @@ Quitting...</code></pre>
 </li>
 </ul>
 </div>
+<div id="upgrade2.0.logging" class="paragraph">
+<div class="title">Changed logging</div>
+<p>HBase-2.0.0 now uses <a href="https://www.slf4j.org/";>slf4j</a> as its 
logging frontend.
+Prevously, we used <a href="http://logging.apache.org/log4j/1.2/";>log4j 
(1.2)</a>.
+For most the transition should be seamless; slf4j does a good job interpreting
+<em>log4j.properties</em> logging configuration files such that you should not 
notice
+any difference in your log system emissions.</p>
+</div>
+<div class="paragraph">
+<p>That said, your <em>log4j.properties</em> may need freshening. See <a 
href="https://issues.apache.org/jira/browse/HBASE-20351";>HBASE-20351</a>
+for example, where a stale log configuration file manifest as netty 
configuration
+being dumped at DEBUG level as preamble on every shell command invocation.</p>
+</div>
 <div id="upgrade2.0.zkconfig" class="paragraph">
 <div class="title">ZooKeeper configs no longer read from zoo.cfg</div>
 <p>HBase no longer optionally reads the 'zoo.cfg' file for ZooKeeper related 
configuration settings. If you previously relied on the 
'hbase.config.read.zookeeper.config' config for this functionality, you should 
migrate any needed settings to the hbase-site.xml file while adding the prefix 
'hbase.zookeeper.property.' to each property name.</p>
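
As an illustration of that migration, a zoo.cfg setting such as clientPort=2181 would now live in hbase-site.xml as:

    <property>
      <name>hbase.zookeeper.property.clientPort</name>
      <value>2181</value>
    </property>
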
@@ -24259,10 +24291,19 @@ Analyze.</p>
 </div>
 <div class="sect2">
 <h3 id="trouble.log.gc"><a class="anchor" href="#trouble.log.gc"></a>125.3. 
JVM Garbage Collection Logs</h3>
-<div class="listingblock">
-<div class="content">
-<pre>All example Garbage Collection logs in this section are based on Java 8 
output. The introduction of Unified Logging in Java 9 and newer will result in 
very different looking logs.</pre>
+<div class="admonitionblock note">
+<table>
+<tr>
+<td class="icon">
+<i class="fa icon-note" title="Note"></i>
+</td>
+<td class="content">
+<div class="paragraph">
+<p>All example Garbage Collection logs in this section are based on Java 8 
output. The introduction of Unified Logging in Java 9 and newer will result in 
very different looking logs.</p>
 </div>
+</td>
+</tr>
+</table>
 </div>
 <div class="paragraph">
 <p>HBase is memory intensive, and using the default GC you can see long pauses 
in all threads including the <em>Juliet Pause</em> aka "GC of Death". To help 
debug this or confirm this is happening GC logging can be turned on in the Java 
virtual machine.</p>
@@ -26187,8 +26228,12 @@ Some commands take arguments. Pass no args or -h for usage.
   pe              Run PerformanceEvaluation
   ltt             Run LoadTestTool
   canary          Run the Canary tool
-  regionsplitter  Run the RegionSplitter tool
   version         Print the version
+  backup          Backup tables for recovery
+  restore         Restore tables from existing backup image
+  regionsplitter  Run RegionSplitter tool
+  rowcounter      Run RowCounter tool
+  cellcounter     Run CellCounter tool
   CLASSNAME       Run the class named CLASSNAME</pre>
 </div>
 </div>
@@ -27030,7 +27075,7 @@ Assuming you&#8217;re running HDFS with permissions enabled, those permissions w
 </div>
 </div>
 <div class="sect2">
-<h3 id="_walplayer"><a class="anchor" href="#_walplayer"></a>146.14. WALPlayer</h3>
+<h3 id="walplayer"><a class="anchor" href="#walplayer"></a>146.14. WALPlayer</h3>
 <div class="paragraph">
 <p>WALPlayer is a utility to replay WAL files into HBase.</p>
 </div>
@@ -27061,30 +27106,70 @@ The output can optionally be mapped to another set of tables.</p>
 <p>WALPlayer, by default, runs as a mapreduce job.
 To NOT run WALPlayer as a mapreduce job on your cluster, force it to run all in the local process by adding the flags <code>-Dmapreduce.jobtracker.address=local</code> on the command line.</p>
 </div>
+<div class="sect3">
+<h4 id="walplayer.options"><a class="anchor" href="#walplayer.options"></a>146.14.1. WALPlayer Options</h4>
+<div class="paragraph">
+<p>Running <code>WALPlayer</code> with no arguments prints brief usage information:</p>
+</div>
+<div class="listingblock">
+<div class="content">
+<pre>Usage: WALPlayer [options] &lt;wal inputdir&gt; &lt;tables&gt; [&lt;tableMappings&gt;]
+Replay all WAL files into HBase.
+&lt;tables&gt; is a comma separated list of tables.
+If no tables ("") are specified, all tables are imported.
+(Be careful, hbase:meta entries will be imported in this case.)
+
+WAL entries can be mapped to new set of tables via &lt;tableMappings&gt;.
+&lt;tableMappings&gt; is a comma separated list of target tables.
+If specified, each table in &lt;tables&gt; must have a mapping.
+
+By default WALPlayer will load data directly into HBase.
+To generate HFiles for a bulk data load instead, pass the following option:
+  -Dwal.bulk.output=/path/for/output
+  (Only one table can be specified, and no mapping is allowed!)
+Time range options:
+  -Dwal.start.time=[date|ms]
+  -Dwal.end.time=[date|ms]
+  (The start and the end date of timerange. The dates can be expressed
+  in milliseconds since epoch or in yyyy-MM-dd'T'HH:mm:ss.SS format.
+  E.g. 1234567890120 or 2009-02-13T23:32:30.12)
+Other options:
+  -Dmapreduce.job.name=jobName
+  Use the specified mapreduce job name for the wal player
+For performance also consider the following options:
+  -Dmapreduce.map.speculative=false
+  -Dmapreduce.reduce.speculative=false</pre>
+</div>
+</div>
+</div>
 </div>
 <div class="sect2">
-<h3 id="rowcounter"><a class="anchor" href="#rowcounter"></a>146.15. 
RowCounter and CellCounter</h3>
+<h3 id="rowcounter"><a class="anchor" href="#rowcounter"></a>146.15. 
RowCounter</h3>
 <div class="paragraph">
-<p><a 
href="https://hbase.apache.org/apidocs/org/apache/hadoop/hbase/mapreduce/RowCounter.html";>RowCounter</a>
        is a mapreduce job to count all the rows of a table.
+<p><a 
href="https://hbase.apache.org/apidocs/org/apache/hadoop/hbase/mapreduce/RowCounter.html";>RowCounter</a>
 is a mapreduce job to count all the rows of a table.
 This is a good utility to use as a sanity check to ensure that HBase can read 
all the blocks of a table if there are any concerns of metadata inconsistency.
-It will run the mapreduce all in a single process but it will run faster if 
you have a MapReduce cluster in place for it to exploit. It is also possible to 
limit
-the time range of data to be scanned by using the 
<code>--starttime=[starttime]</code> and <code>--endtime=[endtime]</code> 
flags.</p>
+It will run the mapreduce all in a single process but it will run faster if 
you have a MapReduce cluster in place for it to exploit.
+It is possible to limit the time range of data to be scanned by using the 
<code>--starttime=[starttime]</code> and <code>--endtime=[endtime]</code> flags.
+The scanned data can be limited based on keys using the 
<code>--range=[startKey],[endKey][;[startKey],[endKey]&#8230;&#8203;]</code> 
option.</p>
 </div>
 <div class="listingblock">
 <div class="content">
-<pre>$ bin/hbase org.apache.hadoop.hbase.mapreduce.RowCounter 
&lt;tablename&gt; [&lt;column1&gt; &lt;column2&gt;...]</pre>
+<pre>$ bin/hbase rowcounter [options] &lt;tablename&gt; 
[--starttime=&lt;start&gt; --endtime=&lt;end&gt;] 
[--range=[startKey],[endKey][;[startKey],[endKey]...]] [&lt;column1&gt; 
&lt;column2&gt;...]</pre>
 </div>
 </div>
 <div class="paragraph">
 <p>RowCounter only counts one version per cell.</p>
 </div>
 <div class="paragraph">
-<p>Note: caching for the input Scan is configured via 
<code>hbase.client.scanner.caching</code> in the job configuration.</p>
+<p>For performance consider to use 
<code>-Dhbase.client.scanner.caching=100</code> and 
<code>-Dmapreduce.map.speculative=false</code> options.</p>
+</div>
 </div>
+<div class="sect2">
+<h3 id="cellcounter"><a class="anchor" href="#cellcounter"></a>146.16. 
CellCounter</h3>
 <div class="paragraph">
 <p>HBase ships another diagnostic mapreduce job called <a 
href="https://hbase.apache.org/apidocs/org/apache/hadoop/hbase/mapreduce/CellCounter.html";>CellCounter</a>.
 Like RowCounter, it gathers more fine-grained statistics about your table.
-The statistics gathered by RowCounter are more fine-grained and include:</p>
+The statistics gathered by CellCounter are more fine-grained and include:</p>
 </div>
 <div class="ulist">
 <ul>
@@ -27111,14 +27196,14 @@ The statistics gathered by RowCounter are more fine-grained and include:</p>
 <div class="paragraph">
 <p>The program allows you to limit the scope of the run.
 Provide a row regex or prefix to limit the rows to analyze.
-Specify a time range to scan the table by using the <code>--starttime=[starttime]</code> and <code>--endtime=[endtime]</code> flags.</p>
+Specify a time range to scan the table by using the <code>--starttime=&lt;starttime&gt;</code> and <code>--endtime=&lt;endtime&gt;</code> flags.</p>
 </div>
 <div class="paragraph">
 <p>Use <code>hbase.mapreduce.scan.column.family</code> to specify scanning a single column family.</p>
 </div>
 <div class="listingblock">
 <div class="content">
-<pre>$ bin/hbase org.apache.hadoop.hbase.mapreduce.CellCounter &lt;tablename&gt; &lt;outputDir&gt; [regex or prefix]</pre>
+<pre>$ bin/hbase cellcounter &lt;tablename&gt; &lt;outputDir&gt; [reportSeparator] [regex or prefix] [--starttime=&lt;starttime&gt; --endtime=&lt;endtime&gt;]</pre>
 </div>
 </div>
 <div class="paragraph">
@@ -27126,15 +27211,14 @@ Specify a time range to scan the table by using the <code>--starttime=[starttime
 </div>
 </div>
 <div class="sect2">
-<h3 id="_mlockall"><a class="anchor" href="#_mlockall"></a>146.16. 
mlockall</h3>
+<h3 id="_mlockall"><a class="anchor" href="#_mlockall"></a>146.17. 
mlockall</h3>
 <div class="paragraph">
 <p>It is possible to optionally pin your servers in physical memory making 
them less likely to be swapped out in oversubscribed environments by having the 
servers call <a href="http://linux.die.net/man/2/mlockall";>mlockall</a> on 
startup.
-See <a href="https://issues.apache.org/jira/browse/HBASE-4391";>HBASE-4391 Add 
ability to
-          start RS as root and call mlockall</a> for how to build the optional 
library and have it run on startup.</p>
+See <a href="https://issues.apache.org/jira/browse/HBASE-4391";>HBASE-4391 Add 
ability to start RS as root and call mlockall</a> for how to build the optional 
library and have it run on startup.</p>
 </div>
 </div>
 <div class="sect2">
-<h3 id="compaction.tool"><a class="anchor" href="#compaction.tool"></a>146.17. 
Offline Compaction Tool</h3>
+<h3 id="compaction.tool"><a class="anchor" href="#compaction.tool"></a>146.18. 
Offline Compaction Tool</h3>
 <div class="paragraph">
 <p>See the usage for the
 <a 
href="https://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/regionserver/CompactionTool.html";>CompactionTool</a>.
@@ -27147,7 +27231,7 @@ Run it like:</p>
 </div>
 </div>
 <div class="sect2">
-<h3 id="__code_hbase_clean_code"><a class="anchor" 
href="#__code_hbase_clean_code"></a>146.18. <code>hbase clean</code></h3>
+<h3 id="__code_hbase_clean_code"><a class="anchor" 
href="#__code_hbase_clean_code"></a>146.19. <code>hbase clean</code></h3>
 <div class="paragraph">
 <p>The <code>hbase clean</code> command cleans HBase data from ZooKeeper, 
HDFS, or both.
 It is appropriate to use for testing.
@@ -27166,7 +27250,7 @@ Options:
 </div>
 </div>
 <div class="sect2">
-<h3 id="__code_hbase_pe_code"><a class="anchor" 
href="#__code_hbase_pe_code"></a>146.19. <code>hbase pe</code></h3>
+<h3 id="__code_hbase_pe_code"><a class="anchor" 
href="#__code_hbase_pe_code"></a>146.20. <code>hbase pe</code></h3>
 <div class="paragraph">
 <p>The <code>hbase pe</code> command runs the PerformanceEvaluation tool, 
which is used for testing.</p>
 </div>
@@ -27179,7 +27263,7 @@ For usage instructions, run the command with no 
options.</p>
 </div>
 </div>
 <div class="sect2">
-<h3 id="__code_hbase_ltt_code"><a class="anchor" 
href="#__code_hbase_ltt_code"></a>146.20. <code>hbase ltt</code></h3>
+<h3 id="__code_hbase_ltt_code"><a class="anchor" 
href="#__code_hbase_ltt_code"></a>146.21. <code>hbase ltt</code></h3>
 <div class="paragraph">
 <p>The <code>hbase ltt</code> command runs the LoadTestTool utility, which is 
used for testing.</p>
 </div>
@@ -31958,9 +32042,8 @@ policy, which is configured with all the available actions. It chose to run <cod
 <div class="sect3">
 <h4 id="chaos.monkey.properties"><a class="anchor" href="#chaos.monkey.properties"></a>168.5.5. Configuring Individual ChaosMonkey Actions</h4>
 <div class="paragraph">
-<p>Since HBase version 1.0.0 (<a href="https://issues.apache.org/jira/browse/HBASE-11348">HBASE-11348</a>),
-ChaosMonkey integration tests can be configured per test run.
-Create a Java properties file in the HBase classpath and pass it to ChaosMonkey using
+<p>ChaosMonkey integration tests can be configured per test run.
+Create a Java properties file in the HBase CLASSPATH and pass it to ChaosMonkey using
 the <code>-monkeyProps</code> configuration flag. Configurable properties, along with their default
 values if applicable, are listed in the <code>org.apache.hadoop.hbase.chaos.factories.MonkeyConstants</code>
 class. For properties that have defaults, you can override them by including them
@@ -31975,8 +32058,11 @@ in your properties file.</p>
 </div>
 </div>
 <div class="paragraph">
-<p>The above command will start the integration tests and chaos monkey passing the properties file <em>monkey.properties</em>.
-Here is an example chaos monkey file:</p>
+<p>The above command will start the integration tests and chaos monkey. It will look for the
+properties file <em>monkey.properties</em> on the HBase CLASSPATH; e.g. inside the HBASE <em>conf</em> dir.</p>
+</div>
+<div class="paragraph">
+<p>Here is an example chaos monkey file:</p>
+</div>
 <div id="monkey.properties" class="listingblock">
 <div class="title">Example ChaosMonkey Properties File</div>
@@ -31990,6 +32076,9 @@ batch.restart.rs.ratio=<span class="float">0.4f</span></code></pre>
 </div>
 </div>
 <div class="paragraph">
+<p>Periods/time are expressed in milliseconds.</p>
+</div>
+<div class="paragraph">
 <p>HBase 1.0.2 and newer adds the ability to restart HBase&#8217;s underlying ZooKeeper quorum or
 HDFS nodes. To use these actions, you need to configure some new properties, which
 have no reasonable defaults because they are deployment-specific, in your ChaosMonkey
@@ -37679,7 +37768,7 @@ The server will return cellblocks compressed using this same compressor as long
 <div id="footer">
 <div id="footer-text">
 Version 3.0.0-SNAPSHOT<br>
-Last updated 2018-04-11 14:29:48 UTC
+Last updated 2018-04-12 14:30:08 UTC
 </div>
 </div>
 </body>

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d220bc5e/bulk-loads.html
----------------------------------------------------------------------
diff --git a/bulk-loads.html b/bulk-loads.html
index 192ae3b..8aa75d4 100644
--- a/bulk-loads.html
+++ b/bulk-loads.html
@@ -7,7 +7,7 @@
   <head>
     <meta charset="UTF-8" />
     <meta name="viewport" content="width=device-width, initial-scale=1.0" />
-    <meta name="Date-Revision-yyyymmdd" content="20180411" />
+    <meta name="Date-Revision-yyyymmdd" content="20180412" />
     <meta http-equiv="Content-Language" content="en" />
     <title>Apache HBase &#x2013;  
       Bulk Loads in Apache HBase (TM)
@@ -306,7 +306,7 @@ under the License. -->
                         <a href="https://www.apache.org/";>The Apache Software 
Foundation</a>.
             All rights reserved.      
                     
-                  <li id="publishDate" class="pull-right">Last Published: 
2018-04-11</li>
+                  <li id="publishDate" class="pull-right">Last Published: 
2018-04-12</li>
             </p>
                 </div>
 
