hbase git commit: HBASE-21088 HStoreFile should be closed in HStore#hasReferences

2018-08-27 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/branch-2.1 e826e3f2b -> c1cd6d5a8


HBASE-21088 HStoreFile should be closed in HStore#hasReferences


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/c1cd6d5a
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/c1cd6d5a
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/c1cd6d5a

Branch: refs/heads/branch-2.1
Commit: c1cd6d5a89966f3cfec378146f8088f80d6d36ac
Parents: e826e3f
Author: Ted Yu 
Authored: Mon Aug 27 20:31:08 2018 -0700
Committer: Michael Stack 
Committed: Mon Aug 27 20:31:50 2018 -0700

--
 .../org/apache/hadoop/hbase/regionserver/HStore.java | 11 +++
 1 file changed, 11 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/c1cd6d5a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
index 7566a70..80f1f21 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
@@ -1670,6 +1670,17 @@ public class HStore implements Store, HeapSize, 
StoreConfigInformation, Propagat
   LOG.error("Error trying to determine if store has references, assuming 
references exists",
 ioe);
   return true;
+} finally {
+  if (reloadedStoreFiles != null) {
+for (HStoreFile storeFile : reloadedStoreFiles) {
+  try {
+storeFile.closeStoreFile(false);
+  } catch (IOException ioe) {
+LOG.warn("Encountered exception closing " + storeFile + ": " + 
ioe.getMessage());
+// continue with closing the remaining store files
+  }
+}
+  }
 }
   }
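
The cleanup pattern this patch applies, restated as a standalone sketch with a
generic Closeable standing in for HStoreFile (whose real method is
closeStoreFile(boolean evictOnClose)); the helper below is hypothetical, not
HBase API:

    import java.io.Closeable;
    import java.io.IOException;
    import java.util.Collection;

    final class CloseUtil {
      /**
       * Best-effort close: log each failure and keep going so one bad
       * resource does not leak the rest.
       */
      static void closeAllQuietly(Collection<? extends Closeable> resources) {
        if (resources == null) {
          return;
        }
        for (Closeable resource : resources) {
          try {
            resource.close();
          } catch (IOException ioe) {
            System.err.println("Encountered exception closing " + resource
                + ": " + ioe.getMessage());
            // continue with closing the remaining resources
          }
        }
      }
    }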
 



hbase git commit: HBASE-21088 HStoreFile should be closed in HStore#hasReferences

2018-08-27 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/branch-2.0 dc53d3087 -> f89b4ed9d


HBASE-21088 HStoreFile should be closed in HStore#hasReferences


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/f89b4ed9
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/f89b4ed9
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/f89b4ed9

Branch: refs/heads/branch-2.0
Commit: f89b4ed9d6ff3b8a1cf83196d061d99f044f8f23
Parents: dc53d30
Author: Ted Yu 
Authored: Mon Aug 27 20:31:08 2018 -0700
Committer: Michael Stack 
Committed: Mon Aug 27 20:31:08 2018 -0700

--
 .../org/apache/hadoop/hbase/regionserver/HStore.java | 11 +++
 1 file changed, 11 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/f89b4ed9/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
index dad9346..2227e58 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
@@ -1643,6 +1643,17 @@ public class HStore implements Store, HeapSize, 
StoreConfigInformation, Propagat
   LOG.error("Error trying to determine if store has references, assuming 
references exists",
 ioe);
   return true;
+} finally {
+  if (reloadedStoreFiles != null) {
+for (HStoreFile storeFile : reloadedStoreFiles) {
+  try {
+storeFile.closeStoreFile(false);
+  } catch (IOException ioe) {
+LOG.warn("Encountered exception closing " + storeFile + ": " + 
ioe.getMessage());
+// continue with closing the remaining store files
+  }
+}
+  }
 }
   }
 



[hbase] Git Push Summary

2018-08-27 Thread stack
Repository: hbase
Updated Tags:  refs/tags/2.0.2RC0 [created] 525d53c05


svn commit: r28986 [4/4] - /dev/hbase/hbase-2.0.2RC0/

2018-08-27 Thread stack
Propchange: dev/hbase/hbase-2.0.2RC0/hbase-2.0.2-bin.tar.gz
--
svn:mime-type = application/octet-stream

Added: dev/hbase/hbase-2.0.2RC0/hbase-2.0.2-bin.tar.gz.asc
==
--- dev/hbase/hbase-2.0.2RC0/hbase-2.0.2-bin.tar.gz.asc (added)
+++ dev/hbase/hbase-2.0.2RC0/hbase-2.0.2-bin.tar.gz.asc Tue Aug 28 03:12:19 2018
@@ -0,0 +1,10 @@
+-BEGIN PGP SIGNATURE-
+
+iQEcBAABAgAGBQJbhJJXAAoJEN8PW7wwzQmWAAUH/0fzzv6CErzGxMX4DUikAqVo
+waOYLGboqpEioySVNdnnkrPGFFVbqKxWFkF1KgDTjhOslu4A14fXN7JOonh2fTnM
+CLE00qoOoV0Uaiz8RxBMyMgwGAconL1MPDPOGtOM9XYNwJNVMJAKV8RVTxZ/HCke
+PdXQ6jQ1V+6ZMRbMVftfGpcO3mDJYfMf7D1xPDbgOVNF/Pj/zA7BYgA6Sph2E4kZ
+M/dilie013Tc+MvYmpf6m8k/ss4nh72VEcpKeCvB1nuxAU+oj+kse/Vt8tH4eWUS
+j+oefjVNcMLB1zIM4oLIEbsrrnX/xd0WBo8sMK+EHpz9BzJCmg6xn9n/BBrxZpQ=
+=7kEy
+-END PGP SIGNATURE-

Added: dev/hbase/hbase-2.0.2RC0/hbase-2.0.2-bin.tar.gz.sha512
==
--- dev/hbase/hbase-2.0.2RC0/hbase-2.0.2-bin.tar.gz.sha512 (added)
+++ dev/hbase/hbase-2.0.2RC0/hbase-2.0.2-bin.tar.gz.sha512 Tue Aug 28 03:12:19 
2018
@@ -0,0 +1,3 @@
+hbase-2.0.2-bin.tar.gz: E8485A0B 8812250A 865A5C1F AB902BDB 9EF0B538 001DB6AD
+AC528791 474760E6 9ABD04B3 5B64F106 A497B54D C2E4C535
+96575DAF 589DE0C4 852C261C F024DDFC

Added: dev/hbase/hbase-2.0.2RC0/hbase-2.0.2-src.tar.gz
==
Binary file - no diff available.

Propchange: dev/hbase/hbase-2.0.2RC0/hbase-2.0.2-src.tar.gz
--
svn:mime-type = application/octet-stream

Added: dev/hbase/hbase-2.0.2RC0/hbase-2.0.2-src.tar.gz.asc
==
--- dev/hbase/hbase-2.0.2RC0/hbase-2.0.2-src.tar.gz.asc (added)
+++ dev/hbase/hbase-2.0.2RC0/hbase-2.0.2-src.tar.gz.asc Tue Aug 28 03:12:19 2018
@@ -0,0 +1,10 @@
+-BEGIN PGP SIGNATURE-
+
+iQEcBAABAgAGBQJbhJJaAAoJEN8PW7wwzQmWQ/sH/jL6YWUOJirfmKZmSSHBHCrt
+BUHL/XeCWmrnZ1lE8vn3fOyIwYT7Yxexr6C4+fuaDcHXhxa86pBVJ22P/W+fX37z
+uJgJgJuW69SfGBEgYnOE5/bkYF+3/8XDnKTSXVyMy1M/I1oUKEOg+SI0R5DOmYYz
+mqvrUQnvBJAucLhz0VytuBGMJfpSw09ZITnE6YrMLkof6YMaCkOR3tL6G8rX2WRQ
+c8sblyFExO+uS/FV4JAoStltqfTo8oLz0yUiY5k8BbXAdJQyT6Qtf8Cievxb0LgC
+ddABRA0z/PyYT9zoxOJd69vJDp5x/iyXcyee6Sa6we392drdSsavFnK84qCph3U=
+=vrqd
+-END PGP SIGNATURE-

Added: dev/hbase/hbase-2.0.2RC0/hbase-2.0.2-src.tar.gz.sha512
==
--- dev/hbase/hbase-2.0.2RC0/hbase-2.0.2-src.tar.gz.sha512 (added)
+++ dev/hbase/hbase-2.0.2RC0/hbase-2.0.2-src.tar.gz.sha512 Tue Aug 28 03:12:19 
2018
@@ -0,0 +1,3 @@
+hbase-2.0.2-src.tar.gz: E8A2A082 02A7995A 3496C8BA DD4E2A5B 963552D6 9BC6E48F
+2DB17FC0 0673973A FADDC707 F7C2936A E31AACE0 6F98F1DC
+2C1D4FFB 08CCD05D 2F825EEA 4524FA34

Added: 
dev/hbase/hbase-2.0.2RC0/hbase_rel_2.0.1_to_2.0.2RC0_compatibility_report.html
==
--- 
dev/hbase/hbase-2.0.2RC0/hbase_rel_2.0.1_to_2.0.2RC0_compatibility_report.html 
(added)
+++ 
dev/hbase/hbase-2.0.2RC0/hbase_rel_2.0.1_to_2.0.2RC0_compatibility_report.html 
Tue Aug 28 03:12:19 2018
@@ -0,0 +1,474 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">
+<head>
+<title>hbase: rel/2.0.1 to 2.0.2RC0 compatibility report</title>
+[... generated compatibility-report stylesheet and markup elided ...]

svn commit: r28986 [2/4] - /dev/hbase/hbase-2.0.2RC0/

2018-08-27 Thread stack


Added: dev/hbase/hbase-2.0.2RC0/CHANGES.md
==
--- dev/hbase/hbase-2.0.2RC0/CHANGES.md (added)
+++ dev/hbase/hbase-2.0.2RC0/CHANGES.md Tue Aug 28 03:12:19 2018
@@ -0,0 +1,6365 @@
+# HBASE Changelog
+
+
+## Release 2.0.2 - Unreleased (as of 2018-08-27)
+
+### INCOMPATIBLE CHANGES:
+
+| JIRA | Summary | Priority | Component |
+|:---- |:---- | :--- |:---- |
+| [HBASE-20884](https://issues.apache.org/jira/browse/HBASE-20884) | Replace 
usage of our Base64 implementation with java.util.Base64 |  Major | . |
+| [HBASE-20691](https://issues.apache.org/jira/browse/HBASE-20691) | Storage 
policy should allow deferring to HDFS |  Blocker | Filesystem Integration, wal |
+
+
+### NEW FEATURES:
+
+| JIRA | Summary | Priority | Component |
+|:---- |:---- | :--- |:---- |
+| [HBASE-20833](https://issues.apache.org/jira/browse/HBASE-20833) | Modify 
pre-upgrade coprocessor validator to support table level coprocessors |  Major 
| Coprocessors |
+
+
+### IMPROVEMENTS:
+
+| JIRA | Summary | Priority | Component |
+|:---- |:---- | :--- |:---- |
+| [HBASE-20387](https://issues.apache.org/jira/browse/HBASE-20387) | flaky 
infrastructure should work for all branches |  Critical | test |
+| [HBASE-20979](https://issues.apache.org/jira/browse/HBASE-20979) | Flaky 
test reporting should specify what JSON it needs and handle HTTP errors |  
Minor | test |
+| [HBASE-20856](https://issues.apache.org/jira/browse/HBASE-20856) | PITA 
having to set WAL provider in two places |  Minor | Operability, wal |
+| [HBASE-20935](https://issues.apache.org/jira/browse/HBASE-20935) | 
HStore.removeCompactedFiles should log in case it is unable to delete a file |  
Minor | . |
+| [HBASE-20873](https://issues.apache.org/jira/browse/HBASE-20873) | Update 
doc for Endpoint-based Export |  Minor | documentation |
+| [HBASE-20806](https://issues.apache.org/jira/browse/HBASE-20806) | Split 
style journal for flushes and compactions |  Minor | . |
+| [HBASE-20474](https://issues.apache.org/jira/browse/HBASE-20474) | Show 
non-RPC tasks on master/regionserver Web UI  by default |  Major | UI |
+| [HBASE-20826](https://issues.apache.org/jira/browse/HBASE-20826) | Truncate 
responseInfo attributes on RpcServer WARN messages |  Major | rpc |
+| [HBASE-20810](https://issues.apache.org/jira/browse/HBASE-20810) | Include 
the procedure id in the exception message in HBaseAdmin for better debugging |  
Major | Admin, proc-v2 |
+| [HBASE-20733](https://issues.apache.org/jira/browse/HBASE-20733) | QABot 
should run checkstyle tests if the checkstyle configs change |  Minor | build, 
community |
+
+
+### BUG FIXES:
+
+| JIRA | Summary | Priority | Component |
+|:---- |:---- | :--- |:---- |
+| [HBASE-21120](https://issues.apache.org/jira/browse/HBASE-21120) | 
MoveRegionProcedure makes no progress; goes to STUCK |  Major | amv2 |
+| [HBASE-20772](https://issues.apache.org/jira/browse/HBASE-20772) | 
Controlled shutdown fills Master log with the disturbing message "No matching 
procedure found for rit=OPEN, location=, table=Y, region= 
transition to CLOSED |  Major | logging |
+| [HBASE-20978](https://issues.apache.org/jira/browse/HBASE-20978) | [amv2] 
Worker terminating UNNATURALLY during MoveRegionProcedure |  Critical | amv2 |
+| [HBASE-21078](https://issues.apache.org/jira/browse/HBASE-21078) | [amv2] 
CODE-BUG NPE in RTP doing Unassign |  Major | amv2 |
+| [HBASE-21113](https://issues.apache.org/jira/browse/HBASE-21113) | Apply the 
branch-2 version of HBASE-21095, The timeout retry logic for several procedures 
are broken after master restarts |  Major | amv2 |
+| [HBASE-21101](https://issues.apache.org/jira/browse/HBASE-21101) | Remove 
the waitUntilAllRegionsAssigned call after split in TestTruncateTableProcedure 
|  Major | test |
+| [HBASE-20614](https://issues.apache.org/jira/browse/HBASE-20614) | REST scan 
API with incorrect filter text file throws HTTP 503 Service Unavailable error | 
 Minor | REST |
+| [HBASE-20648](https://issues.apache.org/jira/browse/HBASE-20648) | 
HBASE-19364 "Truncate\_preserve fails with table when replica region \> 1" for 
master branch |  Major | . |
+| [HBASE-21041](https://issues.apache.org/jira/browse/HBASE-21041) | 
Memstore's heap size will be decreased to minus zero after flush |  Major | . |
+| [HBASE-21031](https://issues.apache.org/jira/browse/HBASE-21031) | Memory 
leak if replay edits failed during region opening |  Major | . |
+| [HBASE-20705](https://issues.apache.org/jira/browse/HBASE-20705) | Having 
RPC Quota on a table prevents Space quota to be recreated/removed |  Major | . |
+| [HBASE-21058](https://issues.apache.org/jira/browse/HBASE-21058) | Nightly 
tests for branches 1 fail to build ref guide |  Major | documentation |
+| [HBASE-21074](https://issues.apache.org/jira/browse/HBASE-21074) | JDK7 
branches need to pass "-Dhttps.protocols=TLSv1.2" to maven when building |  
Major | build, community, test |
+| 

svn commit: r28986 [3/4] - /dev/hbase/hbase-2.0.2RC0/

2018-08-27 Thread stack
Added: dev/hbase/hbase-2.0.2RC0/RELEASENOTES.md
==
--- dev/hbase/hbase-2.0.2RC0/RELEASENOTES.md (added)
+++ dev/hbase/hbase-2.0.2RC0/RELEASENOTES.md Tue Aug 28 03:12:19 2018
@@ -0,0 +1,8553 @@
+# HBASE  2.0.2 Release Notes
+
+
+
+
+These release notes cover new developer and user-facing incompatibilities, 
important issues, features, and major improvements.
+
+
+---
+
+* [HBASE-20941](https://issues.apache.org/jira/browse/HBASE-20941) | *Major* | 
**Create and implement HbckService in master**
+
+Adds an HBCK Service and a first method to force-change-in-table-state for use 
by an HBCK client effecting 'repair' to a malfunctioning HBase.
+
+
+---
+
+* [HBASE-21072](https://issues.apache.org/jira/browse/HBASE-21072) | *Major* | 
**Block out HBCK1 in hbase2**
+
+Fence out hbase-1.x hbck1 instances. Stop them making state changes on an 
hbase-2.x cluster; they could do damage. We do this by writing the hbck1 lock 
file into place on hbase-2.x Master start-up.
+
+To disable this new behavior, set hbase.write.hbck1.lock.file to false.
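
A minimal sketch of opting out programmatically, assuming only the property
name given above (in practice the setting would normally go in hbase-site.xml;
HBaseConfiguration.create() is the standard way to load it):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    final class DisableHbck1Fence {
      static Configuration configure() {
        Configuration conf = HBaseConfiguration.create();
        // The Master will then skip writing the hbck1 lock file on start-up.
        conf.setBoolean("hbase.write.hbck1.lock.file", false);
        return conf;
      }
    }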
+
+
+---
+
+* [HBASE-21012](https://issues.apache.org/jira/browse/HBASE-21012) | 
*Critical* | **Revert the change of serializing TimeRangeTracker**
+
+HFiles generated by 2.0.0, 2.0.1, and 2.1.0 are not forward compatible with 
1.4.6-, 1.3.2.1-, 1.2.6.1-, and other inactive releases. HFiles lose 
compatibility because the new versions (2.0.0, 2.0.1, 2.1.0) use protobuf to 
serialize/deserialize TimeRangeTracker (TRT) while the old versions use 
DataInput/DataOutput. To solve this, we have to apply HBASE-21012 to 2.x and 
HBASE-21013 to 1.x. For more information, please check HBASE-21008.
+
+
+---
+
+* [HBASE-20813](https://issues.apache.org/jira/browse/HBASE-20813) | *Minor* | 
**Remove RPC quotas when the associated table/Namespace is dropped off**
+
+In previous releases, when a Space Quota was configured on a table or 
namespace and that table or namespace was deleted, the Space Quota was also 
deleted. This change improves the implementation so that the same is also done 
for RPC Quotas.
+
+
+---
+
+* [HBASE-20856](https://issues.apache.org/jira/browse/HBASE-20856) | *Minor* | 
**PITA having to set WAL provider in two places**
+
+With this change, if a WAL's meta provider (hbase.wal.meta\_provider) is not 
explicitly set, it now defaults to whatever hbase.wal.provider is set to. 
Previously, the two settings operated independently, each with its own default.
+
+This change is operationally incompatible with previous HBase versions because 
the default WAL meta provider no longer defaults to AsyncFSWALProvider but to 
hbase.wal.provider.
+
+The thought is that this is more in line with an operator's expectation, that 
a change in hbase.wal.provider is sufficient to change how WALs are written, 
especially given hbase.wal.meta\_provider is an obscure configuration and that 
the very idea that meta regions would have their own wal provider would likely 
come as a surprise.
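
A hedged sketch of the one-knob behavior described above ("filesystem" is
assumed here as a valid provider value mapping to FSHLog; set
hbase.wal.meta\_provider only if meta WALs should differ):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    final class WalProviderConfig {
      static Configuration configure() {
        Configuration conf = HBaseConfiguration.create();
        // After HBASE-20856, meta WALs follow this setting too unless
        // hbase.wal.meta_provider is set explicitly.
        conf.set("hbase.wal.provider", "filesystem");
        return conf;
      }
    }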
+
+
+---
+
+* [HBASE-20538](https://issues.apache.org/jira/browse/HBASE-20538) | 
*Critical* | **Upgrade our hadoop versions to 2.7.7 and 3.0.3**
+
+Update hadoop-two.version to 2.7.7 and hadoop-three.version to 3.0.3 due to a 
JDK issue which is solved by HADOOP-15473.
+
+
+---
+
+* [HBASE-20884](https://issues.apache.org/jira/browse/HBASE-20884) | *Major* | 
**Replace usage of our Base64 implementation with java.util.Base64**
+
+Class org.apache.hadoop.hbase.util.Base64 has been removed in its entirety 
from HBase 2+. In HBase 1, unused methods have been removed from the class and 
the audience was changed from Public to Private. This class was originally 
intended as an internal utility class that could be used externally, but 
thinking has since changed; such classes should not have been advertised as 
public to end users.
+
+This represents an incompatible change for users who relied on this 
implementation. An alternative implementation for affected clients is available 
at java.util.Base64 when using Java 8 or newer; be aware, it may encode/decode 
differently. For clients seeking to restore this specific implementation, it is 
available in the public domain for download at 
http://iharder.sourceforge.net/current/java/base64/
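
For affected clients, a minimal migration sketch using the JDK codec (mind the
caveat above that it may encode/decode differently from the removed class,
e.g. around line breaks):

    import java.nio.charset.StandardCharsets;
    import java.util.Base64;

    public final class Base64Migration {
      public static void main(String[] args) {
        byte[] raw = "hbase".getBytes(StandardCharsets.UTF_8);
        // java.util.Base64 replaces org.apache.hadoop.hbase.util.Base64.
        String encoded = Base64.getEncoder().encodeToString(raw);
        byte[] decoded = Base64.getDecoder().decode(encoded);
        System.out.println(encoded + " -> "
            + new String(decoded, StandardCharsets.UTF_8));
      }
    }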
+
+
+---
+
+* [HBASE-20691](https://issues.apache.org/jira/browse/HBASE-20691) | *Blocker* 
| **Storage policy should allow deferring to HDFS**
+
+After HBASE-20691 we have changed the default setting of 
hbase.wal.storage.policy from "HOT" back to "NONE", which means we defer the 
policy to HDFS. This fixes a problem in release 2.0.0 where the storage policy 
of the WAL directory would defer to HDFS and might not be "HOT" even if you 
explicitly set hbase.wal.storage.policy to "HOT".
+
+
+---
+
+* [HBASE-20839](https://issues.apache.org/jira/browse/HBASE-20839) | *Blocker* 
| **Fallback to FSHLog if we can not instantiated AsyncFSWAL when user does not 
specify AsyncFSWAL explicitly**
+
+As we hack into the internal of 

svn commit: r28986 [1/4] - /dev/hbase/hbase-2.0.2RC0/

2018-08-27 Thread stack
Author: stack
Date: Tue Aug 28 03:12:19 2018
New Revision: 28986

Log:
Add 2.0.2RC0

Added:
dev/hbase/hbase-2.0.2RC0/
dev/hbase/hbase-2.0.2RC0/CHANGES.md
dev/hbase/hbase-2.0.2RC0/RELEASENOTES.md
dev/hbase/hbase-2.0.2RC0/hbase-2.0.2-bin.tar.gz   (with props)
dev/hbase/hbase-2.0.2RC0/hbase-2.0.2-bin.tar.gz.asc
dev/hbase/hbase-2.0.2RC0/hbase-2.0.2-bin.tar.gz.sha512
dev/hbase/hbase-2.0.2RC0/hbase-2.0.2-src.tar.gz   (with props)
dev/hbase/hbase-2.0.2RC0/hbase-2.0.2-src.tar.gz.asc
dev/hbase/hbase-2.0.2RC0/hbase-2.0.2-src.tar.gz.sha512

dev/hbase/hbase-2.0.2RC0/hbase_rel_2.0.1_to_2.0.2RC0_compatibility_report.html



hbase git commit: HBASE-21123 Commit 2.0.2 RELEASENOTES and CHANGES

2018-08-27 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/branch-2.0 07bdeb954 -> dc53d3087


HBASE-21123 Commit 2.0.2 RELEASENOTES and CHANGES


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/dc53d308
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/dc53d308
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/dc53d308

Branch: refs/heads/branch-2.0
Commit: dc53d30876fefc9a88367ef30ea784add0a14efa
Parents: 07bdeb9
Author: Michael Stack 
Authored: Mon Aug 27 15:23:43 2018 -0700
Committer: Michael Stack 
Committed: Mon Aug 27 15:23:43 2018 -0700

--
 CHANGES.md  | 151 ++-
 RELEASENOTES.md | 114 +-
 2 files changed, 263 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/dc53d308/CHANGES.md
--
diff --git a/CHANGES.md b/CHANGES.md
index 03f3599..99069b3 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -40,7 +40,156 @@ document title above the apache license so markdown readers 
work. You
 may have to bulk import old-style CHANGES.txt on to the end in a code
 comment to preserve continuity of the CHANGELOG.
 -->
-## Release 2.0.1 - Unreleased (as of 2018-06-13)
+
+## Release 2.0.2 - Unreleased (as of 2018-08-27)
+
+### INCOMPATIBLE CHANGES:
+
+| JIRA | Summary | Priority | Component |
+|:---- |:---- | :--- |:---- |
+| [HBASE-20884](https://issues.apache.org/jira/browse/HBASE-20884) | Replace 
usage of our Base64 implementation with java.util.Base64 |  Major | . |
+| [HBASE-20691](https://issues.apache.org/jira/browse/HBASE-20691) | Storage 
policy should allow deferring to HDFS |  Blocker | Filesystem Integration, wal |
+
+
+### NEW FEATURES:
+
+| JIRA | Summary | Priority | Component |
+|:---- |:---- | :--- |:---- |
+| [HBASE-20833](https://issues.apache.org/jira/browse/HBASE-20833) | Modify 
pre-upgrade coprocessor validator to support table level coprocessors |  Major 
| Coprocessors |
+
+
+### IMPROVEMENTS:
+
+| JIRA | Summary | Priority | Component |
+|:---- |:---- | :--- |:---- |
+| [HBASE-20387](https://issues.apache.org/jira/browse/HBASE-20387) | flaky 
infrastructure should work for all branches |  Critical | test |
+| [HBASE-20979](https://issues.apache.org/jira/browse/HBASE-20979) | Flaky 
test reporting should specify what JSON it needs and handle HTTP errors |  
Minor | test |
+| [HBASE-20856](https://issues.apache.org/jira/browse/HBASE-20856) | PITA 
having to set WAL provider in two places |  Minor | Operability, wal |
+| [HBASE-20935](https://issues.apache.org/jira/browse/HBASE-20935) | 
HStore.removeCompactedFiles should log in case it is unable to delete a file |  
Minor | . |
+| [HBASE-20873](https://issues.apache.org/jira/browse/HBASE-20873) | Update 
doc for Endpoint-based Export |  Minor | documentation |
+| [HBASE-20806](https://issues.apache.org/jira/browse/HBASE-20806) | Split 
style journal for flushes and compactions |  Minor | . |
+| [HBASE-20474](https://issues.apache.org/jira/browse/HBASE-20474) | Show 
non-RPC tasks on master/regionserver Web UI  by default |  Major | UI |
+| [HBASE-20826](https://issues.apache.org/jira/browse/HBASE-20826) | Truncate 
responseInfo attributes on RpcServer WARN messages |  Major | rpc |
+| [HBASE-20810](https://issues.apache.org/jira/browse/HBASE-20810) | Include 
the procedure id in the exception message in HBaseAdmin for better debugging |  
Major | Admin, proc-v2 |
+| [HBASE-20733](https://issues.apache.org/jira/browse/HBASE-20733) | QABot 
should run checkstyle tests if the checkstyle configs change |  Minor | build, 
community |
+
+
+### BUG FIXES:
+
+| JIRA | Summary | Priority | Component |
+|:---- |:---- | :--- |:---- |
+| [HBASE-21120](https://issues.apache.org/jira/browse/HBASE-21120) | 
MoveRegionProcedure makes no progress; goes to STUCK |  Major | amv2 |
+| [HBASE-20772](https://issues.apache.org/jira/browse/HBASE-20772) | 
Controlled shutdown fills Master log with the disturbing message "No matching 
procedure found for rit=OPEN, location=, table=Y, region= 
transition to CLOSED |  Major | logging |
+| [HBASE-20978](https://issues.apache.org/jira/browse/HBASE-20978) | [amv2] 
Worker terminating UNNATURALLY during MoveRegionProcedure |  Critical | amv2 |
+| [HBASE-21078](https://issues.apache.org/jira/browse/HBASE-21078) | [amv2] 
CODE-BUG NPE in RTP doing Unassign |  Major | amv2 |
+| [HBASE-21113](https://issues.apache.org/jira/browse/HBASE-21113) | Apply the 
branch-2 version of HBASE-21095, The timeout retry logic for several procedures 
are broken after master restarts |  Major | amv2 |
+| [HBASE-21101](https://issues.apache.org/jira/browse/HBASE-21101) | Remove 
the waitUntilAllRegionsAssigned call after split in 

hbase git commit: HBASE-21120 MoveRegionProcedure makes no progress; goes to STUCK

2018-08-27 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/branch-2.1 625be5137 -> e826e3f2b


HBASE-21120 MoveRegionProcedure makes no progress; goes to STUCK


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/e826e3f2
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/e826e3f2
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/e826e3f2

Branch: refs/heads/branch-2.1
Commit: e826e3f2b85c491c5b57453621cdbaa66c387c41
Parents: 625be51
Author: Michael Stack 
Authored: Mon Aug 27 14:54:55 2018 -0700
Committer: Michael Stack 
Committed: Mon Aug 27 14:55:52 2018 -0700

--
 .../master/assignment/UnassignProcedure.java| 23 +---
 1 file changed, 6 insertions(+), 17 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/e826e3f2/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/UnassignProcedure.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/UnassignProcedure.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/UnassignProcedure.java
index 46ff48c..4f58a0f 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/UnassignProcedure.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/UnassignProcedure.java
@@ -28,7 +28,6 @@ import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.exceptions.UnexpectedStateException;
 import org.apache.hadoop.hbase.favored.FavoredNodesManager;
 import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
-import org.apache.hadoop.hbase.master.RegionState;
 import org.apache.hadoop.hbase.master.RegionState.State;
 import org.apache.hadoop.hbase.master.assignment.RegionStates.RegionStateNode;
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
@@ -117,7 +116,9 @@ public class UnassignProcedure extends 
RegionTransitionProcedure {
 this.destinationServer = destinationServer;
 this.force = force;
 this.removeAfterUnassigning = removeAfterUnassigning;
-setTransitionState(RegionTransitionState.REGION_TRANSITION_QUEUE);
+
+// we don't need REGION_TRANSITION_QUEUE, we jump directly to sending the 
request
+setTransitionState(RegionTransitionState.REGION_TRANSITION_DISPATCH);
   }
 
   @Override
@@ -178,21 +179,9 @@ public class UnassignProcedure extends 
RegionTransitionProcedure {
 
   @Override
   protected boolean startTransition(final MasterProcedureEnv env, final 
RegionStateNode regionNode) {
-// Check region is actually unassignable now we have lock on it. If not 
skirt to end.
-// It could have had its status changed on us post construction... perhaps 
a split removed
-// the region we are to unassign (a split and a move happening 
near-concurrently).
-// Don't overcheck. A region is set to have a SPLITTING state if it is the 
parent and it is
-// being split. Regions that are in this RSN state are unassignable. 
Regions that are SPLIT
-// are not.
-RegionStates regionStates = env.getAssignmentManager().getRegionStates();
-RegionState rs = regionStates.getRegionState(regionNode.getRegionInfo());
-// Don't try unassigning regions that are closed or split. RSN state could 
have been set
-// after our creation but before we got the region lock.
-if (rs.isClosing() || rs.isClosed() || rs.isSplit() || rs.isMerged()) {
-  LOG.info("NOT unassignable {}, skipping {}", rs, this);
-  return false;
-}
-return true;
+// nothing to do here. we skip the step in the constructor
+// by jumping to REGION_TRANSITION_DISPATCH
+throw new UnsupportedOperationException();
   }
 
   @Override
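
The shape of the fix, condensed into a self-contained sketch (hypothetical
enum and class names; the real types are RegionTransitionProcedure and
RegionTransitionState):

    // The constructor jumps the state machine straight to DISPATCH, so the
    // QUEUE handler can never run; it now fails loudly if it somehow does.
    enum TransitionState { QUEUE, DISPATCH }

    class UnassignSketch {
      private TransitionState state;

      UnassignSketch() {
        state = TransitionState.DISPATCH; // skip REGION_TRANSITION_QUEUE
      }

      boolean startTransition() { // the QUEUE-state handler
        throw new UnsupportedOperationException("unreachable: start at DISPATCH");
      }
    }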



hbase git commit: HBASE-21120 MoveRegionProcedure makes no progress; goes to STUCK

2018-08-27 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/branch-2.0 8fd5e039e -> 07bdeb954


HBASE-21120 MoveRegionProcedure makes no progress; goes to STUCK


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/07bdeb95
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/07bdeb95
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/07bdeb95

Branch: refs/heads/branch-2.0
Commit: 07bdeb95427541c3b0e1f0684d4a4b3811f162a8
Parents: 8fd5e03
Author: Michael Stack 
Authored: Mon Aug 27 14:54:55 2018 -0700
Committer: Michael Stack 
Committed: Mon Aug 27 14:54:55 2018 -0700

--
 .../master/assignment/UnassignProcedure.java| 23 +---
 1 file changed, 6 insertions(+), 17 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/07bdeb95/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/UnassignProcedure.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/UnassignProcedure.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/UnassignProcedure.java
index 46ff48c..4f58a0f 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/UnassignProcedure.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/UnassignProcedure.java
@@ -28,7 +28,6 @@ import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.exceptions.UnexpectedStateException;
 import org.apache.hadoop.hbase.favored.FavoredNodesManager;
 import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
-import org.apache.hadoop.hbase.master.RegionState;
 import org.apache.hadoop.hbase.master.RegionState.State;
 import org.apache.hadoop.hbase.master.assignment.RegionStates.RegionStateNode;
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
@@ -117,7 +116,9 @@ public class UnassignProcedure extends 
RegionTransitionProcedure {
 this.destinationServer = destinationServer;
 this.force = force;
 this.removeAfterUnassigning = removeAfterUnassigning;
-setTransitionState(RegionTransitionState.REGION_TRANSITION_QUEUE);
+
+// we don't need REGION_TRANSITION_QUEUE, we jump directly to sending the 
request
+setTransitionState(RegionTransitionState.REGION_TRANSITION_DISPATCH);
   }
 
   @Override
@@ -178,21 +179,9 @@ public class UnassignProcedure extends 
RegionTransitionProcedure {
 
   @Override
   protected boolean startTransition(final MasterProcedureEnv env, final 
RegionStateNode regionNode) {
-// Check region is actually unassignable now we have lock on it. If not 
skirt to end.
-// It could have had its status changed on us post construction... perhaps 
a split removed
-// the region we are to unassign (a split and a move happening 
near-concurrently).
-// Don't overcheck. A region is set to have a SPLITTING state if it is the 
parent and it is
-// being split. Regions that are in this RSN state are unassignable. 
Regions that are SPLIT
-// are not.
-RegionStates regionStates = env.getAssignmentManager().getRegionStates();
-RegionState rs = regionStates.getRegionState(regionNode.getRegionInfo());
-// Don't try unassigning regions that are closed or split. RSN state could 
have been set
-// after our creation but before we got the region lock.
-if (rs.isClosing() || rs.isClosed() || rs.isSplit() || rs.isMerged()) {
-  LOG.info("NOT unassignable {}, skipping {}", rs, this);
-  return false;
-}
-return true;
+// nothing to do here. we skip the step in the constructor
+// by jumping to REGION_TRANSITION_DISPATCH
+throw new UnsupportedOperationException();
   }
 
   @Override



hbase git commit: Adding larsfrancke as a committer to the POM file

2018-08-27 Thread larsfrancke
Repository: hbase
Updated Branches:
  refs/heads/master 3813f0ac3 -> 508925652


Adding larsfrancke as a committer to the POM file


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/50892565
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/50892565
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/50892565

Branch: refs/heads/master
Commit: 5089256529c7c04bb1a8eb67efc204c36b15a110
Parents: 3813f0a
Author: Lars Francke 
Authored: Mon Aug 27 23:05:49 2018 +0200
Committer: Lars Francke 
Committed: Mon Aug 27 23:05:49 2018 +0200

--
 pom.xml | 6 ++
 1 file changed, 6 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/50892565/pom.xml
--
diff --git a/pom.xml b/pom.xml
index 540c1f9..3583da3 100755
--- a/pom.xml
+++ b/pom.xml
@@ -393,6 +393,12 @@
       <timezone>-8</timezone>
     </developer>
     <developer>
+      <id>larsfrancke</id>
+      <name>Lars Francke</name>
+      <email>larsfran...@apache.org</email>
+      <timezone>Europe/Berlin</timezone>
+    </developer>
+    <developer>
       <id>larsgeorge</id>
       <name>Lars George</name>
       <email>larsgeo...@apache.org</email>



hbase git commit: HBASE-20941 Created and implemented HbckService in master

2018-08-27 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/master 6dd538303 -> 3813f0ac3


HBASE-20941 Created and implemented HbckService in master

Added API setTableStateInMeta() to update table state only in Meta. This will 
be used by the hbck2 tool.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/3813f0ac
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/3813f0ac
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/3813f0ac

Branch: refs/heads/master
Commit: 3813f0ac3d661f8a9985e6e67283d8556a5d08c1
Parents: 6dd5383
Author: Umesh Agashe 
Authored: Tue Aug 7 11:46:42 2018 -0700
Committer: Michael Stack 
Committed: Mon Aug 27 12:11:52 2018 -0700

--
 .../hadoop/hbase/client/ClusterConnection.java  |  32 ++
 .../hbase/client/ConnectionImplementation.java  |  22 
 .../apache/hadoop/hbase/client/HBaseHbck.java   |  95 +
 .../org/apache/hadoop/hbase/client/Hbck.java|  50 +
 .../hbase/shaded/protobuf/RequestConverter.java |  12 +++
 .../hadoop/hbase/HBaseInterfaceAudience.java|   5 +
 .../src/main/protobuf/Master.proto  |  11 ++
 .../hadoop/hbase/master/MasterRpcServices.java  |  31 +-
 .../hadoop/hbase/HBaseTestingUtility.java   |   9 +-
 .../apache/hadoop/hbase/client/TestHbck.java| 104 +++
 .../hadoop/hbase/master/TestMasterMetrics.java  |   1 +
 11 files changed, 370 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/3813f0ac/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnection.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnection.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnection.java
index adf47ca..d3e675c 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnection.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnection.java
@@ -326,4 +326,36 @@ public interface ClusterConnection extends Connection {
* @throws IOException if a remote or network exception occurs
*/
   int getCurrentNrHRS() throws IOException;
+
+  /**
+   * Retrieve an Hbck implementation to fix an HBase cluster.
+   * The returned Hbck is not guaranteed to be thread-safe. A new instance 
should be created by
+   * each thread. This is a lightweight operation. Pooling or caching of the 
returned Hbck instance
+   * is not recommended.
+   * 
+   * The caller is responsible for calling {@link Hbck#close()} on the 
returned Hbck instance.
+   *
+   * This will be used mostly by the hbck tool.
+   *
+   * @return an Hbck instance for active master. Active master is fetched from 
the zookeeper.
+   */
+  Hbck getHbck() throws IOException;
+
+  /**
+   * Retrieve an Hbck implementation to fix an HBase cluster.
+   * The returned Hbck is not guaranteed to be thread-safe. A new instance 
should be created by
+   * each thread. This is a lightweight operation. Pooling or caching of the 
returned Hbck instance
+   * is not recommended.
+   * 
+   * The caller is responsible for calling {@link Hbck#close()} on the 
returned Hbck instance.
+   *
+   * This will be used mostly by the hbck tool. It may be used to bypass 
fetching the
+   * registered master from ZK. In situations where ZK is not available, or 
the active
+   * master is not registered with ZK, and the user can obtain the master 
address by
+   * other means, the master can be specified explicitly.
+   *
+   * @param masterServer explicit {@link ServerName} for master server
+   * @return an Hbck instance for a specified master server
+   */
+  Hbck getHbck(ServerName masterServer) throws IOException;
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/3813f0ac/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
index 7e07daf..67fe551 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
@@ -423,6 +423,28 @@ class ConnectionImplementation implements 
ClusterConnection, Closeable {
   }
 
   @Override
+  public Hbck getHbck() throws IOException {
+return getHbck(get(registry.getMasterAddress()));
+  }
+
+  @Override
+  public Hbck getHbck(ServerName masterServer) throws IOException {
+checkClosed();
+if (isDeadServer(masterServer)) {
+  throw new RegionServerStoppedException(masterServer + " is dead.");
+}
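
A hedged usage sketch of the new API (assuming Hbck is Closeable, as the
Hbck#close() contract in the javadoc suggests; the repair call named in the
comment is illustrative only):

    import java.io.IOException;
    import org.apache.hadoop.hbase.client.ClusterConnection;
    import org.apache.hadoop.hbase.client.Hbck;

    final class HbckUsage {
      // Per the javadoc: create one instance per thread, do not pool or
      // cache it, and always close it.
      static void repair(ClusterConnection conn) throws IOException {
        try (Hbck hbck = conn.getHbck()) {
          // issue repair calls here, e.g. the new setTableStateInMeta(...)
        }
      }
    }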

hbase git commit: HBASE-20941 Created and implemented HbckService in master

2018-08-27 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/branch-2 a0cbfb8ea -> cdf5cfa39


HBASE-20941 Created and implemented HbckService in master

Added API setTableStateInMeta() to update table state only in Meta. This will 
be used by the hbck2 tool.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/cdf5cfa3
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/cdf5cfa3
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/cdf5cfa3

Branch: refs/heads/branch-2
Commit: cdf5cfa396b26d64541dcc793a250eea6ba891c3
Parents: a0cbfb8
Author: Umesh Agashe 
Authored: Tue Aug 7 11:46:42 2018 -0700
Committer: Michael Stack 
Committed: Mon Aug 27 12:10:33 2018 -0700

--
 .../hadoop/hbase/client/ClusterConnection.java  |  32 ++
 .../hbase/client/ConnectionImplementation.java  |  22 
 .../apache/hadoop/hbase/client/HBaseHbck.java   |  95 +
 .../org/apache/hadoop/hbase/client/Hbck.java|  50 +
 .../hbase/shaded/protobuf/RequestConverter.java |  12 +++
 .../hadoop/hbase/HBaseInterfaceAudience.java|   5 +
 .../src/main/protobuf/Master.proto  |  11 ++
 .../hadoop/hbase/master/MasterRpcServices.java  |  31 +-
 .../hadoop/hbase/HBaseTestingUtility.java   |   9 +-
 .../apache/hadoop/hbase/client/TestHbck.java| 104 +++
 .../hadoop/hbase/master/TestMasterMetrics.java  |   1 +
 11 files changed, 370 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/cdf5cfa3/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnection.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnection.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnection.java
index adf47ca..d3e675c 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnection.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnection.java
@@ -326,4 +326,36 @@ public interface ClusterConnection extends Connection {
* @throws IOException if a remote or network exception occurs
*/
   int getCurrentNrHRS() throws IOException;
+
+  /**
+   * Retrieve an Hbck implementation to fix an HBase cluster.
+   * The returned Hbck is not guaranteed to be thread-safe. A new instance 
should be created by
+   * each thread. This is a lightweight operation. Pooling or caching of the 
returned Hbck instance
+   * is not recommended.
+   * 
+   * The caller is responsible for calling {@link Hbck#close()} on the 
returned Hbck instance.
+   *
+   * This will be used mostly by the hbck tool.
+   *
+   * @return an Hbck instance for active master. Active master is fetched from 
the zookeeper.
+   */
+  Hbck getHbck() throws IOException;
+
+  /**
+   * Retrieve an Hbck implementation to fix an HBase cluster.
+   * The returned Hbck is not guaranteed to be thread-safe. A new instance 
should be created by
+   * each thread. This is a lightweight operation. Pooling or caching of the 
returned Hbck instance
+   * is not recommended.
+   * 
+   * The caller is responsible for calling {@link Hbck#close()} on the 
returned Hbck instance.
+   *
+   * This will be used mostly by the hbck tool. It may be used to bypass 
fetching the
+   * registered master from ZK. In situations where ZK is not available, or 
the active
+   * master is not registered with ZK, and the user can obtain the master 
address by
+   * other means, the master can be specified explicitly.
+   *
+   * @param masterServer explicit {@link ServerName} for master server
+   * @return an Hbck instance for a specified master server
+   */
+  Hbck getHbck(ServerName masterServer) throws IOException;
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/cdf5cfa3/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
index a394211..2f35220 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
@@ -421,6 +421,28 @@ class ConnectionImplementation implements 
ClusterConnection, Closeable {
   }
 
   @Override
+  public Hbck getHbck() throws IOException {
+return getHbck(get(registry.getMasterAddress()));
+  }
+
+  @Override
+  public Hbck getHbck(ServerName masterServer) throws IOException {
+checkClosed();
+if (isDeadServer(masterServer)) {
+  throw new RegionServerStoppedException(masterServer + " is dead.");
+  

[2/2] hbase git commit: HBASE-21071 HBaseTestingUtility::startMiniCluster() to use builder pattern

2018-08-27 Thread stack
HBASE-21071 HBaseTestingUtility::startMiniCluster() to use builder pattern

Signed-off-by: zhangduo 
Signed-off-by: Michael Stack 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/a0cbfb8e
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/a0cbfb8e
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/a0cbfb8e

Branch: refs/heads/branch-2
Commit: a0cbfb8ea6a7e33f21a1add8bc4c288ea04a6ec2
Parents: f9790b0
Author: Mingliang Liu 
Authored: Mon Aug 20 21:42:34 2018 -0700
Committer: Michael Stack 
Committed: Mon Aug 27 10:22:04 2018 -0700

--
 .../example/TestRefreshHFilesEndpoint.java  |   3 +-
 .../TableSnapshotInputFormatTestBase.java   |   6 +-
 .../hbase/mapreduce/TestHFileOutputFormat2.java |   5 +-
 .../hbase/snapshot/TestExportSnapshot.java  |   2 +-
 .../hbase/snapshot/TestMobExportSnapshot.java   |   2 +-
 .../snapshot/TestMobSecureExportSnapshot.java   |   2 +-
 .../snapshot/TestSecureExportSnapshot.java  |   2 +-
 .../hadoop/hbase/rest/TestStatusResource.java   |   2 +-
 .../hbase/rsgroup/TestRSGroupsOfflineMode.java  |   5 +-
 .../hadoop/hbase/HBaseTestingUtility.java   | 487 ---
 .../hadoop/hbase/StartMiniClusterOption.java| 254 ++
 .../hadoop/hbase/TestClientClusterMetrics.java  |   4 +-
 .../hadoop/hbase/TestClientClusterStatus.java   |   4 +-
 .../hbase/TestClientOperationTimeout.java   |   5 +-
 .../hadoop/hbase/TestGlobalMemStoreSize.java|   2 +-
 .../hadoop/hbase/TestLocalHBaseCluster.java |   5 +-
 .../apache/hadoop/hbase/TestMultiVersions.java  |   4 +-
 .../org/apache/hadoop/hbase/TestZooKeeper.java  |   4 +-
 .../hbase/client/TestAsyncClusterAdminApi2.java |   2 +-
 .../hbase/client/TestGetScanPartialResult.java  |   2 +-
 .../client/TestSeparateClientZKCluster.java |   5 +-
 .../hbase/client/TestTableSnapshotScanner.java  |   6 +-
 .../hbase/client/TestUpdateConfiguration.java   |   5 +-
 .../hbase/fs/TestBlockReorderBlockLocation.java |   2 +-
 .../hbase/fs/TestBlockReorderMultiBlocks.java   |   2 +-
 .../hadoop/hbase/master/AbstractTestDLS.java|   5 +-
 .../hadoop/hbase/master/TestGetInfoPort.java|   2 +-
 .../master/TestGetLastFlushedSequenceId.java|   2 +-
 .../hbase/master/TestGetReplicationLoad.java|   6 +-
 .../hadoop/hbase/master/TestMasterFailover.java |  10 +-
 .../TestMasterFailoverBalancerPersistence.java  |   8 +-
 .../master/TestMasterFileSystemWithWALDir.java  |   5 +-
 .../hadoop/hbase/master/TestMasterMetrics.java  |   6 +-
 .../hbase/master/TestMasterMetricsWrapper.java  |   2 +-
 .../TestMasterOperationsForRegionReplicas.java  |   7 +-
 .../TestMasterRestartAfterDisablingTable.java   |   6 +-
 .../hadoop/hbase/master/TestMasterShutdown.java |   5 +-
 .../TestMetaAssignmentWithStopMaster.java   |   5 +-
 .../hbase/master/TestMetaShutdownHandler.java   |   5 +-
 .../hadoop/hbase/master/TestRollingRestart.java |   5 +-
 .../hbase/master/TestShutdownBackupMaster.java  |   5 +-
 .../assignment/TestRegionMoveAndAbandon.java|   4 +-
 .../balancer/TestRegionLocationFinder.java  |   2 +-
 .../balancer/TestRegionsOnMasterOptions.java|   5 +-
 .../TestMasterFailoverWithProcedures.java   |   5 +-
 .../procedure/TestMasterProcedureWalLease.java  |   5 +-
 .../hbase/namespace/TestNamespaceAuditor.java   |   2 +-
 .../regionserver/TestClearRegionBlockCache.java |   3 +-
 .../hbase/regionserver/TestClusterId.java   |   7 +-
 .../regionserver/TestEncryptionKeyRotation.java |   2 +-
 .../hadoop/hbase/regionserver/TestHRegion.java  |   5 +-
 .../regionserver/TestHRegionOnCluster.java  |   3 +-
 .../regionserver/TestHRegionServerBulkLoad.java |   4 +-
 .../hbase/regionserver/TestJoinedScanners.java  |   5 +-
 .../TestRegionMergeTransactionOnCluster.java|   5 +-
 .../regionserver/TestRegionServerAbort.java |   4 +-
 .../regionserver/TestRegionServerHostname.java  |  13 +-
 .../regionserver/TestRegionServerMetrics.java   |   2 +-
 .../TestRegionServerOnlineConfigChange.java |   2 +-
 .../regionserver/TestRemoveRegionMetrics.java   |   2 +-
 .../TestSplitTransactionOnCluster.java  |   5 +-
 .../hadoop/hbase/regionserver/TestTags.java |   2 +-
 .../wal/AbstractTestLogRolling.java |   4 +-
 .../TestReplicationDisableInactivePeer.java |   4 +-
 .../TestReplicationDroppedTables.java   |   4 +-
 .../TestGlobalReplicationThrottler.java |   4 +-
 .../hadoop/hbase/trace/TestHTraceHooks.java |   5 +-
 .../util/TestMiniClusterLoadSequential.java |   2 +-
 .../hadoop/hbase/wal/TestWALFiltering.java  |   3 +-
 .../wal/TestWALOpenAfterDNRollingStart.java |   2 +-
 .../hbase/client/rsgroup/TestShellRSGroups.java |   2 +-
 71 files changed, 754 insertions(+), 269 deletions(-)
--
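
The API shape this change introduces, restated outside the diffs and
mirroring the startMiniCluster(2, 3)-to-builder migrations shown in the
accompanying test changes (a sketch of the new call pattern, not the full
option surface):

    import org.apache.hadoop.hbase.HBaseTestingUtility;
    import org.apache.hadoop.hbase.StartMiniClusterOption;

    final class MiniClusterStart {
      static void start(HBaseTestingUtility util) throws Exception {
        // Replaces positional overloads such as startMiniCluster(2, 3).
        StartMiniClusterOption option = StartMiniClusterOption.builder()
            .numMasters(2).numRegionServers(3).numDataNodes(3).build();
        util.startMiniCluster(option);
      }
    }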



[1/2] hbase git commit: HBASE-21071 HBaseTestingUtility::startMiniCluster() to use builder pattern

2018-08-27 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/branch-2 f9790b01d -> a0cbfb8ea


http://git-wip-us.apache.org/repos/asf/hbase/blob/a0cbfb8e/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterShutdown.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterShutdown.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterShutdown.java
index 63cfe1f..a512833 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterShutdown.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterShutdown.java
@@ -29,6 +29,7 @@ import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.LocalHBaseCluster;
 import org.apache.hadoop.hbase.MiniHBaseCluster;
+import org.apache.hadoop.hbase.StartMiniClusterOption;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
@@ -66,7 +67,9 @@ public class TestMasterShutdown {
 
 // Start the cluster
 HBaseTestingUtility htu = new HBaseTestingUtility(conf);
-htu.startMiniCluster(NUM_MASTERS, NUM_RS);
+StartMiniClusterOption option = StartMiniClusterOption.builder()
+
.numMasters(NUM_MASTERS).numRegionServers(NUM_RS).numDataNodes(NUM_RS).build();
+htu.startMiniCluster(option);
 MiniHBaseCluster cluster = htu.getHBaseCluster();
 
 // get all the master threads

http://git-wip-us.apache.org/repos/asf/hbase/blob/a0cbfb8e/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMetaAssignmentWithStopMaster.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMetaAssignmentWithStopMaster.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMetaAssignmentWithStopMaster.java
index b087890..446c3f9 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMetaAssignmentWithStopMaster.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMetaAssignmentWithStopMaster.java
@@ -23,6 +23,7 @@ import static org.junit.Assert.fail;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.StartMiniClusterOption;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
@@ -50,7 +51,9 @@ public class TestMetaAssignmentWithStopMaster {
 
   @BeforeClass
   public static void setUp() throws Exception {
-UTIL.startMiniCluster(2,3);
+StartMiniClusterOption option = StartMiniClusterOption.builder()
+.numMasters(2).numRegionServers(3).numDataNodes(3).build();
+UTIL.startMiniCluster(option);
   }
 
   @Test

http://git-wip-us.apache.org/repos/asf/hbase/blob/a0cbfb8e/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMetaShutdownHandler.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMetaShutdownHandler.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMetaShutdownHandler.java
index 9497210..ea532da 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMetaShutdownHandler.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMetaShutdownHandler.java
@@ -29,6 +29,7 @@ import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.MiniHBaseCluster;
 import org.apache.hadoop.hbase.MiniHBaseCluster.MiniHBaseClusterRegionServer;
 import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.StartMiniClusterOption;
 import org.apache.hadoop.hbase.Waiter;
 import org.apache.hadoop.hbase.master.assignment.RegionStates;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
@@ -60,7 +61,9 @@ public class TestMetaShutdownHandler {
 
   @BeforeClass
   public static void setUpBeforeClass() throws Exception {
-TEST_UTIL.startMiniCluster(1, 3, null, null, MyRegionServer.class);
+StartMiniClusterOption option = StartMiniClusterOption.builder()
+
.numRegionServers(3).rsClass(MyRegionServer.class).numDataNodes(3).build();
+TEST_UTIL.startMiniCluster(option);
   }
 
   @AfterClass

http://git-wip-us.apache.org/repos/asf/hbase/blob/a0cbfb8e/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRollingRestart.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRollingRestart.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRollingRestart.java
index 8ecb49d..ff8ad0b 100644
--- 

[2/2] hbase git commit: HBASE-21071 HBaseTestingUtility::startMiniCluster() to use builder pattern

2018-08-27 Thread stack
HBASE-21071 HBaseTestingUtility::startMiniCluster() to use builder pattern

Signed-off-by: Duo Zhang 
Signed-off-by: Michael Stack 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/6dd53830
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/6dd53830
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/6dd53830

Branch: refs/heads/master
Commit: 6dd5383033c5b0f84bd1de081a7fdadabe1658b8
Parents: 9e2732e
Author: Mingliang Liu 
Authored: Mon Aug 20 21:42:34 2018 -0700
Committer: Michael Stack 
Committed: Mon Aug 27 10:20:31 2018 -0700

--
 .../example/TestRefreshHFilesEndpoint.java  |   3 +-
 .../TableSnapshotInputFormatTestBase.java   |   6 +-
 .../hbase/mapreduce/TestHFileOutputFormat2.java |   5 +-
 .../hbase/snapshot/TestExportSnapshot.java  |   2 +-
 .../hbase/snapshot/TestMobExportSnapshot.java   |   2 +-
 .../snapshot/TestMobSecureExportSnapshot.java   |   2 +-
 .../snapshot/TestSecureExportSnapshot.java  |   2 +-
 .../hadoop/hbase/rest/TestStatusResource.java   |   2 +-
 .../hbase/rsgroup/TestRSGroupsOfflineMode.java  |   5 +-
 .../hadoop/hbase/HBaseTestingUtility.java   | 487 ---
 .../hadoop/hbase/StartMiniClusterOption.java| 254 ++
 .../hadoop/hbase/TestClientClusterMetrics.java  |   4 +-
 .../hadoop/hbase/TestClientClusterStatus.java   |   4 +-
 .../hbase/TestClientOperationTimeout.java   |   5 +-
 .../hadoop/hbase/TestGlobalMemStoreSize.java|   2 +-
 .../hadoop/hbase/TestLocalHBaseCluster.java |   5 +-
 .../apache/hadoop/hbase/TestMultiVersions.java  |   4 +-
 .../org/apache/hadoop/hbase/TestZooKeeper.java  |   4 +-
 .../hbase/client/TestAsyncClusterAdminApi2.java |   2 +-
 .../hbase/client/TestGetScanPartialResult.java  |   2 +-
 .../client/TestSeparateClientZKCluster.java |   5 +-
 .../hbase/client/TestTableSnapshotScanner.java  |   6 +-
 .../hbase/client/TestUpdateConfiguration.java   |   5 +-
 .../hbase/fs/TestBlockReorderBlockLocation.java |   2 +-
 .../hbase/fs/TestBlockReorderMultiBlocks.java   |   2 +-
 .../hadoop/hbase/master/AbstractTestDLS.java|   5 +-
 .../hadoop/hbase/master/TestGetInfoPort.java|   2 +-
 .../master/TestGetLastFlushedSequenceId.java|   2 +-
 .../hbase/master/TestGetReplicationLoad.java|   6 +-
 .../hadoop/hbase/master/TestMasterFailover.java |  10 +-
 .../TestMasterFailoverBalancerPersistence.java  |   8 +-
 .../master/TestMasterFileSystemWithWALDir.java  |   5 +-
 .../hadoop/hbase/master/TestMasterMetrics.java  |   6 +-
 .../hbase/master/TestMasterMetricsWrapper.java  |   2 +-
 .../TestMasterOperationsForRegionReplicas.java  |   7 +-
 .../TestMasterRestartAfterDisablingTable.java   |   6 +-
 .../hadoop/hbase/master/TestMasterShutdown.java |   5 +-
 .../TestMetaAssignmentWithStopMaster.java   |   5 +-
 .../hbase/master/TestMetaShutdownHandler.java   |   5 +-
 .../hadoop/hbase/master/TestRollingRestart.java |   5 +-
 .../hbase/master/TestShutdownBackupMaster.java  |   5 +-
 .../assignment/TestRegionMoveAndAbandon.java|   4 +-
 .../balancer/TestRegionLocationFinder.java  |   2 +-
 .../balancer/TestRegionsOnMasterOptions.java|   5 +-
 .../TestMasterFailoverWithProcedures.java   |   5 +-
 .../procedure/TestMasterProcedureWalLease.java  |   5 +-
 .../hbase/namespace/TestNamespaceAuditor.java   |   2 +-
 .../regionserver/TestClearRegionBlockCache.java |   3 +-
 .../hbase/regionserver/TestClusterId.java   |   7 +-
 .../regionserver/TestEncryptionKeyRotation.java |   2 +-
 .../hadoop/hbase/regionserver/TestHRegion.java  |   5 +-
 .../regionserver/TestHRegionOnCluster.java  |   3 +-
 .../regionserver/TestHRegionServerBulkLoad.java |   4 +-
 .../hbase/regionserver/TestJoinedScanners.java  |   5 +-
 .../TestRegionMergeTransactionOnCluster.java|   5 +-
 .../regionserver/TestRegionServerAbort.java |   4 +-
 .../regionserver/TestRegionServerHostname.java  |  13 +-
 .../regionserver/TestRegionServerMetrics.java   |   2 +-
 .../TestRegionServerOnlineConfigChange.java |   2 +-
 .../regionserver/TestRemoveRegionMetrics.java   |   2 +-
 .../TestSplitTransactionOnCluster.java  |   5 +-
 .../hadoop/hbase/regionserver/TestTags.java |   2 +-
 .../wal/AbstractTestLogRolling.java |   4 +-
 .../replication/SyncReplicationTestBase.java|   7 +-
 .../TestReplicationDisableInactivePeer.java |   4 +-
 .../TestReplicationDroppedTables.java   |   4 +-
 .../TestGlobalReplicationThrottler.java |   4 +-
 .../hadoop/hbase/trace/TestHTraceHooks.java |   5 +-
 .../util/TestMiniClusterLoadSequential.java |   2 +-
 .../hadoop/hbase/wal/TestWALFiltering.java  |   3 +-
 .../wal/TestWALOpenAfterDNRollingStart.java |   2 +-
 .../hbase/client/rsgroup/TestShellRSGroups.java |   2 +-
 .../hbase/spark/TestJavaHBaseContext.java   |   2 +-
 73 files changed, 760 insertions(+), 272 

hbase git commit: HBASE-21088 HStoreFile should be closed in HStore#hasReferences

2018-08-27 Thread tedyu
Repository: hbase
Updated Branches:
  refs/heads/branch-2 4f32883e9 -> f9790b01d


HBASE-21088 HStoreFile should be closed in HStore#hasReferences


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/f9790b01
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/f9790b01
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/f9790b01

Branch: refs/heads/branch-2
Commit: f9790b01d9ffd9457abe33d8a65a0a467389f4d6
Parents: 4f32883
Author: tedyu 
Authored: Mon Aug 27 09:38:03 2018 -0700
Committer: tedyu 
Committed: Mon Aug 27 09:38:03 2018 -0700

--
 .../org/apache/hadoop/hbase/regionserver/HStore.java | 11 +++
 1 file changed, 11 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/f9790b01/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
index 1b95a77..ea699ea 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
@@ -1670,6 +1670,17 @@ public class HStore implements Store, HeapSize, StoreConfigInformation, Propagat
       LOG.error("Error trying to determine if store has references, assuming references exists",
         ioe);
       return true;
+    } finally {
+      if (reloadedStoreFiles != null) {
+        for (HStoreFile storeFile : reloadedStoreFiles) {
+          try {
+            storeFile.closeStoreFile(false);
+          } catch (IOException ioe) {
+            LOG.warn("Encountered exception closing " + storeFile + ": " + ioe.getMessage());
+            // continue with closing the remaining store files
+          }
+        }
+      }
     }
   }
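
The shape of this fix generalizes: hasReferences() reloads the store files only to answer a question, so every reloaded reader has to be closed on the way out, and a failure closing one file must not stop the remaining files from being closed. A minimal, self-contained sketch of that close-all-in-finally pattern follows; CloseableFile is an illustrative stand-in for HStoreFile, not an HBase type.

import java.io.Closeable;
import java.io.IOException;
import java.util.List;

// Sketch of the close-all-in-finally pattern applied by HBASE-21088.
// CloseableFile mirrors the roles HStoreFile plays in the hunk above.
final class HasReferencesSketch {
  interface CloseableFile extends Closeable {
    boolean isReference();
  }

  static boolean hasReferences(List<CloseableFile> reloaded) {
    try {
      for (CloseableFile f : reloaded) {
        if (f.isReference()) {
          return true;
        }
      }
      return false;
    } finally {
      // Close everything that was opened for the check; one failed
      // close must not leak the remaining readers.
      for (CloseableFile f : reloaded) {
        try {
          f.close();
        } catch (IOException ioe) {
          System.err.println("Failed closing " + f + ": " + ioe.getMessage());
        }
      }
    }
  }
}

Note the patch passes false to closeStoreFile; that boolean appears to be an evict-on-close flag, so the readers reloaded for this check are closed without disturbing the block cache.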
 



hbase git commit: HBASE-21088 HStoreFile should be closed in HStore#hasReferences

2018-08-27 Thread tedyu
Repository: hbase
Updated Branches:
  refs/heads/master 993e74b74 -> 9e2732edb


HBASE-21088 HStoreFile should be closed in HStore#hasReferences


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/9e2732ed
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/9e2732ed
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/9e2732ed

Branch: refs/heads/master
Commit: 9e2732edbbc1e207392bb5e3493dcf1b42a2dbcf
Parents: 993e74b
Author: tedyu 
Authored: Mon Aug 27 09:36:44 2018 -0700
Committer: tedyu 
Committed: Mon Aug 27 09:36:44 2018 -0700

--
 .../org/apache/hadoop/hbase/regionserver/HStore.java | 11 +++
 1 file changed, 11 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/9e2732ed/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
index 1ff9043..a040184 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
@@ -1712,6 +1712,17 @@ public class HStore implements Store, HeapSize, StoreConfigInformation, Propagat
       LOG.error("Error trying to determine if store has references, assuming references exists",
         ioe);
       return true;
+    } finally {
+      if (reloadedStoreFiles != null) {
+        for (HStoreFile storeFile : reloadedStoreFiles) {
+          try {
+            storeFile.closeStoreFile(false);
+          } catch (IOException ioe) {
+            LOG.warn("Encountered exception closing " + storeFile + ": " + ioe.getMessage());
+            // continue with closing the remaining store files
+          }
+        }
+      }
     }
   }
 



[17/43] hbase-site git commit: Published site at 993e74b7471a7fe483eae59ab198bb2533c55031.

2018-08-27 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e312f773/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.RandomScanWithRange1000Test.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.RandomScanWithRange1000Test.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.RandomScanWithRange1000Test.html
index e38835a..e11b644 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.RandomScanWithRange1000Test.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.RandomScanWithRange1000Test.html
@@ -2220,698 +2220,702 @@
 2212
 2213    FilteredScanTest(Connection con, TestOptions options, Status status) {
 2214      super(con, options, status);
-2215    }
-2216
-2217    @Override
-2218    boolean testRow(int i) throws IOException {
-2219      byte[] value = generateData(this.rand, getValueLength(this.rand));
-2220      Scan scan = constructScan(value);
-2221      ResultScanner scanner = null;
-2222      try {
-2223        scanner = this.table.getScanner(scan);
-2224        for (Result r = null; (r = scanner.next()) != null;) {
-2225          updateValueSize(r);
-2226        }
-2227      } finally {
-2228        if (scanner != null) {
-2229          updateScanMetrics(scanner.getScanMetrics());
-2230          scanner.close();
-2231        }
-2232      }
-2233      return true;
-2234    }
-2235
-2236    protected Scan constructScan(byte[] valuePrefix) throws IOException {
-2237      FilterList list = new FilterList();
-2238      Filter filter = new SingleColumnValueFilter(FAMILY_ZERO, COLUMN_ZERO,
-2239        CompareOperator.EQUAL, new BinaryComparator(valuePrefix));
-2240      list.addFilter(filter);
-2241      if (opts.filterAll) {
-2242        list.addFilter(new FilterAllFilter());
-2243      }
-2244      Scan scan = new Scan().setCaching(opts.caching).setCacheBlocks(opts.cacheBlocks)
-2245          .setAsyncPrefetch(opts.asyncPrefetch).setReadType(opts.scanReadType)
-2246          .setScanMetricsEnabled(true);
-2247      if (opts.addColumns) {
-2248        for (int column = 0; column < opts.columns; column++) {
-2249          byte [] qualifier = column == 0? COLUMN_ZERO: Bytes.toBytes("" + column);
-2250          scan.addColumn(FAMILY_ZERO, qualifier);
-2251        }
-2252      } else {
-2253        scan.addFamily(FAMILY_ZERO);
-2254      }
-2255      scan.setFilter(list);
-2256      return scan;
-2257    }
-2258  }
-2259
-2260  /**
-2261   * Compute a throughput rate in MB/s.
-2262   * @param rows Number of records consumed.
-2263   * @param timeMs Time taken in milliseconds.
-2264   * @return String value with label, ie '123.76 MB/s'
-2265   */
-2266  private static String calculateMbps(int rows, long timeMs, final int valueSize, int families, int columns) {
-2267    BigDecimal rowSize = BigDecimal.valueOf(ROW_LENGTH +
-2268      ((valueSize + (FAMILY_NAME_BASE.length()+1) + COLUMN_ZERO.length) * columns) * families);
-2269    BigDecimal mbps = BigDecimal.valueOf(rows).multiply(rowSize, CXT)
-2270      .divide(BigDecimal.valueOf(timeMs), CXT).multiply(MS_PER_SEC, CXT)
-2271      .divide(BYTES_PER_MB, CXT);
-2272    return FMT.format(mbps) + " MB/s";
-2273  }
-2274
-2275  /*
-2276   * Format passed integer.
-2277   * @param number
-2278   * @return Returns zero-prefixed ROW_LENGTH-byte wide decimal version of passed
-2279   * number (Does absolute in case number is negative).
-2280   */
-2281  public static byte [] format(final int number) {
-2282    byte [] b = new byte[ROW_LENGTH];
-2283    int d = Math.abs(number);
-2284    for (int i = b.length - 1; i >= 0; i--) {
-2285      b[i] = (byte)((d % 10) + '0');
-2286      d /= 10;
-2287    }
-2288    return b;
-2289  }
-2290
-2291  /*
-2292   * This method takes some time and is done inline uploading data.  For
-2293   * example, doing the mapfile test, generation of the key and value
-2294   * consumes about 30% of CPU time.
-2295   * @return Generated random value to insert into a table cell.
-2296   */
-2297  public static byte[] generateData(final Random r, int length) {
-2298    byte [] b = new byte [length];
-2299    int i;
-2300
-2301    for(i = 0; i < (length-8); i += 8) {
-2302      b[i] = (byte) (65 + r.nextInt(26));
-2303      b[i+1] = b[i];
-2304      b[i+2] = b[i];
-2305      b[i+3] = b[i];
-2306      b[i+4] = b[i];
-2307      b[i+5] = b[i];
-2308      b[i+6] = b[i];
-2309      b[i+7] = b[i];
-2310    }
-2311
-2312    byte a = (byte) (65 + r.nextInt(26));
-2313    for(; i < length; i++) {
-2314      b[i] = a;
-2315    }
-2316    return b;
-2317  }
-2318
-2319  static byte [] getRandomRow(final Random random, final int totalRows) {
-2320    return format(generateRandomRow(random, totalRows));
+2215      if (opts.perClientRunRows == DEFAULT_ROWS_PER_GB) {
+2216        LOG.warn("Option \"rows\" 
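
Stripped of the renumbering noise, the one substantive change in this run of regenerated pages is new PerformanceEvaluation behavior: when the "rows" option is left at its default (DEFAULT_ROWS_PER_GB), the tool now logs a warning rather than silently running with the default. A minimal sketch of that warn-on-default check; the constant's value here is an assumption for illustration, and System.err stands in for the real LOG.warn call.

// Sketch of the warn-on-default check visible in the hunk above.
// DEFAULT_ROWS_PER_GB's value is assumed for this sketch.
public class RowsOptionCheck {
  static final int DEFAULT_ROWS_PER_GB = 1024 * 1024;

  static void warnIfRowsUnset(int perClientRunRows) {
    if (perClientRunRows == DEFAULT_ROWS_PER_GB) {
      System.err.println("Option \"rows\" unspecified. Using default value "
          + DEFAULT_ROWS_PER_GB);
    }
  }

  public static void main(String[] args) {
    warnIfRowsUnset(DEFAULT_ROWS_PER_GB); // warns: rows was left at the default
    warnIfRowsUnset(100);                 // silent: rows was set explicitly
  }
}

The check cannot distinguish an explicit rows value that happens to equal the default from an unset one, which is presumably why it only warns instead of failing.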

[30/43] hbase-site git commit: Published site at 993e74b7471a7fe483eae59ab198bb2533c55031.

2018-08-27 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e312f773/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncTest.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncTest.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncTest.html
index e38835a..e11b644 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncTest.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncTest.html
[hunk omitted: verbatim duplicate of the PerformanceEvaluation hunk shown under 17/43 above]

[33/43] hbase-site git commit: Published site at 993e74b7471a7fe483eae59ab198bb2533c55031.

2018-08-27 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e312f773/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncSequentialReadTest.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncSequentialReadTest.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncSequentialReadTest.html
index e38835a..e11b644 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncSequentialReadTest.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncSequentialReadTest.html
[hunk omitted: verbatim duplicate of the PerformanceEvaluation hunk shown under 17/43 above]

[21/43] hbase-site git commit: Published site at 993e74b7471a7fe483eae59ab198bb2533c55031.

2018-08-27 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e312f773/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.FilteredScanTest.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.FilteredScanTest.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.FilteredScanTest.html
index e38835a..e11b644 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.FilteredScanTest.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.FilteredScanTest.html
[hunk omitted: verbatim duplicate of the PerformanceEvaluation hunk shown under 17/43 above]

[16/43] hbase-site git commit: Published site at 993e74b7471a7fe483eae59ab198bb2533c55031.

2018-08-27 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e312f773/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.RandomScanWithRange100Test.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.RandomScanWithRange100Test.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.RandomScanWithRange100Test.html
index e38835a..e11b644 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.RandomScanWithRange100Test.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.RandomScanWithRange100Test.html
[hunk omitted: verbatim duplicate of the PerformanceEvaluation hunk shown under 17/43 above]

[01/43] hbase-site git commit: Published site at 993e74b7471a7fe483eae59ab198bb2533c55031.

2018-08-27 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site be237220f -> e312f7735


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e312f773/testdevapidocs/src-html/org/apache/hadoop/hbase/util/TestHBaseFsckReplication.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/util/TestHBaseFsckReplication.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/util/TestHBaseFsckReplication.html
index 5d7cda3..d1be7a8 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/util/TestHBaseFsckReplication.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/util/TestHBaseFsckReplication.html
@@ -58,62 +58,63 @@
 050
 051  @BeforeClass
 052  public static void setUp() throws Exception {
-053    UTIL.startMiniCluster(1);
-054  }
-055
-056  @AfterClass
-057  public static void tearDown() throws Exception {
-058    UTIL.shutdownMiniCluster();
-059  }
-060
-061  @Test
-062  public void test() throws Exception {
-063    ReplicationPeerStorage peerStorage = ReplicationStorageFactory
-064      .getReplicationPeerStorage(UTIL.getZooKeeperWatcher(), UTIL.getConfiguration());
-065    ReplicationQueueStorage queueStorage = ReplicationStorageFactory
-066      .getReplicationQueueStorage(UTIL.getZooKeeperWatcher(), UTIL.getConfiguration());
-067
-068    String peerId1 = "1";
-069    String peerId2 = "2";
-070    peerStorage.addPeer(peerId1, ReplicationPeerConfig.newBuilder().setClusterKey("key").build(),
-071      true, SyncReplicationState.NONE);
-072    peerStorage.addPeer(peerId2, ReplicationPeerConfig.newBuilder().setClusterKey("key").build(),
-073      true, SyncReplicationState.NONE);
-074    for (int i = 0; i < 10; i++) {
-075      queueStorage.addWAL(ServerName.valueOf("localhost", 1 + i, 10 + i), peerId1,
-076        "file-" + i);
-077    }
-078    queueStorage.addWAL(ServerName.valueOf("localhost", 1, 10), peerId2, "file");
-079    HBaseFsck fsck = HbckTestingUtil.doFsck(UTIL.getConfiguration(), true);
-080    HbckTestingUtil.assertNoErrors(fsck);
-081
-082    // should not remove anything since the replication peer is still alive
-083    assertEquals(10, queueStorage.getListOfReplicators().size());
-084    peerStorage.removePeer(peerId1);
-085    // there should be orphan queues
-086    assertEquals(10, queueStorage.getListOfReplicators().size());
-087    fsck = HbckTestingUtil.doFsck(UTIL.getConfiguration(), false);
-088    HbckTestingUtil.assertErrors(fsck, Stream.generate(() -> {
-089      return ERROR_CODE.UNDELETED_REPLICATION_QUEUE;
-090    }).limit(10).toArray(ERROR_CODE[]::new));
-091
-092    // should not delete anything when fix is false
-093    assertEquals(10, queueStorage.getListOfReplicators().size());
-094
-095    fsck = HbckTestingUtil.doFsck(UTIL.getConfiguration(), true);
-096    HbckTestingUtil.assertErrors(fsck, Stream.generate(() -> {
-097      return ERROR_CODE.UNDELETED_REPLICATION_QUEUE;
-098    }).limit(10).toArray(ERROR_CODE[]::new));
-099
-100    List<ServerName> replicators = queueStorage.getListOfReplicators();
-101    // should not remove the server with queue for peerId2
-102    assertEquals(1, replicators.size());
-103    assertEquals(ServerName.valueOf("localhost", 1, 10), replicators.get(0));
-104    for (String queueId : queueStorage.getAllQueues(replicators.get(0))) {
-105      assertEquals(peerId2, queueId);
-106    }
-107  }
-108}
+053    UTIL.getConfiguration().setBoolean("hbase.write.hbck1.lock.file", false);
+054    UTIL.startMiniCluster(1);
+055  }
+056
+057  @AfterClass
+058  public static void tearDown() throws Exception {
+059    UTIL.shutdownMiniCluster();
+060  }
+061
+062  @Test
+063  public void test() throws Exception {
+064    ReplicationPeerStorage peerStorage = ReplicationStorageFactory
+065      .getReplicationPeerStorage(UTIL.getZooKeeperWatcher(), UTIL.getConfiguration());
+066    ReplicationQueueStorage queueStorage = ReplicationStorageFactory
+067      .getReplicationQueueStorage(UTIL.getZooKeeperWatcher(), UTIL.getConfiguration());
+068
+069    String peerId1 = "1";
+070    String peerId2 = "2";
+071    peerStorage.addPeer(peerId1, ReplicationPeerConfig.newBuilder().setClusterKey("key").build(),
+072      true, SyncReplicationState.NONE);
+073    peerStorage.addPeer(peerId2, ReplicationPeerConfig.newBuilder().setClusterKey("key").build(),
+074      true, SyncReplicationState.NONE);
+075    for (int i = 0; i < 10; i++) {
+076      queueStorage.addWAL(ServerName.valueOf("localhost", 1 + i, 10 + i), peerId1,
+077        "file-" + i);
+078    }
+079    queueStorage.addWAL(ServerName.valueOf("localhost", 1, 10), peerId2, "file");
+080    HBaseFsck fsck = HbckTestingUtil.doFsck(UTIL.getConfiguration(), true);
+081    HbckTestingUtil.assertNoErrors(fsck);
+082
+083    // should not remove anything since the replication peer is still alive
+084    assertEquals(10, 
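
Apart from the renumbering, the regenerated TestHBaseFsckReplication source shows one real change: setUp() now sets "hbase.write.hbck1.lock.file" to false (the key is taken verbatim from the hunk above) before starting the mini cluster, presumably so the repeated fsck runs in the test do not trip over a leftover hbck1 lock file. A hedged sketch of that configure-before-start step; HBaseTestingUtility and its methods are the real HBase test API, while the wrapper class is illustrative.

import org.apache.hadoop.hbase.HBaseTestingUtility;

// Sketch of the setUp() change above: flip the flag before the cluster
// starts so fsck runs in the test skip writing the hbck1 lock file.
public class MiniClusterConfigSketch {
  private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();

  public static void setUp() throws Exception {
    UTIL.getConfiguration().setBoolean("hbase.write.hbck1.lock.file", false);
    UTIL.startMiniCluster(1);
  }

  public static void tearDown() throws Exception {
    UTIL.shutdownMiniCluster();
  }
}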

[03/43] hbase-site git commit: Published site at 993e74b7471a7fe483eae59ab198bb2533c55031.

2018-08-27 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e312f773/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.TestOptions.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.TestOptions.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.TestOptions.html
index e38835a..e11b644 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.TestOptions.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.TestOptions.html
[hunk omitted: verbatim duplicate of the PerformanceEvaluation hunk shown under 17/43 above]

[31/43] hbase-site git commit: Published site at 993e74b7471a7fe483eae59ab198bb2533c55031.

2018-08-27 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e312f773/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncTableTest.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncTableTest.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncTableTest.html
index e38835a..e11b644 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncTableTest.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncTableTest.html
[hunk omitted: verbatim duplicate of the PerformanceEvaluation hunk shown under 17/43 above]

[32/43] hbase-site git commit: Published site at 993e74b7471a7fe483eae59ab198bb2533c55031.

2018-08-27 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e312f773/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncSequentialWriteTest.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncSequentialWriteTest.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncSequentialWriteTest.html
index e38835a..e11b644 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncSequentialWriteTest.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncSequentialWriteTest.html
[hunk omitted: verbatim duplicate of the PerformanceEvaluation hunk shown under 17/43 above]

[14/43] hbase-site git commit: Published site at 993e74b7471a7fe483eae59ab198bb2533c55031.

2018-08-27 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e312f773/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.RandomScanWithRangeTest.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.RandomScanWithRangeTest.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.RandomScanWithRangeTest.html
index e38835a..e11b644 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.RandomScanWithRangeTest.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.RandomScanWithRangeTest.html
[hunk omitted: verbatim duplicate of the PerformanceEvaluation hunk shown under 17/43 above]

[20/43] hbase-site git commit: Published site at 993e74b7471a7fe483eae59ab198bb2533c55031.

2018-08-27 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e312f773/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.IncrementTest.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.IncrementTest.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.IncrementTest.html
index e38835a..e11b644 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.IncrementTest.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.IncrementTest.html
[hunk omitted: verbatim duplicate of the PerformanceEvaluation hunk shown under 17/43 above]

[09/43] hbase-site git commit: Published site at 993e74b7471a7fe483eae59ab198bb2533c55031.

2018-08-27 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e312f773/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.SequentialReadTest.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.SequentialReadTest.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.SequentialReadTest.html
index e38835a..e11b644 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.SequentialReadTest.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.SequentialReadTest.html
@@ -2220,698 +2220,702 @@
 2212
 2213FilteredScanTest(Connection con, 
TestOptions options, Status status) {
 2214  super(con, options, status);
-2215}
-2216
-2217@Override
-2218boolean testRow(int i) throws 
IOException {
-2219  byte[] value = 
generateData(this.rand, getValueLength(this.rand));
-2220  Scan scan = 
constructScan(value);
-2221  ResultScanner scanner = null;
-  try {
-2223scanner = 
this.table.getScanner(scan);
-2224for (Result r = null; (r = 
scanner.next()) != null;) {
-2225  updateValueSize(r);
-2226}
-2227  } finally {
-2228if (scanner != null) {
-2229  
updateScanMetrics(scanner.getScanMetrics());
-2230  scanner.close();
-2231}
-2232  }
-2233  return true;
-2234}
-2235
-2236protected Scan constructScan(byte[] 
valuePrefix) throws IOException {
-2237  FilterList list = new 
FilterList();
-2238  Filter filter = new 
SingleColumnValueFilter(FAMILY_ZERO, COLUMN_ZERO,
-2239CompareOperator.EQUAL, new 
BinaryComparator(valuePrefix));
-2240  list.addFilter(filter);
-2241  if (opts.filterAll) {
-2242list.addFilter(new 
FilterAllFilter());
-2243  }
-2244  Scan scan = new 
Scan().setCaching(opts.caching).setCacheBlocks(opts.cacheBlocks)
-2245  
.setAsyncPrefetch(opts.asyncPrefetch).setReadType(opts.scanReadType)
-2246  
.setScanMetricsEnabled(true);
-2247  if (opts.addColumns) {
-2248for (int column = 0; column  
opts.columns; column++) {
-2249  byte [] qualifier = column == 
0? COLUMN_ZERO: Bytes.toBytes("" + column);
-2250  scan.addColumn(FAMILY_ZERO, 
qualifier);
-2251}
-2252  } else {
-2253scan.addFamily(FAMILY_ZERO);
-2254  }
-2255  scan.setFilter(list);
-2256  return scan;
-2257}
-2258  }
-2259
-2260  /**
-2261   * Compute a throughput rate in 
MB/s.
-2262   * @param rows Number of records 
consumed.
-2263   * @param timeMs Time taken in 
milliseconds.
-2264   * @return String value with label, ie 
'123.76 MB/s'
-2265   */
-2266  private static String 
calculateMbps(int rows, long timeMs, final int valueSize, int families, int 
columns) {
-2267BigDecimal rowSize = 
BigDecimal.valueOf(ROW_LENGTH +
-2268  ((valueSize + 
(FAMILY_NAME_BASE.length()+1) + COLUMN_ZERO.length) * columns) * families);
-2269BigDecimal mbps = 
BigDecimal.valueOf(rows).multiply(rowSize, CXT)
-2270  
.divide(BigDecimal.valueOf(timeMs), CXT).multiply(MS_PER_SEC, CXT)
-2271  .divide(BYTES_PER_MB, CXT);
-2272return FMT.format(mbps) + " MB/s";
-2273  }
-2274
-2275  /*
-2276   * Format passed integer.
-2277   * @param number
-2278   * @return Returns zero-prefixed 
ROW_LENGTH-byte wide decimal version of passed
-2279   * number (Does absolute in case 
number is negative).
-2280   */
-2281  public static byte [] format(final int 
number) {
-2282byte [] b = new byte[ROW_LENGTH];
-2283int d = Math.abs(number);
-2284for (int i = b.length - 1; i >= 0; i--) {
-2285  b[i] = (byte)((d % 10) + '0');
-2286  d /= 10;
-2287}
-2288return b;
-2289  }
-2290
-2291  /*
-2292   * This method takes some time and is 
done inline uploading data.  For
-2293   * example, doing the mapfile test, 
generation of the key and value
-2294   * consumes about 30% of CPU time.
-2295   * @return Generated random value to 
insert into a table cell.
-2296   */
-2297  public static byte[] 
generateData(final Random r, int length) {
-2298byte [] b = new byte [length];
-2299int i;
-2300
-2301for(i = 0; i < (length-8); i += 8) {
-2302  b[i] = (byte) (65 + 
r.nextInt(26));
-2303  b[i+1] = b[i];
-2304  b[i+2] = b[i];
-2305  b[i+3] = b[i];
-2306  b[i+4] = b[i];
-2307  b[i+5] = b[i];
-2308  b[i+6] = b[i];
-2309  b[i+7] = b[i];
-2310}
-2311
-2312byte a = (byte) (65 + 
r.nextInt(26));
-2313for(; i < length; i++) {
-2314  b[i] = a;
-2315}
-2316return b;
-2317  }
-2318
-2319  static byte [] getRandomRow(final 
Random random, final int totalRows) {
-2320return 
format(generateRandomRow(random, totalRows));
+2215  if (opts.perClientRunRows == 
DEFAULT_ROWS_PER_GB) {
+2216LOG.warn("Option \"rows\" 
unspecified. Using default value " + 
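The hunk above shifts FilteredScanTest and the helper methods after it (calculateMbps, format, generateData, getRandomRow) down by four lines to make room for the new "rows unspecified" warning. For reference, the arithmetic in calculateMbps reduces to rows * bytesPerRow / timeMs, rescaled from bytes per millisecond to MB/s. A minimal standalone sketch follows; the constant values here are assumptions, since CXT, MS_PER_SEC and BYTES_PER_MB are defined elsewhere in PerformanceEvaluation.java and are not part of this hunk:

    import java.math.BigDecimal;
    import java.math.MathContext;

    public class MbpsSketch {
      // Assumed equivalents of the constants referenced by calculateMbps above.
      static final MathContext CXT = MathContext.DECIMAL64;
      static final BigDecimal MS_PER_SEC = BigDecimal.valueOf(1000);
      static final BigDecimal BYTES_PER_MB = BigDecimal.valueOf(1024 * 1024);

      // rows * bytesPerRow is the total byte count; divide by elapsed ms,
      // scale to per-second, then to megabytes -- the same chain as above.
      static BigDecimal mbps(long rows, long timeMs, long bytesPerRow) {
        return BigDecimal.valueOf(rows)
            .multiply(BigDecimal.valueOf(bytesPerRow), CXT)
            .divide(BigDecimal.valueOf(timeMs), CXT)
            .multiply(MS_PER_SEC, CXT)
            .divide(BYTES_PER_MB, CXT);
      }

      public static void main(String[] args) {
        // 1,000,000 rows of ~1,026 bytes read in 10 seconds: about 97.85 MB/s.
        System.out.println(mbps(1_000_000L, 10_000L, 1_026L) + " MB/s");
      }
    }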

[41/43] hbase-site git commit: Published site at 993e74b7471a7fe483eae59ab198bb2533c55031.

2018-08-27 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e312f773/book.html
--
diff --git a/book.html b/book.html
index f70154f..d42b6e0 100644
--- a/book.html
+++ b/book.html
@@ -41151,7 +41151,7 @@ 
org/apache/hadoop/hbase/security/access/AccessControlClient.revoke:(Lorg/apache/
 
 
 Version 3.0.0-SNAPSHOT
-Last updated 2018-08-26 14:29:57 UTC
+Last updated 2018-08-27 14:29:53 UTC
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e312f773/bulk-loads.html
--
diff --git a/bulk-loads.html b/bulk-loads.html
index 2286244..33fa69f 100644
--- a/bulk-loads.html
+++ b/bulk-loads.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase   
   Bulk Loads in Apache HBase (TM)
@@ -306,7 +306,7 @@ under the License. -->
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2018-08-26
+  Last Published: 
2018-08-27
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e312f773/checkstyle-aggregate.html
--
diff --git a/checkstyle-aggregate.html b/checkstyle-aggregate.html
index 7a01cb6..4d75f1f 100644
--- a/checkstyle-aggregate.html
+++ b/checkstyle-aggregate.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  Checkstyle Results
 
@@ -9828,12 +9828,12 @@
 http://checkstyle.sourceforge.net/config_javadoc.html#JavadocTagContinuationIndentation;>JavadocTagContinuationIndentation
 
 offset: 2
-763
+764
 Error
 
 
 http://checkstyle.sourceforge.net/config_javadoc.html#NonEmptyAtclauseDescription;>NonEmptyAtclauseDescription
-3606
+3605
 Error
 
 misc
@@ -16882,73 +16882,73 @@
 coding
 InnerAssignment
 Inner assignments should be avoided.
-2224
+2228
 
 Error
 sizes
 LineLength
 Line is longer than 100 characters (found 110).
-2266
+2270
 
 Error
 blocks
 NeedBraces
 'if' construct must use '{}'s.
-2381
+2385
 
 Error
 blocks
 NeedBraces
 'if' construct must use '{}'s.
-2382
+2386
 
 Error
 sizes
 LineLength
 Line is longer than 100 characters (found 101).
-2414
+2418
 
 Error
 sizes
 LineLength
 Line is longer than 100 characters (found 103).
-2467
+2471
 
 Error
 sizes
 LineLength
 Line is longer than 100 characters (found 106).
-2481
+2485
 
 Error
 sizes
 LineLength
 Line is longer than 100 characters (found 118).
-2485
+2489
 
 Error
 sizes
 LineLength
 Line is longer than 100 characters (found 116).
-2489
+2493
 
 Error
 sizes
 MethodLength
 Method length is 305 lines (max allowed is 150).
-2518
+2522
 
 Error
 blocks
 NeedBraces
 'if' construct must use '{}'s.
-2684
+2688
 
 Error
 whitespace
 ParenPad
 ')' is preceded with whitespace.
-2713
+2717
 
 org/apache/hadoop/hbase/PerformanceEvaluationCommons.java
 
@@ -19469,7 +19469,7 @@
 
 Error
 javadoc
-NonEmptyAtclauseDescription
+JavadocTagContinuationIndentation
 Javadoc comment at column 0 has parse error. Details: no viable 
alternative at input '   *' while parsing JAVADOC_TAG
 117
 
@@ -28366,13 +28366,13 @@
 javadoc
 JavadocTagContinuationIndentation
 Line continuation have incorrect indentation level, expected level should 
be 2.
-599
+598
 
 Error
 javadoc
 JavadocTagContinuationIndentation
 Line continuation have incorrect indentation level, expected level should 
be 2.
-622
+621
 
 org/apache/hadoop/hbase/client/TableDescriptor.java
 
@@ -119111,7 +119111,7 @@
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2018-08-26
+  Last Published: 
2018-08-27
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e312f773/coc.html
--
diff --git a/coc.html b/coc.html
index 1e138c8..f2ceafe 100644
--- a/coc.html
+++ b/coc.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  
   Code of Conduct Policy
@@ -375,7 +375,7 @@ email to mailto:priv...@hbase.apache.org;>the priv
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2018-08-26
+  Last Published: 
2018-08-27
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e312f773/dependencies.html
--
diff --git a/dependencies.html b/dependencies.html
index 5bf4f44..bfde090 100644
--- a/dependencies.html
+++ b/dependencies.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  Project Dependencies
 
@@ -440,7 +440,7 @@
 

[38/43] hbase-site git commit: Published site at 993e74b7471a7fe483eae59ab198bb2533c55031.

2018-08-27 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e312f773/downloads.html
--
diff --git a/downloads.html b/downloads.html
index 9714fd8..a8552e1 100644
--- a/downloads.html
+++ b/downloads.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  Apache HBase Downloads
 
@@ -423,7 +423,7 @@ under the License. -->
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2018-08-26
+  Last Published: 
2018-08-27
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e312f773/export_control.html
--
diff --git a/export_control.html b/export_control.html
index ded334e..793f8ef 100644
--- a/export_control.html
+++ b/export_control.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  
   Export Control
@@ -331,7 +331,7 @@ for more details.
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2018-08-26
+  Last Published: 
2018-08-27
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e312f773/index.html
--
diff --git a/index.html b/index.html
index 7f2f6d7..1bf2722 100644
--- a/index.html
+++ b/index.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  Apache HBase™ Home
 
@@ -411,7 +411,7 @@ Apache HBase is an open-source, distributed, versioned, 
non-relational database
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2018-08-26
+  Last Published: 
2018-08-27
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e312f773/integration.html
--
diff --git a/integration.html b/integration.html
index 3534cc4..08000a6 100644
--- a/integration.html
+++ b/integration.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  CI Management
 
@@ -291,7 +291,7 @@
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2018-08-26
+  Last Published: 
2018-08-27
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e312f773/issue-tracking.html
--
diff --git a/issue-tracking.html b/issue-tracking.html
index 6dfbd32..62b64ee 100644
--- a/issue-tracking.html
+++ b/issue-tracking.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  Issue Management
 
@@ -288,7 +288,7 @@
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2018-08-26
+  Last Published: 
2018-08-27
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e312f773/license.html
--
diff --git a/license.html b/license.html
index cd7f1fe..7a4f11f 100644
--- a/license.html
+++ b/license.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  Project Licenses
 
@@ -491,7 +491,7 @@
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2018-08-26
+  Last Published: 
2018-08-27
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e312f773/mail-lists.html
--
diff --git a/mail-lists.html b/mail-lists.html
index 04b6cbe..a825b70 100644
--- a/mail-lists.html
+++ b/mail-lists.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  Project Mailing Lists
 
@@ -341,7 +341,7 @@
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2018-08-26
+  Last Published: 
2018-08-27
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e312f773/metrics.html
--
diff --git a/metrics.html b/metrics.html
index fe1de63..6a00fab 100644
--- a/metrics.html
+++ b/metrics.html

[39/43] hbase-site git commit: Published site at 993e74b7471a7fe483eae59ab198bb2533c55031.

2018-08-27 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e312f773/devapidocs/src-html/org/apache/hadoop/hbase/client/Table.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/Table.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/Table.html
index 192f920..fbdec56 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/Table.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/Table.html
@@ -571,426 +571,425 @@
 563   * under a single row lock, so write 
operations to a row are synchronized, and
 564   * readers are guaranteed to see this 
operation fully completed.
 565   *
-566   * @param append object that specifies 
the columns and amounts to be used
-567   *  for the increment 
operations
-568   * @throws IOException e
-569   * @return values of columns after the 
append operation (maybe null)
-570   */
-571  default Result append(final Append 
append) throws IOException {
-572throw new 
NotImplementedException("Add an implementation!");
-573  }
-574
-575  /**
-576   * Increments one or more columns 
within a single row.
-577   * <p>
-578   * This operation ensures atomicity to 
readers. Increments are done
-579   * under a single row lock, so write 
operations to a row are synchronized, and
-580   * readers are guaranteed to see this 
operation fully completed.
-581   *
-582   * @param increment object that 
specifies the columns and amounts to be used
-583   *  for the increment 
operations
-584   * @throws IOException e
-585   * @return values of columns after the 
increment
-586   */
-587  default Result increment(final 
Increment increment) throws IOException {
-588throw new 
NotImplementedException("Add an implementation!");
-589  }
-590
-591  /**
-592   * See {@link 
#incrementColumnValue(byte[], byte[], byte[], long, Durability)}
-593   * <p>
-594   * The {@link Durability} is defaulted 
to {@link Durability#SYNC_WAL}.
-595   * @param row The row that contains the 
cell to increment.
-596   * @param family The column family of 
the cell to increment.
-597   * @param qualifier The column 
qualifier of the cell to increment.
-598   * @param amount The amount to 
increment the cell with (or decrement, if the
-599   * amount is negative).
-600   * @return The new value, post 
increment.
-601   * @throws IOException if a remote or 
network exception occurs.
-602   */
-603  default long 
incrementColumnValue(byte[] row, byte[] family, byte[] qualifier, long 
amount)
-604  throws IOException {
-605Increment increment = new 
Increment(row).addColumn(family, qualifier, amount);
-606Cell cell = 
increment(increment).getColumnLatestCell(family, qualifier);
-607return 
Bytes.toLong(cell.getValueArray(), cell.getValueOffset(), 
cell.getValueLength());
-608  }
-609
-610  /**
-611   * Atomically increments a column 
value. If the column value already exists
-612   * and is not a big-endian long, this 
could throw an exception. If the column
-613   * value does not yet exist it is initialized to <code>amount</code> and
-614   * written to the specified column.
-615   *
-616   * <p>Setting durability to {@link Durability#SKIP_WAL} means that in a fail
-617   * scenario you will lose any 
increments that have not been flushed.
-618   * @param row The row that contains the 
cell to increment.
-619   * @param family The column family of 
the cell to increment.
-620   * @param qualifier The column 
qualifier of the cell to increment.
-621   * @param amount The amount to 
increment the cell with (or decrement, if the
-622   * amount is negative).
-623   * @param durability The persistence 
guarantee for this increment.
-624   * @return The new value, post 
increment.
-625   * @throws IOException if a remote or 
network exception occurs.
-626   */
-627  default long 
incrementColumnValue(byte[] row, byte[] family, byte[] qualifier,
-628long amount, Durability durability) 
throws IOException {
-629Increment increment = new 
Increment(row)
-630.addColumn(family, qualifier, 
amount)
-631.setDurability(durability);
-632Cell cell = 
increment(increment).getColumnLatestCell(family, qualifier);
-633return 
Bytes.toLong(cell.getValueArray(), cell.getValueOffset(), 
cell.getValueLength());
-634  }
-635
-636  /**
-637   * Releases any resources held or 
pending changes in internal buffers.
-638   *
-639   * @throws IOException if a remote or 
network exception occurs.
-640   */
-641  @Override
-642  default void close() throws IOException 
{
-643throw new 
NotImplementedException("Add an implementation!");
-644  }
-645
-646  /**
-647   * Creates and returns a {@link 
com.google.protobuf.RpcChannel} instance connected to the
-648   * table region containing the 
specified row.  The row given does not actually have
-649   * to exist.  Whichever region would 
contain the row based on start and end keys will
-650   * be used.  Note that 
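The Table.html hunk above is largely a rewrap of the interface's default-method bodies. As a usage illustration (not part of the diff), a minimal sketch of the single-column increment path shown at source lines 603-608 above: the default incrementColumnValue wraps the column in an Increment, applies it, and decodes the latest cell of that column as a big-endian long. The wrapper class and method name below are hypothetical; connection and table setup are assumed and omitted:

    import java.io.IOException;
    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.client.Increment;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class IncrementSketch {
      // Mirrors the default incrementColumnValue(row, family, qualifier, amount)
      // shown in the hunk: build the Increment, apply it, read the new value back.
      static long incrementOnce(Table table, byte[] row, byte[] family,
          byte[] qualifier, long amount) throws IOException {
        Increment increment = new Increment(row).addColumn(family, qualifier, amount);
        Result result = table.increment(increment);
        Cell cell = result.getColumnLatestCell(family, qualifier);
        return Bytes.toLong(cell.getValueArray(), cell.getValueOffset(),
            cell.getValueLength());
      }
    }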

[43/43] hbase-site git commit: Published site at 993e74b7471a7fe483eae59ab198bb2533c55031.

2018-08-27 Thread git-site-role
Published site at 993e74b7471a7fe483eae59ab198bb2533c55031.


Project: http://git-wip-us.apache.org/repos/asf/hbase-site/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase-site/commit/e312f773
Tree: http://git-wip-us.apache.org/repos/asf/hbase-site/tree/e312f773
Diff: http://git-wip-us.apache.org/repos/asf/hbase-site/diff/e312f773

Branch: refs/heads/asf-site
Commit: e312f7735bda50a725cefe3cae90478250171808
Parents: be23722
Author: jenkins 
Authored: Mon Aug 27 14:46:55 2018 +
Committer: jenkins 
Committed: Mon Aug 27 14:46:55 2018 +

--
 acid-semantics.html |4 +-
 apache_hbase_reference_guide.pdf|4 +-
 .../org/apache/hadoop/hbase/client/Table.html   |   51 +-
 .../hadoop/hbase/rest/client/RemoteHTable.html  |3 +-
 .../org/apache/hadoop/hbase/client/Table.html   |  839 ++-
 book.html   |2 +-
 bulk-loads.html |4 +-
 checkstyle-aggregate.html   |   38 +-
 coc.html|4 +-
 dependencies.html   |4 +-
 dependency-convergence.html |4 +-
 dependency-info.html|4 +-
 dependency-management.html  |4 +-
 devapidocs/constant-values.html |6 +-
 .../org/apache/hadoop/hbase/client/HTable.html  |3 +-
 .../org/apache/hadoop/hbase/client/Table.html   |   51 +-
 .../hadoop/hbase/rest/client/RemoteHTable.html  |3 +-
 .../org/apache/hadoop/hbase/Version.html|6 +-
 .../client/Table.CheckAndMutateBuilder.html |  839 ++-
 .../org/apache/hadoop/hbase/client/Table.html   |  839 ++-
 downloads.html  |4 +-
 export_control.html |4 +-
 index.html  |4 +-
 integration.html|4 +-
 issue-tracking.html |4 +-
 license.html|4 +-
 mail-lists.html |4 +-
 metrics.html|4 +-
 old_news.html   |4 +-
 plugin-management.html  |4 +-
 plugins.html|4 +-
 poweredbyhbase.html |4 +-
 project-info.html   |4 +-
 project-reports.html|4 +-
 project-summary.html|4 +-
 pseudo-distributed.html |4 +-
 replication.html|4 +-
 resources.html  |4 +-
 source-repository.html  |4 +-
 sponsors.html   |4 +-
 supportingprojects.html |4 +-
 team-list.html  |4 +-
 .../PerformanceEvaluation.FilteredScanTest.html |4 +-
 .../hadoop/hbase/PerformanceEvaluation.html |   38 +-
 .../hbase/util/TestHBaseFsckReplication.html|4 +-
 .../hbase/PerformanceEvaluation.AppendTest.html | 1362 +-
 ...rformanceEvaluation.AsyncRandomReadTest.html | 1362 +-
 ...formanceEvaluation.AsyncRandomWriteTest.html | 1362 +-
 .../PerformanceEvaluation.AsyncScanTest.html| 1362 +-
 ...manceEvaluation.AsyncSequentialReadTest.html | 1362 +-
 ...anceEvaluation.AsyncSequentialWriteTest.html | 1362 +-
 .../PerformanceEvaluation.AsyncTableTest.html   | 1362 +-
 .../hbase/PerformanceEvaluation.AsyncTest.html  | 1362 +-
 ...rformanceEvaluation.BufferedMutatorTest.html | 1362 +-
 .../PerformanceEvaluation.CASTableTest.html | 1362 +-
 ...erformanceEvaluation.CheckAndDeleteTest.html | 1362 +-
 ...erformanceEvaluation.CheckAndMutateTest.html | 1362 +-
 .../PerformanceEvaluation.CheckAndPutTest.html  | 1362 +-
 .../PerformanceEvaluation.CmdDescriptor.html| 1362 +-
 .../hbase/PerformanceEvaluation.Counter.html| 1362 +-
 ...PerformanceEvaluation.EvaluationMapTask.html | 1362 +-
 .../PerformanceEvaluation.FilteredScanTest.html | 1362 +-
 .../PerformanceEvaluation.IncrementTest.html| 1362 +-
 .../PerformanceEvaluation.RandomReadTest.html   | 1362 +-
 ...Evaluation.RandomScanWithRange1Test.html | 1362 +-
 ...eEvaluation.RandomScanWithRange1000Test.html | 1362 +-
 ...ceEvaluation.RandomScanWithRange100Test.html | 1362 +-
 

hbase-site git commit: INFRA-10751 Empty commit

2018-08-27 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site e312f7735 -> eccf5317c


INFRA-10751 Empty commit


Project: http://git-wip-us.apache.org/repos/asf/hbase-site/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase-site/commit/eccf5317
Tree: http://git-wip-us.apache.org/repos/asf/hbase-site/tree/eccf5317
Diff: http://git-wip-us.apache.org/repos/asf/hbase-site/diff/eccf5317

Branch: refs/heads/asf-site
Commit: eccf5317cb58a09288a05ef69d0e1168cd05325d
Parents: e312f77
Author: jenkins 
Authored: Mon Aug 27 14:47:09 2018 +
Committer: jenkins 
Committed: Mon Aug 27 14:47:09 2018 +

--

--




[29/43] hbase-site git commit: Published site at 993e74b7471a7fe483eae59ab198bb2533c55031.

2018-08-27 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e312f773/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.BufferedMutatorTest.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.BufferedMutatorTest.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.BufferedMutatorTest.html
index e38835a..e11b644 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.BufferedMutatorTest.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.BufferedMutatorTest.html
@@ -2220,698 +2220,702 @@
 2212
 2213FilteredScanTest(Connection con, 
TestOptions options, Status status) {
 2214  super(con, options, status);
-2215}
-2216
-2217@Override
-2218boolean testRow(int i) throws 
IOException {
-2219  byte[] value = 
generateData(this.rand, getValueLength(this.rand));
-2220  Scan scan = 
constructScan(value);
-2221  ResultScanner scanner = null;
-  try {
-2223scanner = 
this.table.getScanner(scan);
-2224for (Result r = null; (r = 
scanner.next()) != null;) {
-2225  updateValueSize(r);
-2226}
-2227  } finally {
-2228if (scanner != null) {
-2229  
updateScanMetrics(scanner.getScanMetrics());
-2230  scanner.close();
-2231}
-2232  }
-2233  return true;
-2234}
-2235
-2236protected Scan constructScan(byte[] 
valuePrefix) throws IOException {
-2237  FilterList list = new 
FilterList();
-2238  Filter filter = new 
SingleColumnValueFilter(FAMILY_ZERO, COLUMN_ZERO,
-2239CompareOperator.EQUAL, new 
BinaryComparator(valuePrefix));
-2240  list.addFilter(filter);
-2241  if (opts.filterAll) {
-2242list.addFilter(new 
FilterAllFilter());
-2243  }
-2244  Scan scan = new 
Scan().setCaching(opts.caching).setCacheBlocks(opts.cacheBlocks)
-2245  
.setAsyncPrefetch(opts.asyncPrefetch).setReadType(opts.scanReadType)
-2246  
.setScanMetricsEnabled(true);
-2247  if (opts.addColumns) {
-2248for (int column = 0; column  
opts.columns; column++) {
-2249  byte [] qualifier = column == 
0? COLUMN_ZERO: Bytes.toBytes("" + column);
-2250  scan.addColumn(FAMILY_ZERO, 
qualifier);
-2251}
-2252  } else {
-2253scan.addFamily(FAMILY_ZERO);
-2254  }
-2255  scan.setFilter(list);
-2256  return scan;
-2257}
-2258  }
-2259
-2260  /**
-2261   * Compute a throughput rate in 
MB/s.
-2262   * @param rows Number of records 
consumed.
-2263   * @param timeMs Time taken in 
milliseconds.
-2264   * @return String value with label, ie 
'123.76 MB/s'
-2265   */
-2266  private static String 
calculateMbps(int rows, long timeMs, final int valueSize, int families, int 
columns) {
-2267BigDecimal rowSize = 
BigDecimal.valueOf(ROW_LENGTH +
-2268  ((valueSize + 
(FAMILY_NAME_BASE.length()+1) + COLUMN_ZERO.length) * columns) * families);
-2269BigDecimal mbps = 
BigDecimal.valueOf(rows).multiply(rowSize, CXT)
-2270  
.divide(BigDecimal.valueOf(timeMs), CXT).multiply(MS_PER_SEC, CXT)
-2271  .divide(BYTES_PER_MB, CXT);
-2272return FMT.format(mbps) + " MB/s";
-2273  }
-2274
-2275  /*
-2276   * Format passed integer.
-2277   * @param number
-2278   * @return Returns zero-prefixed 
ROW_LENGTH-byte wide decimal version of passed
-2279   * number (Does absolute in case 
number is negative).
-2280   */
-2281  public static byte [] format(final int 
number) {
-2282byte [] b = new byte[ROW_LENGTH];
-2283int d = Math.abs(number);
-2284for (int i = b.length - 1; i = 
0; i--) {
-2285  b[i] = (byte)((d % 10) + '0');
-2286  d /= 10;
-2287}
-2288return b;
-2289  }
-2290
-2291  /*
-2292   * This method takes some time and is 
done inline uploading data.  For
-2293   * example, doing the mapfile test, 
generation of the key and value
-2294   * consumes about 30% of CPU time.
-2295   * @return Generated random value to 
insert into a table cell.
-2296   */
-2297  public static byte[] 
generateData(final Random r, int length) {
-2298byte [] b = new byte [length];
-2299int i;
-2300
-2301for(i = 0; i  (length-8); i += 
8) {
-2302  b[i] = (byte) (65 + 
r.nextInt(26));
-2303  b[i+1] = b[i];
-2304  b[i+2] = b[i];
-2305  b[i+3] = b[i];
-2306  b[i+4] = b[i];
-2307  b[i+5] = b[i];
-2308  b[i+6] = b[i];
-2309  b[i+7] = b[i];
-2310}
-2311
-2312byte a = (byte) (65 + 
r.nextInt(26));
-2313for(; i  length; i++) {
-2314  b[i] = a;
-2315}
-2316return b;
-2317  }
-2318
-2319  static byte [] getRandomRow(final 
Random random, final int totalRows) {
-2320return 
format(generateRandomRow(random, totalRows));
+2215  if (opts.perClientRunRows == 
DEFAULT_ROWS_PER_GB) {
+2216LOG.warn("Option \"rows\" 
unspecified. Using default value " + 

[36/43] hbase-site git commit: Published site at 993e74b7471a7fe483eae59ab198bb2533c55031.

2018-08-27 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e312f773/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncRandomReadTest.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncRandomReadTest.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncRandomReadTest.html
index e38835a..e11b644 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncRandomReadTest.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncRandomReadTest.html
@@ -2220,698 +2220,702 @@

[28/43] hbase-site git commit: Published site at 993e74b7471a7fe483eae59ab198bb2533c55031.

2018-08-27 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e312f773/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.CASTableTest.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.CASTableTest.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.CASTableTest.html
index e38835a..e11b644 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.CASTableTest.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.CASTableTest.html
@@ -2220,698 +2220,702 @@

[26/43] hbase-site git commit: Published site at 993e74b7471a7fe483eae59ab198bb2533c55031.

2018-08-27 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e312f773/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.CheckAndMutateTest.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.CheckAndMutateTest.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.CheckAndMutateTest.html
index e38835a..e11b644 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.CheckAndMutateTest.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.CheckAndMutateTest.html
@@ -2220,698 +2220,702 @@

[37/43] hbase-site git commit: Published site at 993e74b7471a7fe483eae59ab198bb2533c55031.

2018-08-27 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e312f773/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AppendTest.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AppendTest.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AppendTest.html
index e38835a..e11b644 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AppendTest.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AppendTest.html
@@ -2220,698 +2220,702 @@

[42/43] hbase-site git commit: Published site at 993e74b7471a7fe483eae59ab198bb2533c55031.

2018-08-27 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e312f773/apidocs/src-html/org/apache/hadoop/hbase/client/Table.html
--
diff --git a/apidocs/src-html/org/apache/hadoop/hbase/client/Table.html 
b/apidocs/src-html/org/apache/hadoop/hbase/client/Table.html
index 192f920..fbdec56 100644
--- a/apidocs/src-html/org/apache/hadoop/hbase/client/Table.html
+++ b/apidocs/src-html/org/apache/hadoop/hbase/client/Table.html
@@ -571,426 +571,425 @@
 563   * under a single row lock, so write operations to a row are synchronized, and
 564   * readers are guaranteed to see this operation fully completed.
 565   *
-566   * @param append object that specifies the columns and amounts to be used
-567   *  for the increment operations
-568   * @throws IOException e
-569   * @return values of columns after the append operation (maybe null)
-570   */
-571  default Result append(final Append append) throws IOException {
-572    throw new NotImplementedException("Add an implementation!");
-573  }
-574
-575  /**
-576   * Increments one or more columns within a single row.
-577   * <p>
-578   * This operation ensures atomicity to readers. Increments are done
-579   * under a single row lock, so write operations to a row are synchronized, and
-580   * readers are guaranteed to see this operation fully completed.
-581   *
-582   * @param increment object that specifies the columns and amounts to be used
-583   *  for the increment operations
-584   * @throws IOException e
-585   * @return values of columns after the increment
-586   */
-587  default Result increment(final Increment increment) throws IOException {
-588    throw new NotImplementedException("Add an implementation!");
-589  }
-590
-591  /**
-592   * See {@link #incrementColumnValue(byte[], byte[], byte[], long, Durability)}
-593   * <p>
-594   * The {@link Durability} is defaulted to {@link Durability#SYNC_WAL}.
-595   * @param row The row that contains the cell to increment.
-596   * @param family The column family of the cell to increment.
-597   * @param qualifier The column qualifier of the cell to increment.
-598   * @param amount The amount to increment the cell with (or decrement, if the
-599   * amount is negative).
-600   * @return The new value, post increment.
-601   * @throws IOException if a remote or network exception occurs.
-602   */
-603  default long incrementColumnValue(byte[] row, byte[] family, byte[] qualifier, long amount)
-604  throws IOException {
-605    Increment increment = new Increment(row).addColumn(family, qualifier, amount);
-606    Cell cell = increment(increment).getColumnLatestCell(family, qualifier);
-607    return Bytes.toLong(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength());
-608  }
-609
-610  /**
-611   * Atomically increments a column value. If the column value already exists
-612   * and is not a big-endian long, this could throw an exception. If the column
-613   * value does not yet exist it is initialized to <code>amount</code> and
-614   * written to the specified column.
-615   *
-616   * <p>Setting durability to {@link Durability#SKIP_WAL} means that in a fail
-617   * scenario you will lose any increments that have not been flushed.
-618   * @param row The row that contains the cell to increment.
-619   * @param family The column family of the cell to increment.
-620   * @param qualifier The column qualifier of the cell to increment.
-621   * @param amount The amount to increment the cell with (or decrement, if the
-622   * amount is negative).
-623   * @param durability The persistence guarantee for this increment.
-624   * @return The new value, post increment.
-625   * @throws IOException if a remote or network exception occurs.
-626   */
-627  default long incrementColumnValue(byte[] row, byte[] family, byte[] qualifier,
-628    long amount, Durability durability) throws IOException {
-629    Increment increment = new Increment(row)
-630        .addColumn(family, qualifier, amount)
-631        .setDurability(durability);
-632    Cell cell = increment(increment).getColumnLatestCell(family, qualifier);
-633    return Bytes.toLong(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength());
-634  }
-635
-636  /**
-637   * Releases any resources held or pending changes in internal buffers.
-638   *
-639   * @throws IOException if a remote or network exception occurs.
-640   */
-641  @Override
-642  default void close() throws IOException {
-643    throw new NotImplementedException("Add an implementation!");
-644  }
-645
-646  /**
-647   * Creates and returns a {@link com.google.protobuf.RpcChannel} instance connected to the
-648   * table region containing the specified row.  The row given does not actually have
-649   * to exist.  Whichever region would contain the row based on start and end keys will
-650   * be used.  Note that the {@code
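
Diff form makes these default methods easy to misread, so here is a hedged usage sketch of the two increment paths the Javadoc above describes. The connection handling, the table name "counters", and the column names are invented for the example; the calls themselves are stock HBase 2.x client API:

import java.io.IOException;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Increment;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class CounterSketch {
  static final byte[] FAM = Bytes.toBytes("f");     // invented family
  static final byte[] QUAL = Bytes.toBytes("hits"); // invented qualifier

  static long bump(Connection conn, byte[] row) throws IOException {
    try (Table table = conn.getTable(TableName.valueOf("counters"))) {
      // Shorthand: the default method builds the Increment and decodes the
      // returned cell with Bytes.toLong, exactly as the deleted lines show.
      long afterFirst = table.incrementColumnValue(row, FAM, QUAL, 1L);

      // Explicit equivalent, with the durability choice spelled out.
      Increment inc = new Increment(row)
          .addColumn(FAM, QUAL, 1L)
          .setDurability(Durability.SYNC_WAL);
      Result result = table.increment(inc);
      Cell cell = result.getColumnLatestCell(FAM, QUAL);
      long afterSecond = Bytes.toLong(cell.getValueArray(), cell.getValueOffset(),
          cell.getValueLength());
      return afterSecond; // two increments total: afterSecond == afterFirst + 1
    }
  }
}

Both paths run under the single row lock described at the top of the hunk, so a concurrent reader observes either the pre-increment or the post-increment value, never a partial write.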

[22/43] hbase-site git commit: Published site at 993e74b7471a7fe483eae59ab198bb2533c55031.

2018-08-27 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e312f773/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.EvaluationMapTask.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.EvaluationMapTask.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.EvaluationMapTask.html
index e38835a..e11b644 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.EvaluationMapTask.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.EvaluationMapTask.html
@@ -2220,698 +2220,702 @@

[18/43] hbase-site git commit: Published site at 993e74b7471a7fe483eae59ab198bb2533c55031.

2018-08-27 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e312f773/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.RandomScanWithRange1Test.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.RandomScanWithRange1Test.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.RandomScanWithRange1Test.html
index e38835a..e11b644 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.RandomScanWithRange1Test.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.RandomScanWithRange1Test.html
@@ -2220,698 +2220,702 @@

[40/43] hbase-site git commit: Published site at 993e74b7471a7fe483eae59ab198bb2533c55031.

2018-08-27 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e312f773/devapidocs/src-html/org/apache/hadoop/hbase/client/Table.CheckAndMutateBuilder.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/Table.CheckAndMutateBuilder.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/Table.CheckAndMutateBuilder.html
index 192f920..fbdec56 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/Table.CheckAndMutateBuilder.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/Table.CheckAndMutateBuilder.html
@@ -571,426 +571,425 @@
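The hunk on this page is byte-for-byte the same Table.html hunk shown above, rendered into a second generated page. As a companion to it, here is a hedged sketch of the append(Append) path that the same Javadoc documents; the table and column names are invented:

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Append;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class AppendSketch {
  // Invented names; any table with a family "f" matches the shape of the call.
  static Result appendSuffix(Connection conn, byte[] row) throws IOException {
    try (Table table = conn.getTable(TableName.valueOf("events"))) {
      Append append = new Append(row);
      append.addColumn(Bytes.toBytes("f"), Bytes.toBytes("log"), Bytes.toBytes("|next"));
      // Row-lock semantics: readers see the old or the new value, never a torn append.
      return table.append(append);
    }
  }
}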

[34/43] hbase-site git commit: Published site at 993e74b7471a7fe483eae59ab198bb2533c55031.

2018-08-27 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e312f773/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncScanTest.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncScanTest.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncScanTest.html
index e38835a..e11b644 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncScanTest.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncScanTest.html
@@ -2220,698 +2220,702 @@

[11/43] hbase-site git commit: Published site at 993e74b7471a7fe483eae59ab198bb2533c55031.

2018-08-27 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e312f773/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.RunResult.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.RunResult.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.RunResult.html
index e38835a..e11b644 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.RunResult.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.RunResult.html
@@ -2220,698 +2220,702 @@

[02/43] hbase-site git commit: Published site at 993e74b7471a7fe483eae59ab198bb2533c55031.

2018-08-27 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e312f773/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.html 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.html
index e38835a..e11b644 100644
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.html
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.html
@@ -2220,698 +2220,702 @@
 2212
 2213    FilteredScanTest(Connection con, TestOptions options, Status status) {
 2214      super(con, options, status);
-2215    }
-2216
-2217    @Override
-2218    boolean testRow(int i) throws IOException {
-2219      byte[] value = generateData(this.rand, getValueLength(this.rand));
-2220      Scan scan = constructScan(value);
-2221      ResultScanner scanner = null;
-2222      try {
-2223        scanner = this.table.getScanner(scan);
-2224        for (Result r = null; (r = scanner.next()) != null;) {
-2225          updateValueSize(r);
-2226        }
-2227      } finally {
-2228        if (scanner != null) {
-2229          updateScanMetrics(scanner.getScanMetrics());
-2230          scanner.close();
-2231        }
-2232      }
-2233      return true;
-2234    }
-2235
-2236    protected Scan constructScan(byte[] valuePrefix) throws IOException {
-2237      FilterList list = new FilterList();
-2238      Filter filter = new SingleColumnValueFilter(FAMILY_ZERO, COLUMN_ZERO,
-2239        CompareOperator.EQUAL, new BinaryComparator(valuePrefix));
-2240      list.addFilter(filter);
-2241      if (opts.filterAll) {
-2242        list.addFilter(new FilterAllFilter());
-2243      }
-2244      Scan scan = new Scan().setCaching(opts.caching).setCacheBlocks(opts.cacheBlocks)
-2245          .setAsyncPrefetch(opts.asyncPrefetch).setReadType(opts.scanReadType)
-2246          .setScanMetricsEnabled(true);
-2247      if (opts.addColumns) {
-2248        for (int column = 0; column < opts.columns; column++) {
-2249          byte [] qualifier = column == 0? COLUMN_ZERO: Bytes.toBytes("" + column);
-2250          scan.addColumn(FAMILY_ZERO, qualifier);
-2251        }
-2252      } else {
-2253        scan.addFamily(FAMILY_ZERO);
-2254      }
-2255      scan.setFilter(list);
-2256      return scan;
-2257    }
-2258  }
-2259
-2260  /**
-2261   * Compute a throughput rate in MB/s.
-2262   * @param rows Number of records consumed.
-2263   * @param timeMs Time taken in milliseconds.
-2264   * @return String value with label, ie '123.76 MB/s'
-2265   */
-2266  private static String calculateMbps(int rows, long timeMs, final int valueSize, int families, int columns) {
-2267    BigDecimal rowSize = BigDecimal.valueOf(ROW_LENGTH +
-2268      ((valueSize + (FAMILY_NAME_BASE.length()+1) + COLUMN_ZERO.length) * columns) * families);
-2269    BigDecimal mbps = BigDecimal.valueOf(rows).multiply(rowSize, CXT)
-2270      .divide(BigDecimal.valueOf(timeMs), CXT).multiply(MS_PER_SEC, CXT)
-2271      .divide(BYTES_PER_MB, CXT);
-2272    return FMT.format(mbps) + " MB/s";
-2273  }
-2274
-2275  /*
-2276   * Format passed integer.
-2277   * @param number
-2278   * @return Returns zero-prefixed ROW_LENGTH-byte wide decimal version of passed
-2279   * number (Does absolute in case number is negative).
-2280   */
-2281  public static byte [] format(final int number) {
-2282    byte [] b = new byte[ROW_LENGTH];
-2283    int d = Math.abs(number);
-2284    for (int i = b.length - 1; i >= 0; i--) {
-2285      b[i] = (byte)((d % 10) + '0');
-2286      d /= 10;
-2287    }
-2288    return b;
-2289  }
-2290
-2291  /*
-2292   * This method takes some time and is done inline uploading data.  For
-2293   * example, doing the mapfile test, generation of the key and value
-2294   * consumes about 30% of CPU time.
-2295   * @return Generated random value to insert into a table cell.
-2296   */
-2297  public static byte[] generateData(final Random r, int length) {
-2298    byte [] b = new byte [length];
-2299    int i;
-2300
-2301    for(i = 0; i < (length-8); i += 8) {
-2302      b[i] = (byte) (65 + r.nextInt(26));
-2303      b[i+1] = b[i];
-2304      b[i+2] = b[i];
-2305      b[i+3] = b[i];
-2306      b[i+4] = b[i];
-2307      b[i+5] = b[i];
-2308      b[i+6] = b[i];
-2309      b[i+7] = b[i];
-2310    }
-2311
-2312    byte a = (byte) (65 + r.nextInt(26));
-2313    for(; i < length; i++) {
-2314      b[i] = a;
-2315    }
-2316    return b;
-2317  }
-2318
-2319  static byte [] getRandomRow(final Random random, final int totalRows) {
-2320    return format(generateRandomRow(random, totalRows));
+2215      if (opts.perClientRunRows == DEFAULT_ROWS_PER_GB) {
+2216        LOG.warn("Option \"rows\" unspecified. Using default value " + DEFAULT_ROWS_PER_GB +
+2217          ". This could take a very long time.");
+2218      }
+2219    }
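
With the hunk complete, its only functional change is visible: the added +2215 to +2218 guard logs a warning when the "rows" option is left at DEFAULT_ROWS_PER_GB, since a default-sized run can take a very long time. The deleted FilteredScanTest body reads more easily as plain client code; below is a hedged sketch of the same scan construction, where the family, qualifier, caching value, and table name are stand-ins and the updateValueSize bookkeeping is replaced by a simple row count:

import java.io.IOException;
import org.apache.hadoop.hbase.CompareOperator;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.filter.BinaryComparator;
import org.apache.hadoop.hbase.filter.FilterList;
import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
import org.apache.hadoop.hbase.util.Bytes;

public class FilteredScanSketch {
  // Stand-ins; PerformanceEvaluation uses its own FAMILY_ZERO/COLUMN_ZERO constants.
  static final byte[] FAM = Bytes.toBytes("info0");
  static final byte[] QUAL = Bytes.toBytes("0");

  static long countMatching(Connection conn, byte[] wantedValue) throws IOException {
    // Keep only rows whose (FAM, QUAL) cell equals wantedValue.
    FilterList list = new FilterList();
    list.addFilter(new SingleColumnValueFilter(FAM, QUAL,
        CompareOperator.EQUAL, new BinaryComparator(wantedValue)));
    Scan scan = new Scan().setCaching(1000).setCacheBlocks(false)
        .setScanMetricsEnabled(true);
    scan.addColumn(FAM, QUAL);
    scan.setFilter(list);
    long rows = 0;
    try (Table table = conn.getTable(TableName.valueOf("TestTable"));
         ResultScanner scanner = table.getScanner(scan)) {
      for (Result r : scanner) {
        rows++; // the test tallies value sizes via updateValueSize(r) instead
      }
    }
    return rows;
  }
}

The original additionally pulls scanner.getScanMetrics() in the finally block before closing the scanner; the try-with-resources sketch omits that bookkeeping.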

[13/43] hbase-site git commit: Published site at 993e74b7471a7fe483eae59ab198bb2533c55031.

2018-08-27 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e312f773/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.RandomSeekScanTest.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.RandomSeekScanTest.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.RandomSeekScanTest.html
index e38835a..e11b644 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.RandomSeekScanTest.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.RandomSeekScanTest.html
@@ -2220,698 +2220,702 @@

[05/43] hbase-site git commit: Published site at 993e74b7471a7fe483eae59ab198bb2533c55031.

2018-08-27 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e312f773/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.Test.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.Test.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.Test.html
index e38835a..e11b644 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.Test.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.Test.html
@@ -2220,698 +2220,702 @@
 2212
 2213    FilteredScanTest(Connection con, TestOptions options, Status status) {
 2214      super(con, options, status);
-2215    }
-2216
-2217    @Override
-2218    boolean testRow(int i) throws IOException {
-2219      byte[] value = generateData(this.rand, getValueLength(this.rand));
-2220      Scan scan = constructScan(value);
-2221      ResultScanner scanner = null;
-2222      try {
-2223        scanner = this.table.getScanner(scan);
-2224        for (Result r = null; (r = scanner.next()) != null;) {
-2225          updateValueSize(r);
-2226        }
-2227      } finally {
-2228        if (scanner != null) {
-2229          updateScanMetrics(scanner.getScanMetrics());
-2230          scanner.close();
-2231        }
-2232      }
-2233      return true;
-2234    }
-2235
-2236    protected Scan constructScan(byte[] valuePrefix) throws IOException {
-2237      FilterList list = new FilterList();
-2238      Filter filter = new SingleColumnValueFilter(FAMILY_ZERO, COLUMN_ZERO,
-2239        CompareOperator.EQUAL, new BinaryComparator(valuePrefix));
-2240      list.addFilter(filter);
-2241      if (opts.filterAll) {
-2242        list.addFilter(new FilterAllFilter());
-2243      }
-2244      Scan scan = new Scan().setCaching(opts.caching).setCacheBlocks(opts.cacheBlocks)
-2245          .setAsyncPrefetch(opts.asyncPrefetch).setReadType(opts.scanReadType)
-2246          .setScanMetricsEnabled(true);
-2247      if (opts.addColumns) {
-2248        for (int column = 0; column < opts.columns; column++) {
-2249          byte [] qualifier = column == 0? COLUMN_ZERO: Bytes.toBytes("" + column);
-2250          scan.addColumn(FAMILY_ZERO, qualifier);
-2251        }
-2252      } else {
-2253        scan.addFamily(FAMILY_ZERO);
-2254      }
-2255      scan.setFilter(list);
-2256      return scan;
-2257    }
-2258  }
-2259
-2260  /**
-2261   * Compute a throughput rate in MB/s.
-2262   * @param rows Number of records consumed.
-2263   * @param timeMs Time taken in milliseconds.
-2264   * @return String value with label, ie '123.76 MB/s'
-2265   */
-2266  private static String calculateMbps(int rows, long timeMs, final int valueSize, int families, int columns) {
-2267    BigDecimal rowSize = BigDecimal.valueOf(ROW_LENGTH +
-2268      ((valueSize + (FAMILY_NAME_BASE.length()+1) + COLUMN_ZERO.length) * columns) * families);
-2269    BigDecimal mbps = BigDecimal.valueOf(rows).multiply(rowSize, CXT)
-2270      .divide(BigDecimal.valueOf(timeMs), CXT).multiply(MS_PER_SEC, CXT)
-2271      .divide(BYTES_PER_MB, CXT);
-2272    return FMT.format(mbps) + " MB/s";
-2273  }
-2274
-2275  /*
-2276   * Format passed integer.
-2277   * @param number
-2278   * @return Returns zero-prefixed ROW_LENGTH-byte wide decimal version of passed
-2279   * number (Does absolute in case number is negative).
-2280   */
-2281  public static byte [] format(final int number) {
-2282    byte [] b = new byte[ROW_LENGTH];
-2283    int d = Math.abs(number);
-2284    for (int i = b.length - 1; i >= 0; i--) {
-2285      b[i] = (byte)((d % 10) + '0');
-2286      d /= 10;
-2287    }
-2288    return b;
-2289  }
-2290
-2291  /*
-2292   * This method takes some time and is done inline uploading data.  For
-2293   * example, doing the mapfile test, generation of the key and value
-2294   * consumes about 30% of CPU time.
-2295   * @return Generated random value to insert into a table cell.
-2296   */
-2297  public static byte[] generateData(final Random r, int length) {
-2298    byte [] b = new byte [length];
-2299    int i;
-2300
-2301    for(i = 0; i < (length-8); i += 8) {
-2302      b[i] = (byte) (65 + r.nextInt(26));
-2303      b[i+1] = b[i];
-2304      b[i+2] = b[i];
-2305      b[i+3] = b[i];
-2306      b[i+4] = b[i];
-2307      b[i+5] = b[i];
-2308      b[i+6] = b[i];
-2309      b[i+7] = b[i];
-2310    }
-2311
-2312    byte a = (byte) (65 + r.nextInt(26));
-2313    for(; i < length; i++) {
-2314      b[i] = a;
-2315    }
-2316    return b;
-2317  }
-2318
-2319  static byte [] getRandomRow(final Random random, final int totalRows) {
-2320    return format(generateRandomRow(random, totalRows));
+2215      if (opts.perClientRunRows == DEFAULT_ROWS_PER_GB) {
+2216        LOG.warn("Option \"rows\" unspecified. Using default value " + DEFAULT_ROWS_PER_GB +
+2217            ". This could take a very long time.");
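
The calculateMbps helper above reduces to MB/s = rows * rowSize / timeMs * 1000 / 2^20, where rowSize estimates the bytes moved per row. The following self-contained Java sketch reproduces that arithmetic outside the class; CXT, FMT, ROW_LENGTH and the family/qualifier lengths are illustrative stand-ins for PerformanceEvaluation's actual fields, not values taken from the source.

import java.math.BigDecimal;
import java.math.MathContext;
import java.text.DecimalFormat;

/** Standalone sketch of PE's throughput arithmetic; all constants are assumed stand-ins. */
public class MbpsSketch {
  static final MathContext CXT = MathContext.DECIMAL64;          // stand-in for PE's CXT
  static final BigDecimal MS_PER_SEC = BigDecimal.valueOf(1000);
  static final BigDecimal BYTES_PER_MB = BigDecimal.valueOf(1024 * 1024);
  static final DecimalFormat FMT = new DecimalFormat("0.##");    // stand-in for PE's FMT
  static final int ROW_LENGTH = 26;       // assumed row-key width
  static final int FAMILY_NAME_LEN = 5;   // assumed, mirrors FAMILY_NAME_BASE.length() + 1
  static final int QUALIFIER_LEN = 1;     // assumed, mirrors COLUMN_ZERO.length

  static String calculateMbps(int rows, long timeMs, int valueSize, int families, int columns) {
    // Estimated bytes per row: key plus (value + family + qualifier) per column per family.
    BigDecimal rowSize = BigDecimal.valueOf(
        ROW_LENGTH + ((valueSize + FAMILY_NAME_LEN + QUALIFIER_LEN) * columns) * families);
    // rows * rowSize / timeMs is bytes per millisecond; scale to bytes/s, then to MB/s.
    BigDecimal mbps = BigDecimal.valueOf(rows).multiply(rowSize, CXT)
        .divide(BigDecimal.valueOf(timeMs), CXT).multiply(MS_PER_SEC, CXT)
        .divide(BYTES_PER_MB, CXT);
    return FMT.format(mbps) + " MB/s";
  }

  public static void main(String[] args) {
    // 1,048,576 rows of 1000-byte values, one family and one column, in 60s: about 17.2 MB/s.
    System.out.println(calculateMbps(1024 * 1024, 60_000, 1000, 1, 1));
  }
}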

[12/43] hbase-site git commit: Published site at 993e74b7471a7fe483eae59ab198bb2533c55031.

2018-08-27 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e312f773/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.RandomWriteTest.html
--
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.RandomWriteTest.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.RandomWriteTest.html
index e38835a..e11b644 100644

[08/43] hbase-site git commit: Published site at 993e74b7471a7fe483eae59ab198bb2533c55031.

2018-08-27 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e312f773/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.SequentialWriteTest.html
--
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.SequentialWriteTest.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.SequentialWriteTest.html
index e38835a..e11b644 100644

[04/43] hbase-site git commit: Published site at 993e74b7471a7fe483eae59ab198bb2533c55031.

2018-08-27 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e312f773/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.TestBase.html
--
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.TestBase.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.TestBase.html
index e38835a..e11b644 100644

[10/43] hbase-site git commit: Published site at 993e74b7471a7fe483eae59ab198bb2533c55031.

2018-08-27 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e312f773/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.ScanTest.html
--
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.ScanTest.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.ScanTest.html
index e38835a..e11b644 100644

hbase git commit: HBASE-21072 Addendum do not write lock file when running TestHBaseFsckReplication

2018-08-27 Thread zhangduo
Repository: hbase
Updated Branches:
  refs/heads/branch-2.1 e10d48df5 -> 625be5137


HBASE-21072 Addendum do not write lock file when running TestHBaseFsckReplication


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/625be513
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/625be513
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/625be513

Branch: refs/heads/branch-2.1
Commit: 625be5137e36f1460a0cefdd445861b579826e8e
Parents: e10d48d
Author: zhangduo 
Authored: Mon Aug 27 21:02:54 2018 +0800
Committer: zhangduo 
Committed: Mon Aug 27 21:05:16 2018 +0800

--
 .../java/org/apache/hadoop/hbase/util/TestHBaseFsckReplication.java | 1 +
 1 file changed, 1 insertion(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/625be513/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckReplication.java
--
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckReplication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckReplication.java
index 8911982..3ae09f6 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckReplication.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckReplication.java
@@ -49,6 +49,7 @@ public class TestHBaseFsckReplication {
 
   @BeforeClass
   public static void setUp() throws Exception {
+    UTIL.getConfiguration().setBoolean("hbase.write.hbck1.lock.file", false);
     UTIL.startMiniCluster(1);
   }
 
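The one-line addendum above keeps the mini cluster from writing the hbck1 lock file. A minimal sketch of the same setup in a hypothetical test class follows; the property key comes from the patch, the class name is invented, and the flag is set before startMiniCluster because that is where the patch places it.

import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.junit.AfterClass;
import org.junit.BeforeClass;

public class TestWithoutHbck1LockFile { // hypothetical test class
  private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();

  @BeforeClass
  public static void setUp() throws Exception {
    // Skip the hbck1 lock file so concurrent test runs do not collide on a shared lock.
    UTIL.getConfiguration().setBoolean("hbase.write.hbck1.lock.file", false);
    UTIL.startMiniCluster(1); // the flag must be set before the cluster starts
  }

  @AfterClass
  public static void tearDown() throws Exception {
    UTIL.shutdownMiniCluster();
  }
}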



hbase git commit: HBASE-21072 Addendum do not write lock file when running TestHBaseFsckReplication

2018-08-27 Thread zhangduo
Repository: hbase
Updated Branches:
  refs/heads/branch-2 9c19692c3 -> 4f32883e9


HBASE-21072 Addendum do not write lock file when running TestHBaseFsckReplication


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/4f32883e
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/4f32883e
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/4f32883e

Branch: refs/heads/branch-2
Commit: 4f32883e95c7ac3647f7f60298a27d20bcededab
Parents: 9c19692
Author: zhangduo 
Authored: Mon Aug 27 21:02:54 2018 +0800
Committer: zhangduo 
Committed: Mon Aug 27 21:04:48 2018 +0800

--
 .../java/org/apache/hadoop/hbase/util/TestHBaseFsckReplication.java | 1 +
 1 file changed, 1 insertion(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/4f32883e/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckReplication.java
--
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckReplication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckReplication.java
index 8911982..3ae09f6 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckReplication.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckReplication.java
@@ -49,6 +49,7 @@ public class TestHBaseFsckReplication {
 
   @BeforeClass
   public static void setUp() throws Exception {
+    UTIL.getConfiguration().setBoolean("hbase.write.hbck1.lock.file", false);
     UTIL.startMiniCluster(1);
   }
 



hbase git commit: HBASE-21072 Addendum do not write lock file when running TestHBaseFsckReplication

2018-08-27 Thread zhangduo
Repository: hbase
Updated Branches:
  refs/heads/master dd9880e0d -> 993e74b74


HBASE-21072 Addendum do not write lock file when running TestHBaseFsckReplication


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/993e74b7
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/993e74b7
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/993e74b7

Branch: refs/heads/master
Commit: 993e74b7471a7fe483eae59ab198bb2533c55031
Parents: dd9880e
Author: zhangduo 
Authored: Mon Aug 27 21:02:54 2018 +0800
Committer: zhangduo 
Committed: Mon Aug 27 21:03:32 2018 +0800

--
 .../java/org/apache/hadoop/hbase/util/TestHBaseFsckReplication.java | 1 +
 1 file changed, 1 insertion(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/993e74b7/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckReplication.java
--
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckReplication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckReplication.java
index f5eca39..77ed04f 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckReplication.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckReplication.java
@@ -50,6 +50,7 @@ public class TestHBaseFsckReplication {
 
   @BeforeClass
   public static void setUp() throws Exception {
+    UTIL.getConfiguration().setBoolean("hbase.write.hbck1.lock.file", false);
     UTIL.startMiniCluster(1);
   }
 



hbase git commit: HBASE-20890 PE filterScan seems to be stuck forever

2018-08-27 Thread vikasv
Repository: hbase
Updated Branches:
  refs/heads/master 8e1002913 -> dd9880e0d


HBASE-20890 PE filterScan seems to be stuck forever


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/dd9880e0
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/dd9880e0
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/dd9880e0

Branch: refs/heads/master
Commit: dd9880e0de13fc7dac1c570f90f00bc914d1fad5
Parents: 8e10029
Author: Vikas Vishwakarma 
Authored: Mon Aug 27 13:05:29 2018 +0530
Committer: Vikas Vishwakarma 
Committed: Mon Aug 27 13:05:29 2018 +0530

--
 .../test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java | 4 ++++
 1 file changed, 4 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/dd9880e0/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
--
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
index 1a72ece..04f5aae 100644
--- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
@@ -2212,6 +2212,10 @@ public class PerformanceEvaluation extends Configured implements Tool {
 
     FilteredScanTest(Connection con, TestOptions options, Status status) {
       super(con, options, status);
+      if (opts.perClientRunRows == DEFAULT_ROWS_PER_GB) {
+        LOG.warn("Option \"rows\" unspecified. Using default value " + DEFAULT_ROWS_PER_GB +
+            ". This could take a very long time.");
+      }
     }
 
     @Override



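FilteredScanTest runs a filtered full-table scan for every row it tests, so leaving "rows" at its default makes filterScan look hung; the patch warns rather than changing the behavior. A standalone sketch of that warn-on-default guard follows; the constant's value here is an assumption, and PE derives its real default from the configured value size.

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class RowsOptionGuard { // illustrative class, not part of PE
  private static final Logger LOG = LoggerFactory.getLogger(RowsOptionGuard.class);
  // Assumed stand-in for PE's DEFAULT_ROWS_PER_GB.
  static final int DEFAULT_ROWS_PER_GB = 1024 * 1024;

  static void warnIfRowsUnspecified(int perClientRunRows) {
    // A value equal to the default usually means the caller never passed an explicit row count.
    if (perClientRunRows == DEFAULT_ROWS_PER_GB) {
      LOG.warn("Option \"rows\" unspecified. Using default value " + DEFAULT_ROWS_PER_GB
          + ". This could take a very long time.");
    }
  }
}

Passing an explicit --rows value when invoking PerformanceEvaluation both silences the warning and keeps a filterScan run bounded.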
hbase git commit: HBASE-20890 PE filterScan seems to be stuck forever

2018-08-27 Thread vikasv
Repository: hbase
Updated Branches:
  refs/heads/branch-2 2369afd7e -> 9c19692c3


HBASE-20890 PE filterScan seems to be stuck forever


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/9c19692c
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/9c19692c
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/9c19692c

Branch: refs/heads/branch-2
Commit: 9c19692c372290568df029e45a84051525623aac
Parents: 2369afd
Author: Vikas Vishwakarma 
Authored: Mon Aug 27 13:01:52 2018 +0530
Committer: Vikas Vishwakarma 
Committed: Mon Aug 27 13:01:52 2018 +0530

--
 .../test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java | 4 ++++
 1 file changed, 4 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/9c19692c/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
--
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
index 1a72ece..04f5aae 100644
--- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
@@ -2212,6 +2212,10 @@ public class PerformanceEvaluation extends Configured implements Tool {
 
     FilteredScanTest(Connection con, TestOptions options, Status status) {
       super(con, options, status);
+      if (opts.perClientRunRows == DEFAULT_ROWS_PER_GB) {
+        LOG.warn("Option \"rows\" unspecified. Using default value " + DEFAULT_ROWS_PER_GB +
+            ". This could take a very long time.");
+      }
     }
 
     @Override



hbase git commit: HBASE-20890 PE filterScan seems to be stuck forever

2018-08-27 Thread vikasv
Repository: hbase
Updated Branches:
  refs/heads/branch-1 3258b83f3 -> 3c17c8391


HBASE-20890 PE filterScan seems to be stuck forever


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/3c17c839
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/3c17c839
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/3c17c839

Branch: refs/heads/branch-1
Commit: 3c17c8391e189246409de3f5f8b98e47750e0358
Parents: 3258b83
Author: Vikas Vishwakarma 
Authored: Mon Aug 27 12:51:55 2018 +0530
Committer: Vikas Vishwakarma 
Committed: Mon Aug 27 12:51:55 2018 +0530

--
 .../test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java | 4 ++++
 1 file changed, 4 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/3c17c839/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
--
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
index 8de5e32..4455a0b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
@@ -1790,6 +1790,10 @@ public class PerformanceEvaluation extends Configured implements Tool {
 
     FilteredScanTest(Connection con, TestOptions options, Status status) {
       super(con, options, status);
+      if (opts.perClientRunRows == DEFAULT_ROWS_PER_GB) {
+        LOG.warn("Option \"rows\" unspecified. Using default value " + DEFAULT_ROWS_PER_GB
+            + ". This could take a very long time.");
+      }
     }
 
     @Override



hbase git commit: HBASE-20890 PE filterScan seems to be stuck forever

2018-08-27 Thread vikasv
Repository: hbase
Updated Branches:
  refs/heads/branch-1.4 d965436ee -> 525a6f9f8


HBASE-20890 PE filterScan seems to be stuck forever


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/525a6f9f
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/525a6f9f
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/525a6f9f

Branch: refs/heads/branch-1.4
Commit: 525a6f9f80b3f6e544026f78bc7fdf579584e2c3
Parents: d965436
Author: Vikas Vishwakarma 
Authored: Mon Aug 27 12:44:55 2018 +0530
Committer: Vikas Vishwakarma 
Committed: Mon Aug 27 12:44:55 2018 +0530

--
 .../test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java | 4 ++++
 1 file changed, 4 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/525a6f9f/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
--
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
index 8de5e32..4455a0b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
@@ -1790,6 +1790,10 @@ public class PerformanceEvaluation extends Configured implements Tool {
 
     FilteredScanTest(Connection con, TestOptions options, Status status) {
       super(con, options, status);
+      if (opts.perClientRunRows == DEFAULT_ROWS_PER_GB) {
+        LOG.warn("Option \"rows\" unspecified. Using default value " + DEFAULT_ROWS_PER_GB
+            + ". This could take a very long time.");
+      }
     }
 
     @Override



hbase git commit: HBASE-20890 PE filterScan seems to be stuck forever

2018-08-27 Thread vikasv
Repository: hbase
Updated Branches:
  refs/heads/branch-1.3 424706ab4 -> 2750a574a


HBASE-20890 PE filterScan seems to be stuck forever


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/2750a574
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/2750a574
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/2750a574

Branch: refs/heads/branch-1.3
Commit: 2750a574a08d635ab71bfe21750f055a66831aab
Parents: 424706a
Author: Vikas Vishwakarma 
Authored: Mon Aug 27 12:39:17 2018 +0530
Committer: Vikas Vishwakarma 
Committed: Mon Aug 27 12:39:17 2018 +0530

--
 .../test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java | 4 ++++
 1 file changed, 4 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/2750a574/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
--
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
index dbc13e6..66a2dce 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
@@ -1759,6 +1759,10 @@ public class PerformanceEvaluation extends Configured implements Tool {
 
     FilteredScanTest(Connection con, TestOptions options, Status status) {
       super(con, options, status);
+      if (opts.perClientRunRows == DEFAULT_ROWS_PER_GB) {
+        LOG.warn("Option \"rows\" unspecified. Using default value " + DEFAULT_ROWS_PER_GB
+            + ". This could take a very long time.");
+      }
     }
 
     @Override