hbase git commit: MoveRegionProcedure was not passing its Region to super class. NPEs when locking.

2017-05-11 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/HBASE-14614 a23fcc97d -> d64de305f


MoveRegionProcedure was not passing its Region to super class. NPEs when
locking.
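
A minimal sketch of the fix pattern implied by this message and the diff
below; the constructor and super signature here are assumptions for
illustration, not the patch itself. The idea is that the procedure now
hands its region to the region-based super class, which is what the
lock-acquisition path dereferences.

  // Illustrative sketch only; see the real change in MoveRegionProcedure.java below.
  public class MoveRegionProcedure extends AbstractStateMachineRegionProcedure {
    private RegionPlan plan;

    public MoveRegionProcedure(final MasterProcedureEnv env, final RegionPlan plan) {
      // Before this fix the region never reached the super class, so taking the
      // region lock dereferenced a null region and threw an NPE.
      super(env, plan.getRegionInfo()); // assumed super signature: (env, HRegionInfo)
      this.plan = plan;
    }
  }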


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/d64de305
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/d64de305
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/d64de305

Branch: refs/heads/HBASE-14614
Commit: d64de305fd162c2b502a6d5bdc477f51e32f1e6c
Parents: a23fcc9
Author: Michael Stack 
Authored: Thu May 11 20:22:33 2017 -0700
Committer: Michael Stack 
Committed: Thu May 11 20:22:33 2017 -0700

--
 .../org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java| 2 +-
 .../apache/hadoop/hbase/master/assignment/AssignmentManager.java | 4 +---
 .../hadoop/hbase/master/assignment/MoveRegionProcedure.java  | 3 ++-
 .../master/procedure/AbstractStateMachineTableProcedure.java | 3 +++
 4 files changed, 7 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/d64de305/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
index f065a98..fa3df04 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
@@ -1669,7 +1669,7 @@ public class ProcedureExecutor {
   }
 }
   } catch (Throwable t) {
-LOG.warn("Worker terminating because", t);
+LOG.warn("Worker terminating UNNATURALLY " + this.activeProcedure, t);
   } finally {
 LOG.debug("Worker terminated.");
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/d64de305/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
index e567d2d..eceb624 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
@@ -720,9 +720,7 @@ public class AssignmentManager implements ServerListener {
   }
 
   public MoveRegionProcedure createMoveRegionProcedure(final RegionPlan plan) {
-MoveRegionProcedure proc = new MoveRegionProcedure(plan);
-proc.setOwner(getProcedureEnvironment().getRequestUser().getShortName());
-return proc;
+return new MoveRegionProcedure(getProcedureEnvironment(), plan);
   }
 
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/d64de305/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MoveRegionProcedure.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MoveRegionProcedure.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MoveRegionProcedure.java
index 6cc04e4..f998af8 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MoveRegionProcedure.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MoveRegionProcedure.java
@@ -52,7 +52,8 @@ public class MoveRegionProcedure extends AbstractStateMachineRegionProcedure

http://git-wip-us.apache.org/repos/asf/hbase/blob/d64de305/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AbstractStateMachineTableProcedure.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AbstractStateMachineTableProcedure.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AbstractStateMachineTableProcedure.java
index eca963d..1417159 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AbstractStateMachineTableProcedure.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AbstractStateMachineTableProcedure.java
@@ -50,6 +50,9 @@ public abstract class 
AbstractStateMachineTableProcedure
 this(env, null);
   }
 
+  /**
+   * @param env Uses this to set Procedure Owner at least.
+   */
   protected AbstractStateMachineTableProcedure(final MasterProcedureEnv env,
   final ProcedurePrepareLatch latch) {
 if (env != 

hbase git commit: HBASE-11013: Clone Snapshots on Secure Cluster Should provide option to apply Retained User Permissions - revert, pending work in snapshot descriptor

2017-05-11 Thread tedyu
Repository: hbase
Updated Branches:
  refs/heads/master b3dcfb659 -> 5e046151d


HBASE-11013: Clone Snapshots on Secure Cluster Should provide option to apply 
Retained User Permissions - revert, pending work in snapshot descriptor


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/5e046151
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/5e046151
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/5e046151

Branch: refs/heads/master
Commit: 5e046151d6280e035f4448c72f9a180d59a336e2
Parents: b3dcfb6
Author: tedyu 
Authored: Thu May 11 18:53:14 2017 -0700
Committer: tedyu 
Committed: Thu May 11 18:53:14 2017 -0700

--
 .../org/apache/hadoop/hbase/client/Admin.java   |  13 -
 .../apache/hadoop/hbase/client/HBaseAdmin.java  |  25 +-
 .../hbase/security/access/TablePermission.java  |   4 -
 .../shaded/protobuf/generated/MasterProtos.java | 597 ---
 .../src/main/protobuf/Master.proto  |   1 -
 .../org/apache/hadoop/hbase/master/HMaster.java |   5 +-
 .../hadoop/hbase/master/MasterRpcServices.java  |   4 +-
 .../procedure/CloneSnapshotProcedure.java   |  21 +-
 .../hbase/master/snapshot/SnapshotManager.java  |  17 +-
 .../master/snapshot/TakeSnapshotHandler.java|   3 -
 .../security/access/AccessControlLists.java |   2 +-
 .../snapshot/SnapshotDescriptionUtils.java  |  97 +--
 .../hbase/client/TestSnapshotWithAcl.java   | 203 ---
 .../hbase/security/access/SecureTestUtil.java   |   2 +-
 hbase-shell/src/main/ruby/hbase/admin.rb|   4 +-
 hbase-shell/src/main/ruby/hbase_constants.rb|   1 -
 .../main/ruby/shell/commands/clone_snapshot.rb  |  11 +-
 .../hbase/client/TestReplicationShell.java  |   1 +
 18 files changed, 284 insertions(+), 727 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/5e046151/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
index 2efc9cb..6e7c566 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
@@ -1535,19 +1535,6 @@ public interface Admin extends Abortable, Closeable {
 
   /**
* Create a new table by cloning the snapshot content.
-   * @param snapshotName name of the snapshot to be cloned
-   * @param tableName name of the table where the snapshot will be restored
-   * @param restoreAcl true to clone acl into newly created table
-   * @throws IOException if a remote or network exception occurs
-   * @throws TableExistsException if table to be created already exists
-   * @throws RestoreSnapshotException if snapshot failed to be cloned
-   * @throws IllegalArgumentException if the specified table has not a valid 
name
-   */
-  void cloneSnapshot(final String snapshotName, final TableName tableName, 
final boolean restoreAcl)
-  throws IOException, TableExistsException, RestoreSnapshotException;
-
-  /**
-   * Create a new table by cloning the snapshot content.
*
* @param snapshotName name of the snapshot to be cloned
* @param tableName name of the table where the snapshot will be restored

http://git-wip-us.apache.org/repos/asf/hbase/blob/5e046151/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
index 9192b10..ca5f0d2 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
@@ -2573,7 +2573,7 @@ public class HBaseAdmin implements Admin {
 try {
   // Restore snapshot
   get(
-internalRestoreSnapshotAsync(snapshotName, tableName, false),
+internalRestoreSnapshotAsync(snapshotName, tableName),
 syncWaitTimeout,
 TimeUnit.MILLISECONDS);
 } catch (IOException e) {
@@ -2582,7 +2582,7 @@ public class HBaseAdmin implements Admin {
   if (takeFailSafeSnapshot) {
 try {
   get(
-internalRestoreSnapshotAsync(failSafeSnapshotSnapshotName, 
tableName, false),
+internalRestoreSnapshotAsync(failSafeSnapshotSnapshotName, 
tableName),
 syncWaitTimeout,
 TimeUnit.MILLISECONDS);
   String msg = "Restore snapshot=" + snapshotName +
@@ -2625,7 +2625,7 @@ public class HBaseAdmin implements Admin {
   throw new 

[1/2] hbase git commit: HBASE-17928 Shell tool to clear compaction queues (Guangxu Cheng)

2017-05-11 Thread tedyu
Repository: hbase
Updated Branches:
  refs/heads/master d8d4ba7c5 -> b3dcfb659


http://git-wip-us.apache.org/repos/asf/hbase/blob/b3dcfb65/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionServerObserver.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionServerObserver.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionServerObserver.java
index 9fbf4db..140bdbe 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionServerObserver.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionServerObserver.java
@@ -171,4 +171,20 @@ public interface RegionServerObserver extends Coprocessor {
   default void postReplicateLogEntries(
   final ObserverContext ctx,
   List entries, CellScanner cells) throws IOException {}
+
+  /**
+   * This will be called before clearing compaction queues
+   * @param ctx the environment to interact with the framework and region 
server.
+   */
+  default void preClearCompactionQueues(
+  final ObserverContext ctx)
+  throws IOException {}
+
+  /**
+   * This will be called after clearing compaction queues
+   * @param ctx the environment to interact with the framework and region 
server.
+   */
+  default void postClearCompactionQueues(
+  final ObserverContext ctx)
+  throws IOException {}
 }
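
The two default methods above give RegionServer coprocessors a hook around
queue clearing. A hedged sketch of an observer using them follows; the
generic parameter on ObserverContext was stripped from the quoted diff and
is assumed to be RegionServerCoprocessorEnvironment, and the class name and
log messages are illustrative only.

  import java.io.IOException;
  import org.apache.commons.logging.Log;
  import org.apache.commons.logging.LogFactory;
  import org.apache.hadoop.hbase.CoprocessorEnvironment;
  import org.apache.hadoop.hbase.coprocessor.ObserverContext;
  import org.apache.hadoop.hbase.coprocessor.RegionServerCoprocessorEnvironment;
  import org.apache.hadoop.hbase.coprocessor.RegionServerObserver;

  // Sketch of a coprocessor that audits compaction-queue clearing.
  public class QueueClearAuditor implements RegionServerObserver {
    private static final Log LOG = LogFactory.getLog(QueueClearAuditor.class);

    @Override
    public void preClearCompactionQueues(
        final ObserverContext<RegionServerCoprocessorEnvironment> ctx) throws IOException {
      LOG.info("Compaction queues are about to be cleared");
    }

    @Override
    public void postClearCompactionQueues(
        final ObserverContext<RegionServerCoprocessorEnvironment> ctx) throws IOException {
      LOG.info("Compaction queues were cleared");
    }

    // No-op lifecycle methods from the base Coprocessor interface.
    @Override public void start(CoprocessorEnvironment env) throws IOException { }
    @Override public void stop(CoprocessorEnvironment env) throws IOException { }
  }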

http://git-wip-us.apache.org/repos/asf/hbase/blob/b3dcfb65/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java
index 5356ee1..a74c4cb 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java
@@ -672,4 +672,12 @@ public class CompactSplitThread implements 
CompactionRequestor, PropagatingConfi
   void shutdownLongCompactions(){
 this.longCompactions.shutdown();
   }
+
+  public void clearLongCompactionsQueue() {
+longCompactions.getQueue().clear();
+  }
+
+  public void clearShortCompactionsQueue() {
+shortCompactions.getQueue().clear();
+  }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/b3dcfb65/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
index 9f1ef0b..95408b7 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
@@ -31,6 +31,7 @@ import java.util.*;
 import java.util.Map.Entry;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicLong;
 import java.util.concurrent.atomic.LongAdder;
 
@@ -110,6 +111,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
 import org.apache.hadoop.hbase.shaded.protobuf.ResponseConverter;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesRequest;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesResponse;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitOrMergeRequest;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitOrMergeResponse;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest;
@@ -268,6 +271,8 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
*/
   private final long minimumScanTimeLimitDelta;
 
+  final AtomicBoolean clearCompactionQueues = new AtomicBoolean(false);
+
   /**
* An Rpc callback for closing a RegionScanner.
*/
@@ -1611,6 +1616,44 @@ public class RSRpcServices implements 
HBaseRPCErrorHandler,
 return builder.build();
   }
 
+  @Override
+  @QosPriority(priority=HConstants.ADMIN_QOS)
+  public ClearCompactionQueuesResponse clearCompactionQueues(RpcController 
controller,
+ClearCompactionQueuesRequest request) throws ServiceException {
+LOG.debug("Client=" + RpcServer.getRequestUserName() + "/" + 
RpcServer.getRemoteAddress()
++ " clear compactions queue");
+ClearCompactionQueuesResponse.Builder 

[2/2] hbase git commit: HBASE-17928 Shell tool to clear compaction queues (Guangxu Cheng)

2017-05-11 Thread tedyu
HBASE-17928 Shell tool to clear compaction queues (Guangxu Cheng)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/b3dcfb65
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/b3dcfb65
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/b3dcfb65

Branch: refs/heads/master
Commit: b3dcfb659e0f96ff1254e13949bbedb56465e700
Parents: d8d4ba7
Author: tedyu 
Authored: Thu May 11 18:47:12 2017 -0700
Committer: tedyu 
Committed: Thu May 11 18:47:12 2017 -0700

--
 .../org/apache/hadoop/hbase/client/Admin.java   |   11 +
 .../apache/hadoop/hbase/client/HBaseAdmin.java  |   22 +
 .../hbase/shaded/protobuf/RequestConverter.java |   10 +
 .../shaded/protobuf/generated/AdminProtos.java  | 1356 +++---
 .../src/main/protobuf/Admin.proto   |   10 +
 .../hbase/coprocessor/RegionServerObserver.java |   16 +
 .../hbase/regionserver/CompactSplitThread.java  |8 +
 .../hbase/regionserver/RSRpcServices.java   |   43 +
 .../RegionServerCoprocessorHost.java|   20 +
 .../hbase/security/access/AccessController.java |6 +
 .../hadoop/hbase/master/MockRegionServer.java   |8 +
 hbase-shell/src/main/ruby/hbase/admin.rb|   26 +
 hbase-shell/src/main/ruby/shell.rb  |1 +
 .../shell/commands/clear_compaction_queues.rb   |   41 +
 14 files changed, 1418 insertions(+), 160 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/b3dcfb65/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
index 414c5ac..2efc9cb 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
@@ -24,6 +24,7 @@ import java.util.ArrayList;
 import java.util.Collection;
 import java.util.List;
 import java.util.Map;
+import java.util.Set;
 import java.util.concurrent.Future;
 import java.util.regex.Pattern;
 
@@ -2026,4 +2027,14 @@ public interface Admin extends Abortable, Closeable {
* @throws IOException if a remote or network exception occurs
*/
   void disableTableReplication(final TableName tableName) throws IOException;
+
+  /**
+   * Clear compacting queues on a regionserver.
+   * @param sn the region server name
+   * @param queues the set of queue name
+   * @throws IOException if a remote or network exception occurs
+   * @throws InterruptedException
+   */
+  void clearCompactionQueues(final ServerName sn, final Set queues)
+throws IOException, InterruptedException;
 }
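
A hedged usage sketch for the new client call; the configuration, host,
port and startcode are placeholders, and the queue names "long" and "short"
mirror the switch in RSRpcServices.clearCompactionQueues shown further down
in this digest.

  import java.io.IOException;
  import java.util.Arrays;
  import java.util.HashSet;
  import java.util.Set;
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.ServerName;
  import org.apache.hadoop.hbase.client.Admin;
  import org.apache.hadoop.hbase.client.Connection;
  import org.apache.hadoop.hbase.client.ConnectionFactory;

  public class ClearQueuesExample {
    public static void main(String[] args) throws IOException, InterruptedException {
      Configuration conf = HBaseConfiguration.create();
      // Hypothetical region server; host, port and startcode are placeholders.
      ServerName sn = ServerName.valueOf("rs1.example.com,16020,1494554400000");
      Set<String> queues = new HashSet<>(Arrays.asList("long", "short"));
      try (Connection conn = ConnectionFactory.createConnection(conf);
           Admin admin = conn.getAdmin()) {
        // Drops queued (not yet running) long and short compactions on that server.
        admin.clearCompactionQueues(sn, queues);
      }
    }
  }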

http://git-wip-us.apache.org/repos/asf/hbase/blob/b3dcfb65/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
index ac5c239..9192b10 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
@@ -92,6 +92,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesRequest;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
@@ -4227,4 +4228,25 @@ public class HBaseAdmin implements Admin {
 
 return otherConf;
   }
+
+  @Override
+  public void clearCompactionQueues(final ServerName sn, final Set 
queues)
+throws IOException, InterruptedException {
+if (queues == null || queues.size() == 0) {
+  throw new IllegalArgumentException("queues cannot be null or empty");
+}
+final AdminService.BlockingInterface admin = this.connection.getAdmin(sn);
+Callable callable = new Callable() {
+  @Override
+  public Void call() throws Exception {
+// TODO: There is no timeout on this controller. Set one!
+HBaseRpcController controller = rpcControllerFactory.newController();
+ClearCompactionQueuesRequest request =
+

[2/2] hbase git commit: HBASE-17928 Shell tool to clear compaction queues - revert pending work in snapshot descriptor

2017-05-11 Thread tedyu
HBASE-17928 Shell tool to clear compaction queues - revert pending work in 
snapshot descriptor


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/d8d4ba7c
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/d8d4ba7c
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/d8d4ba7c

Branch: refs/heads/master
Commit: d8d4ba7c59e123dd792c48d2845498e0882a3a76
Parents: d64acfd
Author: tedyu 
Authored: Thu May 11 18:43:59 2017 -0700
Committer: tedyu 
Committed: Thu May 11 18:43:59 2017 -0700

--
 .../org/apache/hadoop/hbase/client/Admin.java   |   11 -
 .../apache/hadoop/hbase/client/HBaseAdmin.java  |   22 -
 .../hbase/shaded/protobuf/RequestConverter.java |   10 -
 .../shaded/protobuf/generated/AdminProtos.java  | 1284 ++
 .../src/main/protobuf/Admin.proto   |   10 -
 .../hbase/coprocessor/RegionServerObserver.java |   16 -
 .../hbase/regionserver/CompactSplitThread.java  |8 -
 .../hbase/regionserver/RSRpcServices.java   |   43 -
 .../RegionServerCoprocessorHost.java|   20 -
 .../hbase/security/access/AccessController.java |6 -
 .../hadoop/hbase/master/MockRegionServer.java   |8 -
 hbase-shell/src/main/ruby/hbase/admin.rb|   26 -
 hbase-shell/src/main/ruby/shell.rb  |1 -
 .../shell/commands/clear_compaction_queues.rb   |   41 -
 14 files changed, 124 insertions(+), 1382 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/d8d4ba7c/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
index 2efc9cb..414c5ac 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
@@ -24,7 +24,6 @@ import java.util.ArrayList;
 import java.util.Collection;
 import java.util.List;
 import java.util.Map;
-import java.util.Set;
 import java.util.concurrent.Future;
 import java.util.regex.Pattern;
 
@@ -2027,14 +2026,4 @@ public interface Admin extends Abortable, Closeable {
* @throws IOException if a remote or network exception occurs
*/
   void disableTableReplication(final TableName tableName) throws IOException;
-
-  /**
-   * Clear compacting queues on a regionserver.
-   * @param sn the region server name
-   * @param queues the set of queue name
-   * @throws IOException if a remote or network exception occurs
-   * @throws InterruptedException
-   */
-  void clearCompactionQueues(final ServerName sn, final Set queues)
-throws IOException, InterruptedException;
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/d8d4ba7c/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
index 9192b10..ac5c239 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
@@ -92,7 +92,6 @@ import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
-import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesRequest;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
@@ -4228,25 +4227,4 @@ public class HBaseAdmin implements Admin {
 
 return otherConf;
   }
-
-  @Override
-  public void clearCompactionQueues(final ServerName sn, final Set 
queues)
-throws IOException, InterruptedException {
-if (queues == null || queues.size() == 0) {
-  throw new IllegalArgumentException("queues cannot be null or empty");
-}
-final AdminService.BlockingInterface admin = this.connection.getAdmin(sn);
-Callable callable = new Callable() {
-  @Override
-  public Void call() throws Exception {
-// TODO: There is no timeout on this controller. Set one!
-HBaseRpcController controller = rpcControllerFactory.newController();
-ClearCompactionQueuesRequest request =
-

[1/2] hbase git commit: HBASE-17928 Shell tool to clear compaction queues - revert pending work in snapshot descriptor

2017-05-11 Thread tedyu
Repository: hbase
Updated Branches:
  refs/heads/master d64acfd30 -> d8d4ba7c5


http://git-wip-us.apache.org/repos/asf/hbase/blob/d8d4ba7c/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
index 95408b7..9f1ef0b 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
@@ -31,7 +31,6 @@ import java.util.*;
 import java.util.Map.Entry;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentMap;
-import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicLong;
 import java.util.concurrent.atomic.LongAdder;
 
@@ -111,8 +110,6 @@ import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
 import org.apache.hadoop.hbase.shaded.protobuf.ResponseConverter;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
-import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesRequest;
-import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesResponse;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitOrMergeRequest;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitOrMergeResponse;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest;
@@ -271,8 +268,6 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
*/
   private final long minimumScanTimeLimitDelta;
 
-  final AtomicBoolean clearCompactionQueues = new AtomicBoolean(false);
-
   /**
* An Rpc callback for closing a RegionScanner.
*/
@@ -1616,44 +1611,6 @@ public class RSRpcServices implements 
HBaseRPCErrorHandler,
 return builder.build();
   }
 
-  @Override
-  @QosPriority(priority=HConstants.ADMIN_QOS)
-  public ClearCompactionQueuesResponse clearCompactionQueues(RpcController 
controller,
-ClearCompactionQueuesRequest request) throws ServiceException {
-LOG.debug("Client=" + RpcServer.getRequestUserName() + "/" + 
RpcServer.getRemoteAddress()
-+ " clear compactions queue");
-ClearCompactionQueuesResponse.Builder respBuilder = 
ClearCompactionQueuesResponse.newBuilder();
-requestCount.increment();
-if (clearCompactionQueues.compareAndSet(false,true)) {
-  try {
-checkOpen();
-
regionServer.getRegionServerCoprocessorHost().preClearCompactionQueues();
-for (String queueName : request.getQueueNameList()) {
-  LOG.debug("clear " + queueName + " compaction queue");
-  switch (queueName) {
-case "long":
-  regionServer.compactSplitThread.clearLongCompactionsQueue();
-  break;
-case "short":
-  regionServer.compactSplitThread.clearShortCompactionsQueue();
-  break;
-default:
-  LOG.warn("Unknown queue name " + queueName);
-  throw new IOException("Unknown queue name " + queueName);
-  }
-}
-
regionServer.getRegionServerCoprocessorHost().postClearCompactionQueues();
-  } catch (IOException ie) {
-throw new ServiceException(ie);
-  } finally {
-clearCompactionQueues.set(false);
-  }
-} else {
-  LOG.warn("Clear compactions queue is executing by other admin.");
-}
-return respBuilder.build();
-  }
-
   /**
* Get some information of the region server.
*

http://git-wip-us.apache.org/repos/asf/hbase/blob/d8d4ba7c/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerCoprocessorHost.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerCoprocessorHost.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerCoprocessorHost.java
index 9d68d1b..7732827 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerCoprocessorHost.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerCoprocessorHost.java
@@ -218,26 +218,6 @@ public class RegionServerCoprocessorHost extends
 });
   }
 
-  public void preClearCompactionQueues() throws IOException {
-execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() {
-  @Override
-  public void call(RegionServerObserver oserver,
-   ObserverContext 
ctx) throws IOException {
-oserver.preClearCompactionQueues(ctx);
-  }
-

[4/4] hbase git commit: Fix CatalogTracker. Make it use Procedures doing clean up of Region data on split/merge. Without these changes, ITBLL was failing at larger scale (3-4hours 5B rows) because we

2017-05-11 Thread stack
Fix CatalogTracker. Make it use Procedures to do the clean up of Region
data on split/merge. Without these changes, ITBLL was failing at
larger scale (3-4 hours, 5B rows) because we were splitting
already-split Regions.

Added a bunch of doc. on Procedure primitives.

Added new region-based state machine base class. Moved region-based
state machines on to it.

Found bugs in the way procedure locking was being done in a few of the
region-based Procedures. Having them all share the same base class helps here.

Added isSplittable and isMergeable to the Region Interface.

Master would split/merge even though the Regions still had
references. Fixed it so Master asks RegionServer if Region
is splittable.

Messing more w/ logging. Made all procedures log the same and report
the state the same; helps when logging is regular.

Rewrote TestCatalogTracker. Enabled TestMergeTableRegionProcedure.

Added more functionality to MockMasterServices so can use it doing
standalone testing of Procedures (made TestCatalogTracker use it
instead of its own version).
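
Per the message above, the Region interface gained split/merge capability
checks that the Master now consults before acting. An assumed shape of those
additions follows; only the method names come from the message, the
signatures and javadoc wording are illustrative.

  // Assumed additions to org.apache.hadoop.hbase.regionserver.Region; the real
  // patch is in the Region.java hunk of this commit.
  public interface Region {
    /** @return true if this region can currently be split, e.g. it holds no
     *  references back to a parent region's files. */
    boolean isSplittable();

    /** @return true if this region can currently be merged with a neighbour. */
    boolean isMergeable();
  }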


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/a23fcc97
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/a23fcc97
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/a23fcc97

Branch: refs/heads/HBASE-14614
Commit: a23fcc97dea57d58bfc3fc0ea97d8e9adaf8be8a
Parents: 9464f46
Author: Michael Stack 
Authored: Thu May 11 16:59:27 2017 -0700
Committer: Michael Stack 
Committed: Thu May 11 17:14:02 2017 -0700

--
 .../org/apache/hadoop/hbase/HRegionInfo.java|4 +
 .../apache/hadoop/hbase/MetaTableAccessor.java  |7 +-
 .../hadoop/hbase/procedure2/Procedure.java  |  161 +-
 .../hbase/procedure2/ProcedureExecutor.java |   36 +-
 .../hbase/procedure2/StateMachineProcedure.java |6 +-
 .../shaded/protobuf/generated/AdminProtos.java  |  483 +++-
 .../generated/MasterProcedureProtos.java| 2318 --
 .../shaded/protobuf/generated/MasterProtos.java |   32 +
 .../src/main/protobuf/Admin.proto   |4 +
 .../src/main/protobuf/Master.proto  |1 +
 .../src/main/protobuf/MasterProcedure.proto |   22 +
 .../hadoop/hbase/backup/HFileArchiver.java  |   15 +-
 .../hadoop/hbase/master/CatalogJanitor.java |   79 +-
 .../hadoop/hbase/master/TableStateManager.java  |3 +-
 .../master/assignment/AssignProcedure.java  |   61 +-
 .../assignment/GCMergedRegionsProcedure.java|  170 ++
 .../master/assignment/GCRegionProcedure.java|  154 ++
 .../assignment/MergeTableRegionsProcedure.java  |  131 +-
 .../master/assignment/MoveRegionProcedure.java  |   22 +-
 .../master/assignment/RegionStateStore.java |8 +-
 .../hbase/master/assignment/RegionStates.java   |   12 +-
 .../assignment/RegionTransitionProcedure.java   |   21 +-
 .../assignment/SplitTableRegionProcedure.java   |  125 +-
 .../master/assignment/UnassignProcedure.java|   23 +-
 .../hadoop/hbase/master/assignment/Util.java|   60 +
 .../hbase/master/balancer/BaseLoadBalancer.java |2 -
 .../AbstractStateMachineRegionProcedure.java|  118 +
 .../AbstractStateMachineTableProcedure.java |   11 +-
 .../DispatchMergingRegionsProcedure.java|2 +-
 .../procedure/MasterProcedureScheduler.java |   10 +-
 .../master/procedure/ServerCrashProcedure.java  |8 +-
 .../procedure/TableProcedureInterface.java  |3 +-
 .../hadoop/hbase/regionserver/HRegion.java  |6 +-
 .../hbase/regionserver/HRegionFileSystem.java   |3 +-
 .../hbase/regionserver/RSRpcServices.java   |2 +
 .../hadoop/hbase/regionserver/Region.java   |8 +
 .../hadoop/hbase/HBaseTestingUtility.java   |2 +-
 .../hbase/client/TestAsyncRegionAdminApi.java   |3 +-
 .../hadoop/hbase/master/TestCatalogJanitor.java |  596 ++---
 .../master/assignment/MockMasterServices.java   |  184 +-
 .../TestMergeTableRegionsProcedure.java |   44 +-
 .../TestSplitTableRegionProcedure.java  |   20 +-
 ...ProcedureSchedulerPerformanceEvaluation.java |2 +-
 .../procedure/TestMasterProcedureScheduler.java |   20 +-
 44 files changed, 3839 insertions(+), 1163 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/a23fcc97/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java
index 5b9cbec..d470ffa 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java
@@ -168,6 +168,10 @@ public class HRegionInfo implements 
Comparable {
 return 

[2/4] hbase git commit: Fix CatalogTracker. Make it use Procedures doing clean up of Region data on split/merge. Without these changes, ITBLL was failing at larger scale (3-4hours 5B rows) because we

2017-05-11 Thread stack
http://git-wip-us.apache.org/repos/asf/hbase/blob/a23fcc97/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProtos.java
--
diff --git 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProtos.java
 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProtos.java
index 64732a7..4e3e784 100644
--- 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProtos.java
+++ 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProtos.java
@@ -39686,10 +39686,18 @@ public final class MasterProtos {
   org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder {
 
 /**
+ * 
+ * This is how many archiving tasks we started as a result of this scan.
+ * 
+ *
  * optional int32 scan_result = 1;
  */
 boolean hasScanResult();
 /**
+ * 
+ * This is how many archiving tasks we started as a result of this scan.
+ * 
+ *
  * optional int32 scan_result = 1;
  */
 int getScanResult();
@@ -39770,12 +39778,20 @@ public final class MasterProtos {
 public static final int SCAN_RESULT_FIELD_NUMBER = 1;
 private int scanResult_;
 /**
+ * 
+ * This is how many archiving tasks we started as a result of this scan.
+ * 
+ *
  * optional int32 scan_result = 1;
  */
 public boolean hasScanResult() {
   return ((bitField0_ & 0x0001) == 0x0001);
 }
 /**
+ * 
+ * This is how many archiving tasks we started as a result of this scan.
+ * 
+ *
  * optional int32 scan_result = 1;
  */
 public int getScanResult() {
@@ -40069,18 +40085,30 @@ public final class MasterProtos {
 
   private int scanResult_ ;
   /**
+   * 
+   * This is how many archiving tasks we started as a result of this scan.
+   * 
+   *
* optional int32 scan_result = 1;
*/
   public boolean hasScanResult() {
 return ((bitField0_ & 0x0001) == 0x0001);
   }
   /**
+   * 
+   * This is how many archiving tasks we started as a result of this scan.
+   * 
+   *
* optional int32 scan_result = 1;
*/
   public int getScanResult() {
 return scanResult_;
   }
   /**
+   * 
+   * This is how many archiving tasks we started as a result of this scan.
+   * 
+   *
* optional int32 scan_result = 1;
*/
   public Builder setScanResult(int value) {
@@ -40090,6 +40118,10 @@ public final class MasterProtos {
 return this;
   }
   /**
+   * 
+   * This is how many archiving tasks we started as a result of this scan.
+   * 
+   *
* optional int32 scan_result = 1;
*/
   public Builder clearScanResult() {

http://git-wip-us.apache.org/repos/asf/hbase/blob/a23fcc97/hbase-protocol-shaded/src/main/protobuf/Admin.proto
--
diff --git a/hbase-protocol-shaded/src/main/protobuf/Admin.proto 
b/hbase-protocol-shaded/src/main/protobuf/Admin.proto
index 6e851e6..2e64684 100644
--- a/hbase-protocol-shaded/src/main/protobuf/Admin.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/Admin.proto
@@ -38,6 +38,10 @@ message GetRegionInfoResponse {
   required RegionInfo region_info = 1;
   optional CompactionState compaction_state = 2;
   optional bool isRecovering = 3;
+  // True if region is splittable, false otherwise.
+  optional bool splittable = 4;
+  // True if region is mergeable, false otherwise.
+  optional bool mergeable = 5;
 
   enum CompactionState {
 NONE = 0;

http://git-wip-us.apache.org/repos/asf/hbase/blob/a23fcc97/hbase-protocol-shaded/src/main/protobuf/Master.proto
--
diff --git a/hbase-protocol-shaded/src/main/protobuf/Master.proto 
b/hbase-protocol-shaded/src/main/protobuf/Master.proto
index 02b0d2c..889f548 100644
--- a/hbase-protocol-shaded/src/main/protobuf/Master.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/Master.proto
@@ -365,6 +365,7 @@ message RunCatalogScanRequest {
 }
 
 message RunCatalogScanResponse {
+  // This is how many archiving tasks we started as a result of this scan.
   optional int32 scan_result = 1;
 }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/a23fcc97/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
--
diff --git a/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto 
b/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
index 6b7206f..f0668d8 100644
--- a/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
+++ 

[3/4] hbase git commit: Fix CatalogTracker. Make it use Procedures doing clean up of Region data on split/merge. Without these changes, ITBLL was failing at larger scale (3-4hours 5B rows) because we

2017-05-11 Thread stack
http://git-wip-us.apache.org/repos/asf/hbase/blob/a23fcc97/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProcedureProtos.java
--
diff --git 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProcedureProtos.java
 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProcedureProtos.java
index a5e2eaa..88677e4 100644
--- 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProcedureProtos.java
+++ 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProcedureProtos.java
@@ -2515,6 +2515,204 @@ public final class MasterProcedureProtos {
 // @@protoc_insertion_point(enum_scope:hbase.pb.MoveRegionState)
   }
 
+  /**
+   * Protobuf enum {@code hbase.pb.GCRegionState}
+   */
+  public enum GCRegionState
+  implements 
org.apache.hadoop.hbase.shaded.com.google.protobuf.ProtocolMessageEnum {
+/**
+ * GC_REGION_PREPARE = 1;
+ */
+GC_REGION_PREPARE(1),
+/**
+ * GC_REGION_ARCHIVE = 2;
+ */
+GC_REGION_ARCHIVE(2),
+/**
+ * GC_REGION_PURGE_METADATA = 3;
+ */
+GC_REGION_PURGE_METADATA(3),
+;
+
+/**
+ * GC_REGION_PREPARE = 1;
+ */
+public static final int GC_REGION_PREPARE_VALUE = 1;
+/**
+ * GC_REGION_ARCHIVE = 2;
+ */
+public static final int GC_REGION_ARCHIVE_VALUE = 2;
+/**
+ * GC_REGION_PURGE_METADATA = 3;
+ */
+public static final int GC_REGION_PURGE_METADATA_VALUE = 3;
+
+
+public final int getNumber() {
+  return value;
+}
+
+/**
+ * @deprecated Use {@link #forNumber(int)} instead.
+ */
+@java.lang.Deprecated
+public static GCRegionState valueOf(int value) {
+  return forNumber(value);
+}
+
+public static GCRegionState forNumber(int value) {
+  switch (value) {
+case 1: return GC_REGION_PREPARE;
+case 2: return GC_REGION_ARCHIVE;
+case 3: return GC_REGION_PURGE_METADATA;
+default: return null;
+  }
+}
+
+public static 
org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.EnumLiteMap
+internalGetValueMap() {
+  return internalValueMap;
+}
+private static final 
org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.EnumLiteMap<
+GCRegionState> internalValueMap =
+  new 
org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.EnumLiteMap()
 {
+public GCRegionState findValueByNumber(int number) {
+  return GCRegionState.forNumber(number);
+}
+  };
+
+public final 
org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.EnumValueDescriptor
+getValueDescriptor() {
+  return getDescriptor().getValues().get(ordinal());
+}
+public final 
org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.EnumDescriptor
+getDescriptorForType() {
+  return getDescriptor();
+}
+public static final 
org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.EnumDescriptor
+getDescriptor() {
+  return 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.getDescriptor().getEnumTypes().get(20);
+}
+
+private static final GCRegionState[] VALUES = values();
+
+public static GCRegionState valueOf(
+
org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.EnumValueDescriptor
 desc) {
+  if (desc.getType() != getDescriptor()) {
+throw new java.lang.IllegalArgumentException(
+  "EnumValueDescriptor is not for this type.");
+  }
+  return VALUES[desc.getIndex()];
+}
+
+private final int value;
+
+private GCRegionState(int value) {
+  this.value = value;
+}
+
+// @@protoc_insertion_point(enum_scope:hbase.pb.GCRegionState)
+  }
+
+  /**
+   * Protobuf enum {@code hbase.pb.GCMergedRegionsState}
+   */
+  public enum GCMergedRegionsState
+  implements 
org.apache.hadoop.hbase.shaded.com.google.protobuf.ProtocolMessageEnum {
+/**
+ * GC_MERGED_REGIONS_PREPARE = 1;
+ */
+GC_MERGED_REGIONS_PREPARE(1),
+/**
+ * GC_MERGED_REGIONS_PURGE = 2;
+ */
+GC_MERGED_REGIONS_PURGE(2),
+/**
+ * GC_REGION_EDIT_METADATA = 3;
+ */
+GC_REGION_EDIT_METADATA(3),
+;
+
+/**
+ * GC_MERGED_REGIONS_PREPARE = 1;
+ */
+public static final int GC_MERGED_REGIONS_PREPARE_VALUE = 1;
+/**
+ * GC_MERGED_REGIONS_PURGE = 2;
+ */
+public static final int GC_MERGED_REGIONS_PURGE_VALUE = 2;
+/**
+ * GC_REGION_EDIT_METADATA = 3;
+ */
+public static final int GC_REGION_EDIT_METADATA_VALUE = 3;
+
+
+public final int getNumber() {
+  return value;
+}
+
+/**
+ * @deprecated Use {@link #forNumber(int)} instead.
+ */
+

[1/4] hbase git commit: Fix CatalogTracker. Make it use Procedures doing clean up of Region data on split/merge. Without these changes, ITBLL was failing at larger scale (3-4hours 5B rows) because we

2017-05-11 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/HBASE-14614 9464f461b -> a23fcc97d


http://git-wip-us.apache.org/repos/asf/hbase/blob/a23fcc97/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AbstractStateMachineTableProcedure.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AbstractStateMachineTableProcedure.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AbstractStateMachineTableProcedure.java
index 9f23848..eca963d 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AbstractStateMachineTableProcedure.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AbstractStateMachineTableProcedure.java
@@ -29,7 +29,7 @@ import org.apache.hadoop.hbase.security.User;
 
 /**
  * Base class for all the Table procedures that want to use a 
StateMachineProcedure.
- * It provide some basic helpers like basic locking, sync latch, and basic 
toStringClassDetails().
+ * It provides helpers like basic locking, sync latch, and 
toStringClassDetails().
  */
 @InterfaceAudience.Private
 public abstract class AbstractStateMachineTableProcedure
@@ -52,9 +52,10 @@ public abstract class 
AbstractStateMachineTableProcedure
 
   protected AbstractStateMachineTableProcedure(final MasterProcedureEnv env,
   final ProcedurePrepareLatch latch) {
-this.user = env.getRequestUser();
-this.setOwner(user);
-
+if (env != null) {
+  this.user = env.getRequestUser();
+  this.setOwner(user);
+}
 // used for compatibility with clients without procedures
 // they need a sync TableExistsException, TableNotFoundException, 
TableNotDisabledException, ...
 this.syncLatch = latch;
@@ -110,4 +111,4 @@ public abstract class 
AbstractStateMachineTableProcedure
   throw new TableNotFoundException(getTableName());
 }
   }
-}
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hbase/blob/a23fcc97/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DispatchMergingRegionsProcedure.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DispatchMergingRegionsProcedure.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DispatchMergingRegionsProcedure.java
index 1478fc7..15ed429 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DispatchMergingRegionsProcedure.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DispatchMergingRegionsProcedure.java
@@ -275,7 +275,7 @@ public class DispatchMergingRegionsProcedure
 
   @Override
   public TableOperationType getTableOperationType() {
-return TableOperationType.MERGE;
+return TableOperationType.REGION_MERGE;
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hbase/blob/a23fcc97/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
index bcb0004..61e984c 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
@@ -572,11 +572,13 @@ public class MasterProcedureScheduler extends 
AbstractProcedureScheduler {
 return false;
   // region operations are using the shared-lock on the table
   // and then they will grab an xlock on the region.
-  case SPLIT:
-  case MERGE:
-  case ASSIGN:
-  case UNASSIGN:
+  case REGION_SPLIT:
+  case REGION_MERGE:
+  case REGION_ASSIGN:
+  case REGION_UNASSIGN:
   case REGION_EDIT:
+  case REGION_GC:
+  case MERGED_REGIONS_GC:
 return false;
   default:
 break;

http://git-wip-us.apache.org/repos/asf/hbase/blob/a23fcc97/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java
index 9e00579..3bd2c9e 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java
@@ -413,14 +413,8 @@ implements ServerProcedureInterface {
   final HRegionInfo hri = it.next();
   RegionTransitionProcedure rtp 

hbase git commit: HBASE-18021 Add more info in timed out RetriesExhaustedException for read replica client get processing (Huaxiang Sun)

2017-05-11 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/branch-1 9bf5bc198 -> 51cb53776


HBASE-18021 Add more info in timed out RetriesExhaustedException for read 
replica client get processing (Huaxiang Sun)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/51cb5377
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/51cb5377
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/51cb5377

Branch: refs/heads/branch-1
Commit: 51cb53776de413c3f2b4b481a0798f428680ac6b
Parents: 9bf5bc1
Author: Michael Stack 
Authored: Thu May 11 17:23:43 2017 -0700
Committer: Michael Stack 
Committed: Thu May 11 17:24:26 2017 -0700

--
 .../hadoop/hbase/client/RpcRetryingCallerWithReadReplicas.java   | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/51cb5377/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerWithReadReplicas.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerWithReadReplicas.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerWithReadReplicas.java
index 6630457..8c5efde 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerWithReadReplicas.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerWithReadReplicas.java
@@ -235,7 +235,9 @@ public class RpcRetryingCallerWithReadReplicas {
   Future f = 
cs.pollForFirstSuccessfullyCompletedTask(operationTimeout,
   TimeUnit.MILLISECONDS, startIndex, endIndex);
   if (f == null) {
-throw new RetriesExhaustedException("timed out after " + 
operationTimeout + " ms");
+throw new RetriesExhaustedException("Timed out after " + 
operationTimeout +
+"ms. Get is sent to replicas with startIndex: " + startIndex +
+", endIndex: " + endIndex + ", Locations: " + rl);
   }
   return f.get();
 } catch (ExecutionException e) {



hbase git commit: HBASE-18021 Add more info in timed out RetriesExhaustedException for read replica client get processing (Huaxiang Sun)

2017-05-11 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/master c83347361 -> d64acfd30


HBASE-18021 Add more info in timed out RetriesExhaustedException for read 
replica client get processing (Huaxiang Sun)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/d64acfd3
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/d64acfd3
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/d64acfd3

Branch: refs/heads/master
Commit: d64acfd3045027e219438ff6c45fd72f0ea5140f
Parents: c833473
Author: Michael Stack 
Authored: Thu May 11 17:23:43 2017 -0700
Committer: Michael Stack 
Committed: Thu May 11 17:23:43 2017 -0700

--
 .../hadoop/hbase/client/RpcRetryingCallerWithReadReplicas.java   | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/d64acfd3/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerWithReadReplicas.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerWithReadReplicas.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerWithReadReplicas.java
index 0050269..91c6344 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerWithReadReplicas.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerWithReadReplicas.java
@@ -209,7 +209,9 @@ public class RpcRetryingCallerWithReadReplicas {
   Future f = 
cs.pollForFirstSuccessfullyCompletedTask(operationTimeout,
   TimeUnit.MILLISECONDS, startIndex, endIndex);
   if (f == null) {
-throw new RetriesExhaustedException("timed out after " + 
operationTimeout + " ms");
+throw new RetriesExhaustedException("Timed out after " + 
operationTimeout +
+"ms. Get is sent to replicas with startIndex: " + startIndex +
+", endIndex: " + endIndex + ", Locations: " + rl);
   }
   return f.get();
 } catch (ExecutionException e) {



[3/3] hbase git commit: Amend HBASE-18000 Make sure we always return the scanner id with ScanResponse (Karan Mehta and Duo Zhang)

2017-05-11 Thread apurtell
Amend HBASE-18000 Make sure we always return the scanner id with ScanResponse 
(Karan Mehta and Duo Zhang)

Move getRegionScanner() call below builder.setScannerId() to handle a
corner case.

Conflicts:

hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/5cbc041b
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/5cbc041b
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/5cbc041b

Branch: refs/heads/branch-1.3
Commit: 5cbc041b970be90c8938e136003a58f6829cf451
Parents: 80d7d1a
Author: Andrew Purtell 
Authored: Thu May 11 14:34:04 2017 -0700
Committer: Andrew Purtell 
Committed: Thu May 11 14:39:12 2017 -0700

--
 .../java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/5cbc041b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
index 9f311d6..d3d64c8 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
@@ -2747,11 +2747,11 @@ public class RSRpcServices implements 
HBaseRPCErrorHandler,
 boolean isSmallScan;
 try {
   if (request.hasScannerId()) {
-rsh = getRegionScanner(request);
-isSmallScan = false;
 // The downstream projects such as AsyncHBase in OpenTSDB need this 
value. See HBASE-18000
 // for more details.
 builder.setScannerId(request.getScannerId());
+rsh = getRegionScanner(request);
+isSmallScan = false;
   } else {
 Pair pair = newRegionScanner(request, 
builder);
 rsh = pair.getFirst();



[2/3] hbase git commit: Amend HBASE-18000 Make sure we always return the scanner id with ScanResponse (Karan Mehta and Duo Zhang)

2017-05-11 Thread apurtell
Amend HBASE-18000 Make sure we always return the scanner id with ScanResponse 
(Karan Mehta and Duo Zhang)

Move getRegionScanner() call below builder.setScannerId() to handle a
corner case.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/9bf5bc19
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/9bf5bc19
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/9bf5bc19

Branch: refs/heads/branch-1
Commit: 9bf5bc1986098295b0d38b56bf3d45f9a0f5aec9
Parents: ffa3785
Author: Andrew Purtell 
Authored: Thu May 11 14:34:04 2017 -0700
Committer: Andrew Purtell 
Committed: Thu May 11 14:38:28 2017 -0700

--
 .../java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/9bf5bc19/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
index 6f25ad4..87ff3d2 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
@@ -2808,10 +2808,10 @@ public class RSRpcServices implements 
HBaseRPCErrorHandler,
 ScanResponse.Builder builder = ScanResponse.newBuilder();
 try {
   if (request.hasScannerId()) {
-rsh = getRegionScanner(request);
 // The downstream projects such as AsyncHBase in OpenTSDB need this 
value. See HBASE-18000
 // for more details.
 builder.setScannerId(request.getScannerId());
+rsh = getRegionScanner(request);
   } else {
 rsh = newRegionScanner(request, builder);
   }



[1/3] hbase git commit: Amend HBASE-18000 Make sure we always return the scanner id with ScanResponse (Karan Mehta and Duo Zhang)

2017-05-11 Thread apurtell
Repository: hbase
Updated Branches:
  refs/heads/branch-1 ffa3785d0 -> 9bf5bc198
  refs/heads/branch-1.3 80d7d1a59 -> 5cbc041b9
  refs/heads/master 589563163 -> c83347361


Amend HBASE-18000 Make sure we always return the scanner id with ScanResponse 
(Karan Mehta and Duo Zhang)

Move getRegionScanner() call below builder.setScannerId() to handle a
corner case.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/c8334736
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/c8334736
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/c8334736

Branch: refs/heads/master
Commit: c833473619d5a2ff9437d62f2ea14ac772288304
Parents: 5895631
Author: Andrew Purtell 
Authored: Thu May 11 14:34:04 2017 -0700
Committer: Andrew Purtell 
Committed: Thu May 11 14:35:23 2017 -0700

--
 .../java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/c8334736/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
index 34401f2..95408b7 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
@@ -3058,10 +3058,10 @@ public class RSRpcServices implements 
HBaseRPCErrorHandler,
 ScanResponse.Builder builder = ScanResponse.newBuilder();
 try {
   if (request.hasScannerId()) {
-rsh = getRegionScanner(request);
 // The downstream projects such as AsyncHBase in OpenTSDB need this 
value. See HBASE-18000
 // for more details.
 builder.setScannerId(request.getScannerId());
+rsh = getRegionScanner(request);
   } else {
 rsh = newRegionScanner(request, builder);
   }



[3/5] hbase git commit: HBASE-18026 ProtobufUtil seems to do extra array copying

2017-05-11 Thread apurtell
HBASE-18026 ProtobufUtil seems to do extra array copying

Signed-off-by: Andrew Purtell 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/80d7d1a5
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/80d7d1a5
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/80d7d1a5

Branch: refs/heads/branch-1.3
Commit: 80d7d1a595862b6d9d6a8fbbc5a554ad17a37381
Parents: 286394b
Author: Vincent 
Authored: Wed May 10 18:01:49 2017 -0700
Committer: Andrew Purtell 
Committed: Thu May 11 13:23:33 2017 -0700

--
 .../hadoop/hbase/protobuf/ProtobufUtil.java | 45 ++--
 1 file changed, 23 insertions(+), 22 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/80d7d1a5/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
index 5a78601..5461bd3 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hbase.protobuf;
 
 
+import static com.google.protobuf.HBaseZeroCopyByteString.zeroCopyGetBytes;
 import static 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType.REGION_NAME;
 
 import java.io.ByteArrayOutputStream;
@@ -458,7 +459,7 @@ public final class ProtobufUtil {
   public static Get toGet(
   final ClientProtos.Get proto) throws IOException {
 if (proto == null) return null;
-byte[] row = proto.getRow().toByteArray();
+byte[] row = zeroCopyGetBytes(proto.getRow());
 Get get = new Get(row);
 if (proto.hasCacheBlocks()) {
   get.setCacheBlocks(proto.getCacheBlocks());
@@ -556,7 +557,7 @@ public final class ProtobufUtil {
 MutationType type = proto.getMutateType();
 assert type == MutationType.PUT: type.name();
 long timestamp = proto.hasTimestamp()? proto.getTimestamp(): 
HConstants.LATEST_TIMESTAMP;
-Put put = proto.hasRow() ? new Put(proto.getRow().toByteArray(), 
timestamp) : null;
+Put put = proto.hasRow() ? new Put(zeroCopyGetBytes(proto.getRow()), 
timestamp) : null;
 int cellCount = proto.hasAssociatedCellCount()? 
proto.getAssociatedCellCount(): 0;
 if (cellCount > 0) {
   // The proto has metadata only and the data is separate to be found in 
the cellScanner.
@@ -581,7 +582,7 @@ public final class ProtobufUtil {
   }
   // The proto has the metadata and the data itself
   for (ColumnValue column: proto.getColumnValueList()) {
-byte[] family = column.getFamily().toByteArray();
+byte[] family = zeroCopyGetBytes(column.getFamily());
 for (QualifierValue qv: column.getQualifierValueList()) {
   if (!qv.hasValue()) {
 throw new DoNotRetryIOException(
@@ -597,23 +598,23 @@ public final class ProtobufUtil {
   }
   byte[] tags;
   if (qv.hasTags()) {
-tags = qv.getTags().toByteArray();
+tags = zeroCopyGetBytes(qv.getTags());
 Object[] array = Tag.asList(tags, 0, (short)tags.length).toArray();
 Tag[] tagArray = new Tag[array.length];
 for(int i = 0; i< array.length; i++) {
   tagArray[i] = (Tag)array[i];
 }
 if(qv.hasDeleteType()) {
-  byte[] qual = qv.hasQualifier() ? 
qv.getQualifier().toByteArray() : null;
-  put.add(new KeyValue(proto.getRow().toByteArray(), family, qual, 
ts,
+  byte[] qual = qv.hasQualifier() ? 
zeroCopyGetBytes(qv.getQualifier()) : null;
+  put.add(new KeyValue(zeroCopyGetBytes(proto.getRow()), family, 
qual, ts,
   fromDeleteType(qv.getDeleteType()), null, tags));
 } else {
   put.addImmutable(family, qualifier, ts, value, tagArray);
 }
   } else {
 if(qv.hasDeleteType()) {
-  byte[] qual = qv.hasQualifier() ? 
qv.getQualifier().toByteArray() : null;
-  put.add(new KeyValue(proto.getRow().toByteArray(), family, qual, 
ts,
+  byte[] qual = qv.hasQualifier() ? 
zeroCopyGetBytes(qv.getQualifier()) : null;
+  put.add(new KeyValue(zeroCopyGetBytes(proto.getRow()), family, 
qual, ts,
   fromDeleteType(qv.getDeleteType())));
 } else{
   put.addImmutable(family, qualifier, ts, value);
@@ -654,7 +655,7 @@ public final class ProtobufUtil {
 MutationType type = proto.getMutateType();
 
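The copying that HBASE-18026 removes comes from ByteString.toByteArray(), which allocates a fresh array and copies the bytes on every call; the patch swaps those calls for HBaseZeroCopyByteString.zeroCopyGetBytes() (see the import above), which can hand back the ByteString's backing array instead. A small illustrative sketch of the difference, using only the public protobuf ByteString API; the comment about the zero-copy path paraphrases the intent and is not the real helper's implementation:

import com.google.protobuf.ByteString;

public class ZeroCopySketch {
  public static void main(String[] args) {
    byte[] row = new byte[1024];
    ByteString bs = ByteString.copyFrom(row);   // first copy, at construction

    byte[] copied = bs.toByteArray();           // second copy: a new array every call
    System.out.println(copied != row);          // true -> a distinct array was allocated

    // A zero-copy accessor such as zeroCopyGetBytes(bs) would return the array
    // backing the ByteString where it safely can, so the per-field conversions in
    // ProtobufUtil.toGet()/toPut() (row, family, qualifier, tags) skip one
    // allocation plus one System.arraycopy each.
  }
}
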

[5/5] hbase git commit: HBASE-18026 ProtobufUtil seems to do extra array copying

2017-05-11 Thread apurtell
HBASE-18026 ProtobufUtil seems to do extra array copying

Signed-off-by: Andrew Purtell 

Conflicts:

hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/7d820db0
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/7d820db0
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/7d820db0

Branch: refs/heads/branch-1.1
Commit: 7d820db0ee82062637dd2465f94d3c99dd994277
Parents: 5de7799
Author: Vincent 
Authored: Wed May 10 18:01:49 2017 -0700
Committer: Andrew Purtell 
Committed: Thu May 11 13:28:58 2017 -0700

--
 .../hadoop/hbase/protobuf/ProtobufUtil.java | 39 ++--
 1 file changed, 20 insertions(+), 19 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/7d820db0/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
index c5a40b6..2492070 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hbase.protobuf;
 
 
+import static com.google.protobuf.HBaseZeroCopyByteString.zeroCopyGetBytes;
 import static 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType.REGION_NAME;
 
 import java.io.ByteArrayOutputStream;
@@ -452,7 +453,7 @@ public final class ProtobufUtil {
   public static Get toGet(
   final ClientProtos.Get proto) throws IOException {
 if (proto == null) return null;
-byte[] row = proto.getRow().toByteArray();
+byte[] row = zeroCopyGetBytes(proto.getRow());
 Get get = new Get(row);
 if (proto.hasCacheBlocks()) {
   get.setCacheBlocks(proto.getCacheBlocks());
@@ -551,7 +552,7 @@ public final class ProtobufUtil {
 MutationType type = proto.getMutateType();
 assert type == MutationType.PUT: type.name();
 long timestamp = proto.hasTimestamp()? proto.getTimestamp(): 
HConstants.LATEST_TIMESTAMP;
-Put put = proto.hasRow() ? new Put(proto.getRow().toByteArray(), 
timestamp) : null;
+Put put = proto.hasRow() ? new Put(zeroCopyGetBytes(proto.getRow()), 
timestamp) : null;
 int cellCount = proto.hasAssociatedCellCount()? 
proto.getAssociatedCellCount(): 0;
 if (cellCount > 0) {
   // The proto has metadata only and the data is separate to be found in 
the cellScanner.
@@ -576,7 +577,7 @@ public final class ProtobufUtil {
   }
   // The proto has the metadata and the data itself
   for (ColumnValue column: proto.getColumnValueList()) {
-byte[] family = column.getFamily().toByteArray();
+byte[] family = zeroCopyGetBytes(column.getFamily());
 for (QualifierValue qv: column.getQualifierValueList()) {
   if (!qv.hasValue()) {
 throw new DoNotRetryIOException(
@@ -592,23 +593,23 @@ public final class ProtobufUtil {
   }
   byte[] tags;
   if (qv.hasTags()) {
-tags = qv.getTags().toByteArray();
+tags = zeroCopyGetBytes(qv.getTags());
 Object[] array = Tag.asList(tags, 0, (short)tags.length).toArray();
 Tag[] tagArray = new Tag[array.length];
 for(int i = 0; i< array.length; i++) {
   tagArray[i] = (Tag)array[i];
 }
 if(qv.hasDeleteType()) {
-  byte[] qual = qv.hasQualifier() ? 
qv.getQualifier().toByteArray() : null;
-  put.add(new KeyValue(proto.getRow().toByteArray(), family, qual, 
ts,
+  byte[] qual = qv.hasQualifier() ? 
zeroCopyGetBytes(qv.getQualifier()) : null;
+  put.add(new KeyValue(zeroCopyGetBytes(proto.getRow()), family, 
qual, ts,
   fromDeleteType(qv.getDeleteType()), null, tags));
 } else {
   put.addImmutable(family, qualifier, ts, value, tagArray);
 }
   } else {
 if(qv.hasDeleteType()) {
-  byte[] qual = qv.hasQualifier() ? 
qv.getQualifier().toByteArray() : null;
-  put.add(new KeyValue(proto.getRow().toByteArray(), family, qual, 
ts,
+  byte[] qual = qv.hasQualifier() ? 
zeroCopyGetBytes(qv.getQualifier()) : null;
+  put.add(new KeyValue(zeroCopyGetBytes(proto.getRow()), family, 
qual, ts,
   fromDeleteType(qv.getDeleteType())));
 } else{
   put.addImmutable(family, qualifier, ts, value);
@@ -649,7 

[1/5] hbase git commit: HBASE-18026 ProtobufUtil seems to do extra array copying

2017-05-11 Thread apurtell
Repository: hbase
Updated Branches:
  refs/heads/branch-1 27eab2c6e -> ffa3785d0
  refs/heads/branch-1.1 5de7799e0 -> 7d820db0e
  refs/heads/branch-1.2 b6d1b19a3 -> 14ab4a9c4
  refs/heads/branch-1.3 286394ba6 -> 80d7d1a59
  refs/heads/master 0ae0edcd6 -> 589563163


HBASE-18026 ProtobufUtil seems to do extra array copying

Signed-off-by: Andrew Purtell 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/58956316
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/58956316
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/58956316

Branch: refs/heads/master
Commit: 58956316342b3eb90cb3d50ed74e4ad1914284f8
Parents: 0ae0edc
Author: Vincent 
Authored: Wed May 10 17:57:39 2017 -0700
Committer: Andrew Purtell 
Committed: Thu May 11 13:11:28 2017 -0700

--
 .../hadoop/hbase/protobuf/ProtobufUtil.java | 42 ++--
 1 file changed, 22 insertions(+), 20 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/58956316/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
index fcf2c34..5a6cd21 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.hbase.protobuf;
 
+import static com.google.protobuf.HBaseZeroCopyByteString.zeroCopyGetBytes;
+
 import java.io.IOException;
 import java.lang.reflect.Constructor;
 import java.lang.reflect.Method;
@@ -367,7 +369,7 @@ public final class ProtobufUtil {
*/
   public static Get toGet(final ClientProtos.Get proto) throws IOException {
 if (proto == null) return null;
-byte[] row = proto.getRow().toByteArray();
+byte[] row = zeroCopyGetBytes(proto.getRow());
 Get get = new Get(row);
 if (proto.hasCacheBlocks()) {
   get.setCacheBlocks(proto.getCacheBlocks());
@@ -464,7 +466,7 @@ public final class ProtobufUtil {
 MutationType type = proto.getMutateType();
 assert type == MutationType.PUT: type.name();
 long timestamp = proto.hasTimestamp()? proto.getTimestamp(): 
HConstants.LATEST_TIMESTAMP;
-Put put = proto.hasRow() ? new Put(proto.getRow().toByteArray(), 
timestamp) : null;
+Put put = proto.hasRow() ? new Put(zeroCopyGetBytes(proto.getRow()), 
timestamp) : null;
 int cellCount = proto.hasAssociatedCellCount()? 
proto.getAssociatedCellCount(): 0;
 if (cellCount > 0) {
   // The proto has metadata only and the data is separate to be found in 
the cellScanner.
@@ -489,7 +491,7 @@ public final class ProtobufUtil {
   }
   // The proto has the metadata and the data itself
   for (ColumnValue column: proto.getColumnValueList()) {
-byte[] family = column.getFamily().toByteArray();
+byte[] family = zeroCopyGetBytes(column.getFamily());
 for (QualifierValue qv: column.getQualifierValueList()) {
   if (!qv.hasValue()) {
 throw new DoNotRetryIOException(
@@ -508,7 +510,7 @@ public final class ProtobufUtil {
 allTagsBytes = qv.getTags().toByteArray();
 if(qv.hasDeleteType()) {
   byte[] qual = qv.hasQualifier() ? 
qv.getQualifier().toByteArray() : null;
-  put.add(new KeyValue(proto.getRow().toByteArray(), family, qual, 
ts,
+  put.add(new KeyValue(zeroCopyGetBytes(proto.getRow()), family, 
qual, ts,
   fromDeleteType(qv.getDeleteType()), null, allTagsBytes));
 } else {
  List<Tag> tags = TagUtil.asList(allTagsBytes, 0, 
(short)allTagsBytes.length);
@@ -517,8 +519,8 @@ public final class ProtobufUtil {
 }
   } else {
 if(qv.hasDeleteType()) {
-  byte[] qual = qv.hasQualifier() ? 
qv.getQualifier().toByteArray() : null;
-  put.add(new KeyValue(proto.getRow().toByteArray(), family, qual, 
ts,
+  byte[] qual = qv.hasQualifier() ? 
zeroCopyGetBytes(qv.getQualifier()) : null;
+  put.add(new KeyValue(zeroCopyGetBytes(proto.getRow()), family, 
qual, ts,
   fromDeleteType(qv.getDeleteType())));
 } else{
   put.addImmutable(family, qualifier, ts, value);
@@ -559,7 +561,7 @@ public final class ProtobufUtil {
 MutationType type = proto.getMutateType();
 assert type == MutationType.DELETE : type.name();
 long timestamp = proto.hasTimestamp() ? proto.getTimestamp() : 
HConstants.LATEST_TIMESTAMP;
-Delete delete = 

[4/5] hbase git commit: HBASE-18026 ProtobufUtil seems to do extra array copying

2017-05-11 Thread apurtell
HBASE-18026 ProtobufUtil seems to do extra array copying

Signed-off-by: Andrew Purtell 

Conflicts:

hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/14ab4a9c
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/14ab4a9c
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/14ab4a9c

Branch: refs/heads/branch-1.2
Commit: 14ab4a9c47ed1532c0457430d30cbb08f2669d61
Parents: b6d1b19
Author: Vincent 
Authored: Wed May 10 18:01:49 2017 -0700
Committer: Andrew Purtell 
Committed: Thu May 11 13:28:47 2017 -0700

--
 .../hadoop/hbase/protobuf/ProtobufUtil.java | 39 ++--
 1 file changed, 20 insertions(+), 19 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/14ab4a9c/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
index 24e6b1d..9318a4c 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hbase.protobuf;
 
 
+import static com.google.protobuf.HBaseZeroCopyByteString.zeroCopyGetBytes;
 import static 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType.REGION_NAME;
 
 import java.io.ByteArrayOutputStream;
@@ -455,7 +456,7 @@ public final class ProtobufUtil {
   public static Get toGet(
   final ClientProtos.Get proto) throws IOException {
 if (proto == null) return null;
-byte[] row = proto.getRow().toByteArray();
+byte[] row = zeroCopyGetBytes(proto.getRow());
 Get get = new Get(row);
 if (proto.hasCacheBlocks()) {
   get.setCacheBlocks(proto.getCacheBlocks());
@@ -553,7 +554,7 @@ public final class ProtobufUtil {
 MutationType type = proto.getMutateType();
 assert type == MutationType.PUT: type.name();
 long timestamp = proto.hasTimestamp()? proto.getTimestamp(): 
HConstants.LATEST_TIMESTAMP;
-Put put = proto.hasRow() ? new Put(proto.getRow().toByteArray(), 
timestamp) : null;
+Put put = proto.hasRow() ? new Put(zeroCopyGetBytes(proto.getRow()), 
timestamp) : null;
 int cellCount = proto.hasAssociatedCellCount()? 
proto.getAssociatedCellCount(): 0;
 if (cellCount > 0) {
   // The proto has metadata only and the data is separate to be found in 
the cellScanner.
@@ -578,7 +579,7 @@ public final class ProtobufUtil {
   }
   // The proto has the metadata and the data itself
   for (ColumnValue column: proto.getColumnValueList()) {
-byte[] family = column.getFamily().toByteArray();
+byte[] family = zeroCopyGetBytes(column.getFamily());
 for (QualifierValue qv: column.getQualifierValueList()) {
   if (!qv.hasValue()) {
 throw new DoNotRetryIOException(
@@ -594,23 +595,23 @@ public final class ProtobufUtil {
   }
   byte[] tags;
   if (qv.hasTags()) {
-tags = qv.getTags().toByteArray();
+tags = zeroCopyGetBytes(qv.getTags());
 Object[] array = Tag.asList(tags, 0, (short)tags.length).toArray();
 Tag[] tagArray = new Tag[array.length];
 for(int i = 0; i< array.length; i++) {
   tagArray[i] = (Tag)array[i];
 }
 if(qv.hasDeleteType()) {
-  byte[] qual = qv.hasQualifier() ? 
qv.getQualifier().toByteArray() : null;
-  put.add(new KeyValue(proto.getRow().toByteArray(), family, qual, 
ts,
+  byte[] qual = qv.hasQualifier() ? 
zeroCopyGetBytes(qv.getQualifier()) : null;
+  put.add(new KeyValue(zeroCopyGetBytes(proto.getRow()), family, 
qual, ts,
   fromDeleteType(qv.getDeleteType()), null, tags));
 } else {
   put.addImmutable(family, qualifier, ts, value, tagArray);
 }
   } else {
 if(qv.hasDeleteType()) {
-  byte[] qual = qv.hasQualifier() ? 
qv.getQualifier().toByteArray() : null;
-  put.add(new KeyValue(proto.getRow().toByteArray(), family, qual, 
ts,
+  byte[] qual = qv.hasQualifier() ? 
zeroCopyGetBytes(qv.getQualifier()) : null;
+  put.add(new KeyValue(zeroCopyGetBytes(proto.getRow()), family, 
qual, ts,
   fromDeleteType(qv.getDeleteType())));
 } else{
   put.addImmutable(family, qualifier, ts, value);
@@ -651,7 

[2/5] hbase git commit: HBASE-18026 ProtobufUtil seems to do extra array copying

2017-05-11 Thread apurtell
HBASE-18026 ProtobufUtil seems to do extra array copying

Signed-off-by: Andrew Purtell 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/ffa3785d
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/ffa3785d
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/ffa3785d

Branch: refs/heads/branch-1
Commit: ffa3785d0e7308595affd9331dcd419ac639413e
Parents: 27eab2c
Author: Vincent 
Authored: Wed May 10 18:01:49 2017 -0700
Committer: Andrew Purtell 
Committed: Thu May 11 13:23:19 2017 -0700

--
 .../hadoop/hbase/protobuf/ProtobufUtil.java | 45 ++--
 1 file changed, 23 insertions(+), 22 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/ffa3785d/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
index a006370..99a0913 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hbase.protobuf;
 
 
+import static com.google.protobuf.HBaseZeroCopyByteString.zeroCopyGetBytes;
 import static 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType.REGION_NAME;
 
 import com.google.common.collect.ArrayListMultimap;
@@ -475,7 +476,7 @@ public final class ProtobufUtil {
   public static Get toGet(
   final ClientProtos.Get proto) throws IOException {
 if (proto == null) return null;
-byte[] row = proto.getRow().toByteArray();
+byte[] row = zeroCopyGetBytes(proto.getRow());
 Get get = new Get(row);
 if (proto.hasCacheBlocks()) {
   get.setCacheBlocks(proto.getCacheBlocks());
@@ -573,7 +574,7 @@ public final class ProtobufUtil {
 MutationType type = proto.getMutateType();
 assert type == MutationType.PUT: type.name();
 long timestamp = proto.hasTimestamp()? proto.getTimestamp(): 
HConstants.LATEST_TIMESTAMP;
-Put put = proto.hasRow() ? new Put(proto.getRow().toByteArray(), 
timestamp) : null;
+Put put = proto.hasRow() ? new Put(zeroCopyGetBytes(proto.getRow()), 
timestamp) : null;
 int cellCount = proto.hasAssociatedCellCount()? 
proto.getAssociatedCellCount(): 0;
 if (cellCount > 0) {
   // The proto has metadata only and the data is separate to be found in 
the cellScanner.
@@ -598,7 +599,7 @@ public final class ProtobufUtil {
   }
   // The proto has the metadata and the data itself
   for (ColumnValue column: proto.getColumnValueList()) {
-byte[] family = column.getFamily().toByteArray();
+byte[] family = zeroCopyGetBytes(column.getFamily());
 for (QualifierValue qv: column.getQualifierValueList()) {
   if (!qv.hasValue()) {
 throw new DoNotRetryIOException(
@@ -614,23 +615,23 @@ public final class ProtobufUtil {
   }
   byte[] tags;
   if (qv.hasTags()) {
-tags = qv.getTags().toByteArray();
+tags = zeroCopyGetBytes(qv.getTags());
 Object[] array = Tag.asList(tags, 0, (short)tags.length).toArray();
 Tag[] tagArray = new Tag[array.length];
 for(int i = 0; i< array.length; i++) {
   tagArray[i] = (Tag)array[i];
 }
 if(qv.hasDeleteType()) {
-  byte[] qual = qv.hasQualifier() ? 
qv.getQualifier().toByteArray() : null;
-  put.add(new KeyValue(proto.getRow().toByteArray(), family, qual, 
ts,
+  byte[] qual = qv.hasQualifier() ? 
zeroCopyGetBytes(qv.getQualifier()) : null;
+  put.add(new KeyValue(zeroCopyGetBytes(proto.getRow()), family, 
qual, ts,
   fromDeleteType(qv.getDeleteType()), null, tags));
 } else {
   put.addImmutable(family, qualifier, ts, value, tagArray);
 }
   } else {
 if(qv.hasDeleteType()) {
-  byte[] qual = qv.hasQualifier() ? 
qv.getQualifier().toByteArray() : null;
-  put.add(new KeyValue(proto.getRow().toByteArray(), family, qual, 
ts,
+  byte[] qual = qv.hasQualifier() ? 
zeroCopyGetBytes(qv.getQualifier()) : null;
+  put.add(new KeyValue(zeroCopyGetBytes(proto.getRow()), family, 
qual, ts,
   fromDeleteType(qv.getDeleteType())));
 } else{
   put.addImmutable(family, qualifier, ts, value);
@@ -671,7 +672,7 @@ public final class ProtobufUtil {
 MutationType type = 

[50/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1241ee85/checkstyle-aggregate.html
--
diff --git a/checkstyle-aggregate.html b/checkstyle-aggregate.html
index a33ba34..f9a0061 100644
--- a/checkstyle-aggregate.html
+++ b/checkstyle-aggregate.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  Checkstyle Results
 
@@ -289,7 +289,7 @@
 2159
 0
 0
-14347
+14333
 
 Files
 
@@ -,7 +,7 @@
 org/apache/hadoop/hbase/io/hfile/HFileBlock.java
 0
 0
-47
+46
 
 org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java
 0
@@ -4412,7 +4412,7 @@
 org/apache/hadoop/hbase/regionserver/StoreScanner.java
 0
 0
-36
+23
 
 org/apache/hadoop/hbase/regionserver/StoreUtils.java
 0
@@ -7069,7 +7069,7 @@
 
 
 http://checkstyle.sourceforge.net/config_blocks.html#NeedBraces;>NeedBraces
-1934
+1930
 Error
 
 coding
@@ -7131,7 +7131,7 @@
 ordered: true
 sortStaticImportsAlphabetically: true
 option: top
-917
+916
 Error
 
 
@@ -7154,19 +7154,19 @@
 caseIndent: 2
 basicOffset: 2
 lineWrappingIndentation: 2
-5074
+5066
 Error
 
 javadoc
 http://checkstyle.sourceforge.net/config_javadoc.html#JavadocTagContinuationIndentation;>JavadocTagContinuationIndentation
 
 offset: 2
-787
+782
 Error
 
 
 http://checkstyle.sourceforge.net/config_javadoc.html#NonEmptyAtclauseDescription;>NonEmptyAtclauseDescription
-3265
+3269
 Error
 
 misc
@@ -12833,7 +12833,7 @@
 
 Error
 javadoc
-JavadocTagContinuationIndentation
+NonEmptyAtclauseDescription
 Javadoc comment at column 43 has parse error. Missed HTML close tag 
'TableName'. Sometimes it means that close tag missed for one of previous 
tags.
 122
 
@@ -15833,7 +15833,7 @@
 
 Error
 javadoc
-JavadocTagContinuationIndentation
+NonEmptyAtclauseDescription
 Javadoc comment at column 64 has parse error. Missed HTML close tag 
'code'. Sometimes it means that close tag missed for one of previous tags.
 1957
 
@@ -20381,7 +20381,7 @@
 
 Error
 javadoc
-JavadocTagContinuationIndentation
+NonEmptyAtclauseDescription
 Javadoc comment at column 37 has parse error. Details: no viable 
alternative at input 'ColumnFamily,' while parsing HTML_ELEMENT
 29
 
@@ -30745,16 +30745,16 @@
 Line
 
 Error
-imports
-ImportOrder
-Wrong order for 'com.google.common.annotations.VisibleForTesting' 
import.
-56
+indentation
+Indentation
+'method def modifier' have incorrect indentation level 8, expected level 
should be one of the following: 4, 6.
+235
 
 Error
 indentation
 Indentation
-'method def modifier' have incorrect indentation level 8, expected level 
should be one of the following: 4, 6.
-237
+'method def' child have incorrect indentation level 10, expected level 
should be one of the following: 6, 8.
+239
 
 Error
 indentation
@@ -30765,80 +30765,80 @@
 Error
 indentation
 Indentation
-'method def' child have incorrect indentation level 10, expected level 
should be one of the following: 6, 8.
-243
+'if' have incorrect indentation level 10, expected level should be one of 
the following: 6, 8.
+242
 
 Error
 indentation
 Indentation
-'if' have incorrect indentation level 10, expected level should be one of 
the following: 6, 8.
-244
+'if' child have incorrect indentation level 12, expected level should be 
one of the following: 8, 10.
+243
 
 Error
 indentation
 Indentation
-'if' child have incorrect indentation level 12, expected level should be 
one of the following: 8, 10.
-245
+'if rcurly' have incorrect indentation level 10, expected level should be 
one of the following: 6, 8.
+244
 
 Error
 indentation
 Indentation
-'if rcurly' have incorrect indentation level 10, expected level should be 
one of the following: 6, 8.
-246
+'else' child have incorrect indentation level 12, expected level should be 
one of the following: 8, 10.
+245
 
 Error
 indentation
 Indentation
 'else' child have incorrect indentation level 12, expected level should be 
one of the following: 8, 10.
-247
+246
 
 Error
 indentation
 Indentation
 'else' child have incorrect indentation level 12, expected level should be 
one of the following: 8, 10.
-248
+247
 
 Error
 indentation
 Indentation
-'else' child have incorrect indentation level 12, expected level should be 
one of the following: 8, 10.
-249
+'else rcurly' have incorrect indentation level 10, expected level should 
be one of the following: 6, 8.
+248
 
 Error
 indentation
 Indentation
-'else rcurly' have incorrect indentation level 10, expected level should 
be one of the following: 6, 8.
+'method def' child have incorrect indentation level 10, expected level 
should be one of the following: 6, 8.
 250
 
 Error
 indentation
 Indentation
 'method def' child have incorrect indentation level 10, expected level 
should be one of the following: 6, 8.
-252
+251
 
 Error
 indentation
 Indentation
 'method def' child have incorrect indentation level 10, expected level 
should be one of the following: 6, 8.
-253
+252
 
 Error
 indentation
 Indentation
 'method def' child have incorrect 

[43/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1241ee85/devapidocs/org/apache/hadoop/hbase/regionserver/SegmentScanner.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/SegmentScanner.html 
b/devapidocs/org/apache/hadoop/hbase/regionserver/SegmentScanner.html
index c0e01d7..6862974 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/SegmentScanner.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/SegmentScanner.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10};
+var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10};
 var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -114,7 +114,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Private
-public class SegmentScanner
+public class SegmentScanner
 extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
 implements KeyValueScanner
 A scanner of a single memstore segment.
@@ -245,6 +245,10 @@ implements 
+org.apache.hadoop.fs.Path
+getFilePath()
+
+
 private Cell
 getHighest(Cellfirst,
   Cellsecond)
@@ -252,43 +256,43 @@ implements 
+
 protected http://docs.oracle.com/javase/8/docs/api/java/util/Iterator.html?is-external=true;
 title="class or interface in java.util">IteratorCell
 getIterator(Cellcell)
 
-
+
 Cell
 getNextIndexedKey()
 
-
+
 long
 getScannerOrder()
 Get the order of this KeyValueScanner.
 
 
-
+
 private Segment
 getSegment()
 Private Methods
 
 
-
+
 boolean
 isFileScanner()
 
-
+
 Cell
 next()
 Return the next Cell in this scanner, iterating the 
scanner
 
 
-
+
 Cell
 peek()
 Look at the next Cell in this scanner, but do not iterate 
the scanner
 
 
-
+
 boolean
 realSeekDone()
 This scanner is working solely on the in-memory MemStore 
and doesn't work on
@@ -296,7 +300,7 @@ implements 
+
 boolean
 requestSeek(Cellc,
booleanforward,
@@ -306,38 +310,38 @@ implements 
+
 boolean
 reseek(Cellcell)
 Reseek the scanner at or after the specified KeyValue.
 
 
-
+
 boolean
 seek(Cellcell)
 Seek the scanner at or after the specified Cell.
 
 
-
+
 boolean
 seekToLastRow()
 Seek the scanner at the first KeyValue of last row
 
 
-
+
 boolean
 seekToPreviousRow(Cellcell)
 Seek the scanner at the first Cell of the row which is the 
previous row
  of specified key
 
 
-
+
 void
 shipped()
 Called after a batch of rows scanned (RPC) and set to be 
returned to client.
 
 
-
+
 boolean
 shouldUseScanner(Scanscan,
 Storestore,
@@ -346,11 +350,11 @@ implements 
+
 http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
 toString()
 
-
+
 protected void
 updateCurrent()
 Private internal method for iterating over the segment,
@@ -385,7 +389,7 @@ implements 
 
 scannerOrder
-privatelong scannerOrder
+privatelong scannerOrder
 Order of this scanner relative to other scanners. See
  KeyValueScanner.getScannerOrder().
 
@@ -396,7 +400,7 @@ implements 
 
 DEFAULT_SCANNER_ORDER
-private static finallong DEFAULT_SCANNER_ORDER
+private static finallong DEFAULT_SCANNER_ORDER
 
 See Also:
 Constant
 Field Values
@@ -409,7 +413,7 @@ implements 
 
 segment
-protected finalSegment segment
+protected finalSegment segment
 
 
 
@@ -418,7 +422,7 @@ implements 
 
 readPoint
-privatelong readPoint
+privatelong readPoint
 
 
 
@@ -427,7 +431,7 @@ implements 
 
 iter
-protectedhttp://docs.oracle.com/javase/8/docs/api/java/util/Iterator.html?is-external=true;
 title="class or interface in java.util">IteratorCell iter
+protectedhttp://docs.oracle.com/javase/8/docs/api/java/util/Iterator.html?is-external=true;
 title="class or interface in java.util">IteratorCell iter
 
 
 
@@ -436,7 +440,7 @@ implements 
 
 current
-protectedCell current
+protectedCell current
 
 
 
@@ -445,7 +449,7 @@ implements 
 
 stopSkippingKVsIfNextRow
-privateboolean stopSkippingKVsIfNextRow
+privateboolean stopSkippingKVsIfNextRow
 
 
 
@@ -454,7 +458,7 @@ implements 
 
 last
-privateCell last
+privateCell last
 
 
 
@@ -463,7 +467,7 @@ implements 
 
 closed
-protectedboolean closed
+protectedboolean closed
 
 
 
@@ -480,7 +484,7 @@ implements 
 
 SegmentScanner
-protectedSegmentScanner(Segmentsegment,
+protectedSegmentScanner(Segmentsegment,
  longreadPoint)
 
 
@@ -490,7 +494,7 @@ implements 
 
 SegmentScanner
-protectedSegmentScanner(Segmentsegment,
+protectedSegmentScanner(Segmentsegment,
 

[49/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1241ee85/checkstyle.rss
--
diff --git a/checkstyle.rss b/checkstyle.rss
index 9a3384a..34f7bff 100644
--- a/checkstyle.rss
+++ b/checkstyle.rss
@@ -26,7 +26,7 @@ under the License.
 2007 - 2017 The Apache Software Foundation
 
   File: 2159,
- Errors: 14347,
+ Errors: 14333,
  Warnings: 0,
  Infos: 0
   
@@ -12165,7 +12165,7 @@ under the License.
   0
 
 
-  47
+  46
 
   
   
@@ -16841,7 +16841,7 @@ under the License.
   0
 
 
-  36
+  23
 
   
   

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1241ee85/coc.html
--
diff --git a/coc.html b/coc.html
index e5594e3..b2a377a 100644
--- a/coc.html
+++ b/coc.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  
   Code of Conduct Policy
@@ -380,7 +380,7 @@ email to mailto:priv...@hbase.apache.org;>the priv
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2017-05-10
+  Last Published: 
2017-05-11
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1241ee85/cygwin.html
--
diff --git a/cygwin.html b/cygwin.html
index 81d6155..d1697f4 100644
--- a/cygwin.html
+++ b/cygwin.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  Installing Apache HBase (TM) on Windows using 
Cygwin
 
@@ -679,7 +679,7 @@ Now your HBase server is running, start 
coding and build that next
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2017-05-10
+  Last Published: 
2017-05-11
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1241ee85/dependencies.html
--
diff --git a/dependencies.html b/dependencies.html
index 79a8b43..17a4aa2 100644
--- a/dependencies.html
+++ b/dependencies.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  Project Dependencies
 
@@ -524,7 +524,7 @@
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2017-05-10
+  Last Published: 
2017-05-11
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1241ee85/dependency-convergence.html
--
diff --git a/dependency-convergence.html b/dependency-convergence.html
index 8353952..4a4f3e2 100644
--- a/dependency-convergence.html
+++ b/dependency-convergence.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  Reactor Dependency Convergence
 
@@ -1849,7 +1849,7 @@
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2017-05-10
+  Last Published: 
2017-05-11
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1241ee85/dependency-info.html
--
diff --git a/dependency-info.html b/dependency-info.html
index 911a0e4..24b2202 100644
--- a/dependency-info.html
+++ b/dependency-info.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  Dependency Information
 
@@ -318,7 +318,7 @@
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2017-05-10
+  Last Published: 
2017-05-11
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1241ee85/dependency-management.html
--
diff --git a/dependency-management.html b/dependency-management.html
index ad857de..f468b7c 100644
--- a/dependency-management.html
+++ b/dependency-management.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  Project Dependency Management
 
@@ -894,7 +894,7 @@
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved. 

[45/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1241ee85/devapidocs/org/apache/hadoop/hbase/regionserver/KeyValueHeap.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/KeyValueHeap.html 
b/devapidocs/org/apache/hadoop/hbase/regionserver/KeyValueHeap.html
index aa80c60..b1c2159 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/KeyValueHeap.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/KeyValueHeap.html
@@ -128,7 +128,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Private
-public class KeyValueHeap
+public class KeyValueHeap
 extends NonReversedNonLazyKeyValueScanner
 implements KeyValueScanner, 
InternalScanner
 Implements a heap merge across any number of 
KeyValueScanners.
@@ -353,7 +353,7 @@ implements NonLazyKeyValueScanner
-doRealSeek,
 enforceSeek,
 isFileScanner,
 realSeekDone,
 shouldUseScanner
+doRealSeek,
 enforceSeek,
 getFilePath,
 isFileScanner,
 realSeekDone,
 shouldUseScanner
 
 
 
@@ -367,7 +367,7 @@ implements KeyValueScanner
-backwardSeek,
 enforceSeek,
 isFileScanner,
 realSeekDone,
 seekToLastRow,
 seekToPreviousRow,
 shouldUseScanner
+backwardSeek,
 enforceSeek,
 getFilePath,
 isFileScanner,
 realSeekDone,
 seekToLastRow,
 seekToPreviousRow,
 shouldUseScanner
 
 
 
@@ -389,7 +389,7 @@ implements 
 
 LOG
-private static finalorg.apache.commons.logging.Log LOG
+private static finalorg.apache.commons.logging.Log LOG
 
 
 
@@ -398,7 +398,7 @@ implements 
 
 heap
-protectedhttp://docs.oracle.com/javase/8/docs/api/java/util/PriorityQueue.html?is-external=true;
 title="class or interface in java.util">PriorityQueueKeyValueScanner heap
+protectedhttp://docs.oracle.com/javase/8/docs/api/java/util/PriorityQueue.html?is-external=true;
 title="class or interface in java.util">PriorityQueueKeyValueScanner heap
 
 
 
@@ -407,7 +407,7 @@ implements 
 
 scannersForDelayedClose
-protectedhttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListKeyValueScanner scannersForDelayedClose
+protectedhttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListKeyValueScanner scannersForDelayedClose
 
 
 
@@ -416,7 +416,7 @@ implements 
 
 current
-protectedKeyValueScanner 
current
+protectedKeyValueScanner 
current
 The current sub-scanner, i.e. the one that contains the 
next key/value
  to return to the client. This scanner is NOT included in heap
  (but we frequently add it back to the heap and pull the new winner out).
@@ -433,7 +433,7 @@ implements 
 
 comparator
-protectedKeyValueHeap.KVScannerComparator comparator
+protectedKeyValueHeap.KVScannerComparator comparator
 
 
 
@@ -450,7 +450,7 @@ implements 
 
 KeyValueHeap
-publicKeyValueHeap(http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">List? extends KeyValueScannerscanners,
+publicKeyValueHeap(http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">List? extends KeyValueScannerscanners,
 CellComparatorcomparator)
  throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 Constructor.  This KeyValueHeap will handle closing of 
passed in
@@ -470,7 +470,7 @@ implements 
 
 KeyValueHeap
-KeyValueHeap(http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">List? extends KeyValueScannerscanners,
+KeyValueHeap(http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">List? extends KeyValueScannerscanners,
  KeyValueHeap.KVScannerComparatorcomparator)
   throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 Constructor.
@@ -497,7 +497,7 @@ implements 
 
 peek
-publicCellpeek()
+publicCellpeek()
 Description copied from 
interface:KeyValueScanner
 Look at the next Cell in this scanner, but do not iterate 
scanner.
  NOTICE: The returned cell has not been passed into ScanQueryMatcher. So it 
may not be what the
@@ -516,7 +516,7 @@ implements 
 
 next
-publicCellnext()
+publicCellnext()
   throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 Description copied from 
interface:KeyValueScanner
 Return the next Cell in this scanner, iterating the 
scanner
@@ -536,7 +536,7 @@ implements 
 
 next
-publicbooleannext(http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListCellresult)

[40/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1241ee85/devapidocs/org/apache/hadoop/hbase/regionserver/StoreScanner.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/StoreScanner.html 
b/devapidocs/org/apache/hadoop/hbase/regionserver/StoreScanner.html
index 498fef5..3391360 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/StoreScanner.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/StoreScanner.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":9,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10};
+var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":9,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10};
 var tabs = {65535:["t0","All Methods"],1:["t1","Static 
Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -128,11 +128,15 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Private
-public class StoreScanner
+public class StoreScanner
 extends NonReversedNonLazyKeyValueScanner
 implements KeyValueScanner, 
InternalScanner, 
ChangedReadersObserver
-Scanner scans both the memstore and the Store. Coalesce 
KeyValue stream
- into ListKeyValue for a single row.
+Scanner scans both the memstore and the Store. Coalesce 
KeyValue stream into ListKeyValue
+ for a single row.
+ 
+ The implementation is not thread safe. So there will be no race between next 
and close. The only
+ exception is updateReaders, it will be called in the memstore flush thread to 
indicate that there
+ is a flush.
 
 
 
@@ -171,27 +175,27 @@ implements Field and Description
 
 
-protected boolean
-cacheBlocks
+private long
+bytesRead
 
 
-protected long
-cellsPerHeartbeatCheck
+private boolean
+cacheBlocks
 
 
-protected boolean
-closing
+private long
+cellsPerHeartbeatCheck
 
 
-protected http://docs.oracle.com/javase/8/docs/api/java/util/NavigableSet.html?is-external=true;
 title="class or interface in 
java.util">NavigableSetbyte[]
-columns
+private boolean
+closing
 
 
-protected long
+private long
 countPerRow
 
 
-protected http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListKeyValueScanner
+private http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListKeyValueScanner
 currentScanners
 
 
@@ -201,19 +205,19 @@ implements 
-protected ExecutorService
+private ExecutorService
 executor
 
 
-protected boolean
+private boolean
 explicitColumnQuery
 
 
-protected boolean
+private boolean
 flushed
 
 
-protected http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListStoreFile
+private http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListStoreFile
 flushedStoreFiles
 
 
@@ -221,7 +225,7 @@ implements flushLock
 
 
-protected boolean
+private boolean
 get
 
 
@@ -235,7 +239,7 @@ implements heap
 
 
-protected http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListKeyValueHeap
+private http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListKeyValueHeap
 heapsForDelayedClose
 
 
@@ -245,51 +249,51 @@ implements 
-protected Cell
-lastTop
-
-
 (package private) static boolean
 LAZY_SEEK_ENABLED_BY_DEFAULT
 We don't ever expect to change this, the constant is just 
for clarity.
 
 
-
-protected static boolean
+
+private static boolean
 lazySeekEnabledGlobally
 Used during unit testing to ensure that lazy seek does save 
seek ops
 
 
-
+
 private static 
org.apache.commons.logging.Log
 LOG
 
-
-protected ScanQueryMatcher
+
+private ScanQueryMatcher
 matcher
 
-
-protected long
+
+private long
 maxRowSize
 
-
-protected int
+
+private int
 minVersions
 
-
-protected long
+
+private long
 now
 
-
-protected long
+
+private long
 oldestUnexpiredTS
 
-
-protected boolean
+
+private boolean
 parallelSeekEnabled
 A flag that enables StoreFileScanner parallel-seeking
 
 
+
+private long
+preadMaxBytes
+
 
 private Cell
 prevCell
@@ -299,31 +303,42 @@ implements readPt
 
 
-protected Scan
-scan
+private Scan.ReadType
+readType
 
 
+private Scan
+scan
+
+
 private boolean
 scanUsePread
 
-
+
 protected Store
 store
 
-
-protected int
+
+private int
 storeLimit
 
-
-protected int
+
+private int
 storeOffset
 
-
+
 static 

[48/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1241ee85/devapidocs/org/apache/hadoop/hbase/io/hfile/HFileBlock.FSReader.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/io/hfile/HFileBlock.FSReader.html 
b/devapidocs/org/apache/hadoop/hbase/io/hfile/HFileBlock.FSReader.html
index a331b54..7db9b41 100644
--- a/devapidocs/org/apache/hadoop/hbase/io/hfile/HFileBlock.FSReader.html
+++ b/devapidocs/org/apache/hadoop/hbase/io/hfile/HFileBlock.FSReader.html
@@ -109,7 +109,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-static interface HFileBlock.FSReader
+static interface HFileBlock.FSReader
 A full-fledged reader with iteration ability.
 
 
@@ -192,7 +192,7 @@ var activeTableTab = "activeTableTab";
 
 
 readBlockData
-HFileBlockreadBlockData(longoffset,
+HFileBlockreadBlockData(longoffset,
  longonDiskSize,
  booleanpread)
   throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
@@ -216,7 +216,7 @@ var activeTableTab = "activeTableTab";
 
 
 blockRange
-HFileBlock.BlockIteratorblockRange(longstartOffset,
+HFileBlock.BlockIteratorblockRange(longstartOffset,
 longendOffset)
 Creates a block iterator over the given portion of the HFile.
  The iterator returns blocks starting with offset such that offset =
@@ -236,7 +236,7 @@ var activeTableTab = "activeTableTab";
 
 
 closeStreams
-voidcloseStreams()
+voidcloseStreams()
throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 Closes the backing streams
 
@@ -251,7 +251,7 @@ var activeTableTab = "activeTableTab";
 
 
 getBlockDecodingContext
-HFileBlockDecodingContextgetBlockDecodingContext()
+HFileBlockDecodingContextgetBlockDecodingContext()
 Get a decoder for BlockType.ENCODED_DATA
 blocks from this file.
 
 
@@ -261,7 +261,7 @@ var activeTableTab = "activeTableTab";
 
 
 getDefaultBlockDecodingContext
-HFileBlockDecodingContextgetDefaultBlockDecodingContext()
+HFileBlockDecodingContextgetDefaultBlockDecodingContext()
 Get the default decoder for blocks from this file.
 
 
@@ -271,7 +271,7 @@ var activeTableTab = "activeTableTab";
 
 
 setIncludesMemstoreTS
-voidsetIncludesMemstoreTS(booleanincludesMemstoreTS)
+voidsetIncludesMemstoreTS(booleanincludesMemstoreTS)
 
 
 
@@ -280,7 +280,7 @@ var activeTableTab = "activeTableTab";
 
 
 setDataBlockEncoder
-voidsetDataBlockEncoder(HFileDataBlockEncoderencoder)
+voidsetDataBlockEncoder(HFileDataBlockEncoderencoder)
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1241ee85/devapidocs/org/apache/hadoop/hbase/io/hfile/HFileBlock.FSReaderImpl.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/io/hfile/HFileBlock.FSReaderImpl.html 
b/devapidocs/org/apache/hadoop/hbase/io/hfile/HFileBlock.FSReaderImpl.html
index 584e956..7069ce3 100644
--- a/devapidocs/org/apache/hadoop/hbase/io/hfile/HFileBlock.FSReaderImpl.html
+++ b/devapidocs/org/apache/hadoop/hbase/io/hfile/HFileBlock.FSReaderImpl.html
@@ -117,7 +117,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-static class HFileBlock.FSReaderImpl
+static class HFileBlock.FSReaderImpl
 extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
 implements HFileBlock.FSReader
 Reads version 2 blocks from the filesystem.
@@ -140,59 +140,49 @@ implements Field and Description
 
 
-static int
-DEFAULT_BUFFER_SIZE
-The default buffer size for our buffered streams
-
-
-
 private HFileBlockDefaultDecodingContext
 defaultDecodingCtx
 Default context used when BlockType != BlockType.ENCODED_DATA.
 
 
-
+
 private HFileBlockDecodingContext
 encodedBlockDecodingCtx
 
-
-protected HFileContext
+
+private HFileContext
 fileContext
 
-
-protected long
+
+private long
 fileSize
 The size of the file we are reading from, or -1 if 
unknown.
 
 
-
+
 protected int
 hdrSize
 The size of the header
 
 
-
-protected HFileSystem
+
+private HFileSystem
 hfs
 The filesystem used to access data
 
 
-
-protected http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
+
+private http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
 pathName
 
-
+
 private http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/AtomicReference.html?is-external=true;
 title="class or interface in 
java.util.concurrent.atomic">AtomicReferenceHFileBlock.PrefetchedHeader
 prefetchedHeader
 Cache of the NEXT header after this.
 
 
-
-private http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/locks/Lock.html?is-external=true;
 

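HFileBlock.FSReader's blockRange() is described above as an iterator over a portion of the HFile that returns blocks starting at a given offset, with readBlockData() taking the offset and on-disk size. A simplified sketch of that offset-walking pattern, using hypothetical stand-in types rather than the real FSReader/BlockIterator API:

import java.util.ArrayList;
import java.util.List;

public class BlockRangeSketch {
  // A toy "block": starts at offset, occupies onDiskSize bytes.
  record Block(long offset, long onDiskSize) {}

  interface FSReaderLike {
    Block readBlockData(long offset); // read the block that starts at offset
  }

  // Iterate blocks whose start offsets fall in [startOffset, endOffset).
  static List<Block> blockRange(FSReaderLike reader, long startOffset, long endOffset) {
    List<Block> blocks = new ArrayList<>();
    long offset = startOffset;
    while (offset < endOffset) {
      Block b = reader.readBlockData(offset);
      blocks.add(b);
      offset += b.onDiskSize();        // next block begins right after this one
    }
    return blocks;
  }

  public static void main(String[] args) {
    // Fake reader: every block is 64 "bytes" long.
    FSReaderLike reader = offset -> new Block(offset, 64);
    System.out.println(blockRange(reader, 0, 256).size()); // 4
  }
}
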
[26/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1241ee85/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/KeyValueScanner.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/KeyValueScanner.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/KeyValueScanner.html
index cc7046c..b0c7b40 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/KeyValueScanner.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/KeyValueScanner.html
@@ -30,158 +30,165 @@
 022import java.io.IOException;
 023
 024import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
-025import org.apache.hadoop.hbase.Cell;
-026import 
org.apache.hadoop.hbase.KeyValue;
-027import 
org.apache.hadoop.hbase.client.Scan;
-028
-029/**
-030 * Scanner that returns the next 
KeyValue.
-031 */
-032@InterfaceAudience.Private
-033// TODO: Change name from KeyValueScanner 
to CellScanner only we already have a simple CellScanner
-034// so this should be something else 
altogether, a decoration on our base CellScanner. TODO.
-035// This class shows in CPs so do it all 
in one swell swoop. HBase-2.0.0.
-036public interface KeyValueScanner extends 
Shipper, Closeable {
-037  /**
-038   * The byte array represents for 
NO_NEXT_INDEXED_KEY;
-039   * The actual value is irrelevant 
because this is always compared by reference.
-040   */
-041  public static final Cell 
NO_NEXT_INDEXED_KEY = new KeyValue();
-042
-043  /**
-044   * Look at the next Cell in this 
scanner, but do not iterate scanner.
-045   * NOTICE: The returned cell has not 
been passed into ScanQueryMatcher. So it may not be what the
-046   * user need.
-047   * @return the next Cell
-048   */
-049  Cell peek();
-050
-051  /**
-052   * Return the next Cell in this 
scanner, iterating the scanner
-053   * @return the next Cell
-054   */
-055  Cell next() throws IOException;
-056
-057  /**
-058   * Seek the scanner at or after the 
specified KeyValue.
-059   * @param key seek value
-060   * @return true if scanner has values 
left, false if end of scanner
-061   */
-062  boolean seek(Cell key) throws 
IOException;
-063
-064  /**
-065   * Reseek the scanner at or after the 
specified KeyValue.
-066   * This method is guaranteed to seek at 
or after the required key only if the
-067   * key comes after the current position 
of the scanner. Should not be used
-068   * to seek to a key which may come 
before the current position.
-069   * @param key seek value (should be 
non-null)
-070   * @return true if scanner has values 
left, false if end of scanner
-071   */
-072  boolean reseek(Cell key) throws 
IOException;
-073
-074  /**
-075   * Get the order of this 
KeyValueScanner. This is only relevant for StoreFileScanners and
-076   * MemStoreScanners (other scanners 
simply return 0). This is required for comparing multiple
-077   * files to find out which one has the 
latest data. StoreFileScanners are ordered from 0
-078   * (oldest) to newest in increasing 
order. MemStoreScanner gets LONG.max since it always
-079   * contains freshest data.
-080   */
-081  long getScannerOrder();
-082
-083  /**
-084   * Close the KeyValue scanner.
-085   */
-086  void close();
-087
-088  /**
-089   * Allows to filter out scanners (both 
StoreFile and memstore) that we don't
-090   * want to use based on criteria such 
as Bloom filters and timestamp ranges.
-091   * @param scan the scan that we are 
selecting scanners for
-092   * @param store the store we are 
performing the scan on.
-093   * @param oldestUnexpiredTS the oldest 
timestamp we are interested in for
-094   *  this query, based on TTL
-095   * @return true if the scanner should 
be included in the query
-096   */
-097  boolean shouldUseScanner(Scan scan, 
Store store, long oldestUnexpiredTS);
-098
-099  // "Lazy scanner" optimizations
-100
-101  /**
-102   * Similar to {@link #seek} (or {@link 
#reseek} if forward is true) but only
-103   * does a seek operation after checking 
that it is really necessary for the
-104   * row/column combination specified by 
the kv parameter. This function was
-105   * added to avoid unnecessary disk 
seeks by checking row-column Bloom filters
-106   * before a seek on multi-column 
get/scan queries, and to optimize by looking
-107   * up more recent files first.
-108   * @param forward do a forward-only 
"reseek" instead of a random-access seek
-109   * @param useBloom whether to enable 
multi-column Bloom filter optimization
-110   */
-111  boolean requestSeek(Cell kv, boolean 
forward, boolean useBloom)
-112  throws IOException;
-113
-114  /**
-115   * We optimize our store scanners by 
checking the most recent store file
-116   * first, so we sometimes pretend we 
have done a seek but delay it until the
-117   * store scanner bubbles up to the top 
of the key-value heap. This method is
-118   * then used to ensure the top store 
file scanner 

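The KeyValueScanner javadoc above distinguishes seek() from reseek(): reseek() is only guaranteed to land at or after the key when that key does not come before the scanner's current position, which lets an implementation walk forward instead of restarting from the top. A toy sketch of that contract (a simplified stand-in, not any real HBase scanner):

import java.util.*;

public class SeekSketch {
  static final class ToyScanner {
    private final List<String> keys;   // sorted "cells"
    private int pos = 0;

    ToyScanner(List<String> keys) { this.keys = keys; }

    String peek() { return pos < keys.size() ? keys.get(pos) : null; }

    // seek: position at or after key, may start from anywhere (here: from 0).
    boolean seek(String key) {
      pos = 0;
      return reseek(key);
    }

    // reseek: forward-only; caller must not pass a key before the current position.
    boolean reseek(String key) {
      while (pos < keys.size() && keys.get(pos).compareTo(key) < 0) {
        pos++;                         // cheap forward walk, no restart
      }
      return pos < keys.size();
    }
  }

  public static void main(String[] args) {
    ToyScanner s = new ToyScanner(Arrays.asList("a1", "b2", "c3", "d4"));
    s.seek("b0");
    System.out.println(s.peek());      // b2
    s.reseek("c3");                    // forward of current position: allowed
    System.out.println(s.peek());      // c3
  }
}
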
[42/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1241ee85/devapidocs/org/apache/hadoop/hbase/regionserver/StoreFileInfo.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/StoreFileInfo.html 
b/devapidocs/org/apache/hadoop/hbase/regionserver/StoreFileInfo.html
index d399861..5a6092d 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/StoreFileInfo.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/StoreFileInfo.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":10,"i1":10,"i2":9,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":9,"i12":10,"i13":9,"i14":9,"i15":9,"i16":9,"i17":10,"i18":10,"i19":9,"i20":9,"i21":10,"i22":9,"i23":10,"i24":10,"i25":10,"i26":9};
+var methods = 
{"i0":10,"i1":10,"i2":9,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":9,"i13":10,"i14":9,"i15":9,"i16":9,"i17":9,"i18":10,"i19":10,"i20":9,"i21":9,"i22":10,"i23":9,"i24":10,"i25":10,"i26":10,"i27":9};
 var tabs = {65535:["t0","All Methods"],1:["t1","Static 
Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -282,86 +282,92 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 equals(http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Objectthat)
 
 
+http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
+getActiveFileName()
+Return the active file name that contains the real 
data.
+
+
+
 long
 getCreatedTimestamp()
 
-
+
 org.apache.hadoop.fs.FileStatus
 getFileStatus()
 
-
+
 HDFSBlocksDistribution
 getHDFSBlockDistribution()
 
-
+
 long
 getModificationTime()
 
-
+
 org.apache.hadoop.fs.Path
 getPath()
 
-
+
 Reference
 getReference()
 
-
+
 org.apache.hadoop.fs.FileStatus
 getReferencedFileStatus(org.apache.hadoop.fs.FileSystemfs)
 Get the FileStatus of the file referenced by 
this StoreFileInfo
 
 
-
+
 static org.apache.hadoop.fs.Path
 getReferredToFile(org.apache.hadoop.fs.Pathp)
 
-
+
 int
 hashCode()
 
-
+
 static boolean
 isDelFile(org.apache.hadoop.fs.Pathpath)
 
-
+
 static boolean
 isDelFile(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in 
java.lang">StringfileName)
 
-
+
 static boolean
 isHFile(org.apache.hadoop.fs.Pathpath)
 
-
+
 static boolean
 isHFile(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in 
java.lang">StringfileName)
 
-
+
 boolean
 isLink()
 
-
+
 boolean
 isReference()
 
-
+
 static boolean
 isReference(org.apache.hadoop.fs.Pathpath)
 
-
+
 static boolean
 isReference(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringname)
 
-
+
 boolean
 isTopReference()
 
-
+
 static boolean
 isValid(org.apache.hadoop.fs.FileStatusfileStatus)
 Return if the specified file is a valid store file or 
not.
 
 
-
+
 StoreFileReader
 open(org.apache.hadoop.fs.FileSystemfs,
 CacheConfigcacheConf,
@@ -373,17 +379,17 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 Open a Reader for the StoreFile
 
 
-
+
 void
 setRegionCoprocessorHost(RegionCoprocessorHostcoprocessorHost)
 Sets the region coprocessor env.
 
 
-
+
 http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
 toString()
 
-
+
 static boolean
 validateStoreFileName(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringfileName)
 Validate the store file name.
@@ -1053,7 +1059,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 
-
+
 
 hashCode
 publicinthashCode()
@@ -1063,6 +1069,20 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 
+
+
+
+
+
+getActiveFileName
+public String getActiveFileName()
+Return the active file name that contains the real data.
+
+ For a referenced hfile, we will return the name of the reference file as it will be used to
+ construct the StoreFileReader. And for a linked hfile, we will return the name of the file being linked.
+
+
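A small sketch of the rule just stated, using hypothetical flags rather than the real StoreFileInfo fields: a reference keeps its own name (the reader is built from it), while a link resolves to the name of the file it points at.

// Hypothetical mirror of the naming rule; not the actual StoreFileInfo code.
final class ActiveFileNameExample {
    static String activeFileName(boolean isReference, boolean isLink,
            String ownFileName, String referredToFileName) {
        if (isReference) {
            // The reference file name is what the StoreFileReader is built from.
            return ownFileName;
        }
        if (isLink) {
            // For a link, the data really lives in the linked-to hfile.
            return referredToFileName;
        }
        return ownFileName;
    }

    public static void main(String[] args) {
        // Hypothetical file names, purely for illustration.
        System.out.println(activeFileName(true, false, "abc123.REGION", "abc123"));
        System.out.println(activeFileName(false, true, "table=region-abc123", "abc123"));
    }
}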
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1241ee85/devapidocs/org/apache/hadoop/hbase/regionserver/StoreFileReader.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/StoreFileReader.html 
b/devapidocs/org/apache/hadoop/hbase/regionserver/StoreFileReader.html
index b6d92d0..a3c4858 100644
--- 

[46/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1241ee85/devapidocs/org/apache/hadoop/hbase/io/hfile/HFileBlock.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/io/hfile/HFileBlock.html 
b/devapidocs/org/apache/hadoop/hbase/io/hfile/HFileBlock.html
index bd75b02..6dbfaa0 100644
--- a/devapidocs/org/apache/hadoop/hbase/io/hfile/HFileBlock.html
+++ b/devapidocs/org/apache/hadoop/hbase/io/hfile/HFileBlock.html
@@ -114,7 +114,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Private
-public class HFileBlock
+public class HFileBlock
 extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
 implements Cacheable
 Reads HFile 
version 2 blocks to HFiles and via Cacheable Interface to 
caches.
@@ -719,7 +719,7 @@ implements 
 
 LOG
-private static finalorg.apache.commons.logging.Log LOG
+private static finalorg.apache.commons.logging.Log LOG
 
 
 
@@ -728,7 +728,7 @@ implements 
 
 blockType
-privateBlockType blockType
+privateBlockType blockType
 Type of block. Header field 0.
 
 
@@ -738,7 +738,7 @@ implements 
 
 onDiskSizeWithoutHeader
-privateint onDiskSizeWithoutHeader
+privateint onDiskSizeWithoutHeader
 Size on disk excluding header, including checksum. Header 
field 1.
 
 See Also:
@@ -752,7 +752,7 @@ implements 
 
 uncompressedSizeWithoutHeader
-privateint uncompressedSizeWithoutHeader
+privateint uncompressedSizeWithoutHeader
 Size of pure data. Does not include header or checksums. 
Header field 2.
 
 See Also:
@@ -766,7 +766,7 @@ implements 
 
 prevBlockOffset
-privatelong prevBlockOffset
+privatelong prevBlockOffset
 The offset of the previous block on disk. Header field 
3.
 
 See Also:
@@ -780,7 +780,7 @@ implements 
 
 onDiskDataSizeWithHeader
-privateint onDiskDataSizeWithHeader
+privateint onDiskDataSizeWithHeader
 Size on disk of header + data. Excludes checksum. Header 
field 6,
  OR calculated from onDiskSizeWithoutHeader
 when using HDFS checksum.
 
@@ -795,7 +795,7 @@ implements 
 
 buf
-privateByteBuff buf
+privateByteBuff buf
 The in-memory representation of the hfile block. Can be on 
or offheap. Can be backed by
  a single ByteBuffer or by many. Make no assumptions.
 
@@ -816,7 +816,7 @@ implements 
 
 fileContext
-privateHFileContext fileContext
+privateHFileContext fileContext
 Meta data that holds meta information on the 
hfileblock.
 
 
@@ -826,7 +826,7 @@ implements 
 
 offset
-privatelong offset
+privatelong offset
 The offset of this block in the file. Populated by the 
reader for
  convenience of access. This offset is not part of the block header.
 
@@ -837,7 +837,7 @@ implements 
 
 memType
-privateCacheable.MemoryType memType
+privateCacheable.MemoryType memType
 
 
 
@@ -846,7 +846,7 @@ implements 
 
 nextBlockOnDiskSize
-privateint nextBlockOnDiskSize
+privateint nextBlockOnDiskSize
 The on-disk size of the next block, including the header 
and checksums if present, obtained by
  peeking into the first HConstants.HFILEBLOCK_HEADER_SIZE
 bytes of the next block's
  header, or UNSET if unknown.
@@ -864,7 +864,7 @@ implements 
 
 CHECKSUM_VERIFICATION_NUM_IO_THRESHOLD
-static final int CHECKSUM_VERIFICATION_NUM_IO_THRESHOLD
+static final int CHECKSUM_VERIFICATION_NUM_IO_THRESHOLD
On a checksum failure, do these many succeeding read requests using hdfs checksums before auto-reenabling hbase checksum verification.
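The constant above drives a simple fallback counter: after an hbase-level checksum failure the reader leans on hdfs checksums for that many reads before re-enabling hbase checksum verification. A rough, self-contained sketch of such a counter (not the actual HFileBlock reader logic, and the threshold value below is arbitrary):

// Illustrative checksum-fallback counter.
final class ChecksumFallback {
    private final int threshold;
    private int hdfsChecksumReadsLeft = 0;

    ChecksumFallback(int threshold) {
        this.threshold = threshold;
    }

    // Called when an hbase checksum verification fails.
    void onHBaseChecksumFailure() {
        hdfsChecksumReadsLeft = threshold;
    }

    // Decide, per read, whether to rely on hdfs checksums instead.
    boolean useHdfsChecksumForNextRead() {
        if (hdfsChecksumReadsLeft > 0) {
            hdfsChecksumReadsLeft--;
            return true;
        }
        return false;   // back to hbase checksum verification
    }

    public static void main(String[] args) {
        ChecksumFallback fb = new ChecksumFallback(3);
        fb.onHBaseChecksumFailure();
        for (int i = 0; i < 5; i++) {
            System.out.println(fb.useHdfsChecksumForNextRead()); // true three times, then false
        }
    }
}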
 
@@ -879,7 +879,7 @@ implements 
 
 UNSET
-private staticint UNSET
+private staticint UNSET
 
 
 
@@ -888,7 +888,7 @@ implements 
 
 FILL_HEADER
-public static finalboolean FILL_HEADER
+public static finalboolean FILL_HEADER
 
 See Also:
 Constant
 Field Values
@@ -901,7 +901,7 @@ implements 
 
 DONT_FILL_HEADER
-public static finalboolean DONT_FILL_HEADER
+public static finalboolean DONT_FILL_HEADER
 
 See Also:
 Constant
 Field Values
@@ -914,7 +914,7 @@ implements 
 
 MULTI_BYTE_BUFFER_HEAP_SIZE
-public static finalint MULTI_BYTE_BUFFER_HEAP_SIZE
+public static finalint MULTI_BYTE_BUFFER_HEAP_SIZE
 
 
 
@@ -923,7 +923,7 @@ implements 
 
 BLOCK_METADATA_SPACE
-static finalint BLOCK_METADATA_SPACE
+static finalint BLOCK_METADATA_SPACE
 Space for metadata on a block that gets stored along with 
the block when we cache it.
  There are a few bytes stuck on the end of the HFileBlock that we pull in from 
HDFS (note,
  when we read from HDFS, we pull in an HFileBlock AND the header of the next 
block if one).
@@ -946,7 +946,7 @@ implements 
 
 CHECKSUM_SIZE
-static finalint CHECKSUM_SIZE
+static finalint CHECKSUM_SIZE
 Each checksum value is an integer that can be stored in 4 
bytes.
 
 See Also:
@@ -960,7 +960,7 @@ implements 
 
 DUMMY_HEADER_NO_CHECKSUM
-static finalbyte[] DUMMY_HEADER_NO_CHECKSUM
+static finalbyte[] DUMMY_HEADER_NO_CHECKSUM
 
 
 
@@ -969,7 +969,7 @@ implements 
 
 BLOCK_DESERIALIZER
-static finalCacheableDeserializerCacheable BLOCK_DESERIALIZER
+static 

[04/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1241ee85/testdevapidocs/src-html/org/apache/hadoop/hbase/TestIOFencing.html
--
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/TestIOFencing.html 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/TestIOFencing.html
index 157e668..e3058d6 100644
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/TestIOFencing.html
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/TestIOFencing.html
@@ -25,350 +25,367 @@
 017 */
 018package org.apache.hadoop.hbase;
 019
-020import java.io.IOException;
-021import java.util.Collection;
-022import java.util.List;
-023import 
java.util.concurrent.CountDownLatch;
+020import static 
org.junit.Assert.assertEquals;
+021import static 
org.junit.Assert.assertTrue;
+022
+023import com.google.common.collect.Lists;
 024
-025import com.google.common.collect.Lists;
-026import org.apache.commons.logging.Log;
-027import 
org.apache.commons.logging.LogFactory;
-028import 
org.apache.hadoop.conf.Configuration;
-029import org.apache.hadoop.fs.FileSystem;
-030import org.apache.hadoop.fs.Path;
-031import 
org.apache.hadoop.hbase.client.Admin;
-032import 
org.apache.hadoop.hbase.client.Table;
-033import 
org.apache.hadoop.hbase.regionserver.CompactingMemStore;
-034import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-035import 
org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.CompactionDescriptor;
-036import 
org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy;
-037import 
org.apache.hadoop.hbase.regionserver.HRegion;
-038import 
org.apache.hadoop.hbase.regionserver.HRegionServer;
-039import 
org.apache.hadoop.hbase.regionserver.HStore;
-040import 
org.apache.hadoop.hbase.regionserver.Region;
-041import 
org.apache.hadoop.hbase.regionserver.RegionServerServices;
-042import 
org.apache.hadoop.hbase.regionserver.Store;
-043import 
org.apache.hadoop.hbase.regionserver.StoreFile;
-044import 
org.apache.hadoop.hbase.regionserver.compactions.CompactionContext;
-045import 
org.apache.hadoop.hbase.regionserver.throttle.ThroughputController;
-046import 
org.apache.hadoop.hbase.regionserver.wal.WALUtil;
-047import 
org.apache.hadoop.hbase.security.User;
-048import 
org.apache.hadoop.hbase.testclassification.MediumTests;
-049import 
org.apache.hadoop.hbase.testclassification.MiscTests;
-050import 
org.apache.hadoop.hbase.util.Bytes;
-051import 
org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread;
-052import org.apache.hadoop.hbase.wal.WAL;
-053import org.junit.Test;
-054import 
org.junit.experimental.categories.Category;
-055
-056import static 
org.junit.Assert.assertEquals;
-057import static 
org.junit.Assert.assertTrue;
-058
-059/**
-060 * Test for the case where a regionserver going down has enough cycles to do damage to regions
-061 * that have actually been assigned elsewhere.
-062 *
-063 * <p>If we happen to assign a region before it is fully done with its old location -- i.e. it is on two servers at the
-064 * same time -- all can work fine until the case where the region on the dying server decides to compact or otherwise
-065 * change the region file set.  The region in its new location will then get a surprise when it tries to do something
-066 * w/ a file removed by the region in its old location on the dying server.
-067 *
-068 * <p>Making a test for this case is a little tough in that even if a file is deleted up on the namenode,
-069 * if the file was opened before the delete, it will continue to let reads happen until something changes the
-070 * state of cached blocks in the dfsclient that was already open (a block from the deleted file is cleaned
-071 * from the datanode by the NN).
-072 *
-073 * <p>What we will do below is do an explicit check for existence on the files listed in the region that
-074 * has had some files removed because of a compaction.  This sort of hurries along and makes certain what is a chance
-075 * occurrence.
-076 */
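The explicit existence check mentioned above amounts to comparing the store files a region still lists against what the filesystem actually holds. A toy, self-contained version with plain strings standing in for Paths (the real test asks the FileSystem for each file):

import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.stream.Collectors;

// Toy version of the existence check; file names are hypothetical.
final class StoreFileExistenceCheck {
    // Returns the files the region still lists but that are gone from "disk".
    static List<String> missingFiles(List<String> listedByRegion, Set<String> presentOnDisk) {
        return listedByRegion.stream()
            .filter(f -> !presentOnDisk.contains(f))
            .collect(Collectors.toList());
    }

    public static void main(String[] args) {
        List<String> listed = Arrays.asList("hfile-a", "hfile-b", "hfile-c");
        Set<String> onDisk = new HashSet<>(Arrays.asList("hfile-a", "hfile-c"));
        // hfile-b was removed by a compaction on the old server: the check flags it.
        System.out.println(missingFiles(listed, onDisk));   // [hfile-b]
    }
}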
-077@Category({MiscTests.class, 
MediumTests.class})
-078public class TestIOFencing {
-079  private static final Log LOG = 
LogFactory.getLog(TestIOFencing.class);
-080  static {
-081// Uncomment the following lines if 
more verbosity is needed for
-082// debugging (see HBASE-12285 for 
details).
-083
//((Log4JLogger)FSNamesystem.LOG).getLogger().setLevel(Level.ALL);
-084
//((Log4JLogger)DataNode.LOG).getLogger().setLevel(Level.ALL);
-085
//((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.ALL);
-086
//((Log4JLogger)LogFactory.getLog("org.apache.hadoop.hdfs.server.namenode.FSNamesystem"))
-087//
.getLogger().setLevel(Level.ALL);
-088
//((Log4JLogger)DFSClient.LOG).getLogger().setLevel(Level.ALL);
-089  }
-090
-091  public abstract static class 
CompactionBlockerRegion extends HRegion {
-092volatile int compactCount = 0;
-093volatile CountDownLatch 
compactionsBlocked = new CountDownLatch(0);
-094volatile 

hbase-site git commit: INFRA-10751 Empty commit

2017-05-11 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 1241ee85f -> d5aa6a181


INFRA-10751 Empty commit


Project: http://git-wip-us.apache.org/repos/asf/hbase-site/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase-site/commit/d5aa6a18
Tree: http://git-wip-us.apache.org/repos/asf/hbase-site/tree/d5aa6a18
Diff: http://git-wip-us.apache.org/repos/asf/hbase-site/diff/d5aa6a18

Branch: refs/heads/asf-site
Commit: d5aa6a181f575a96f02deb6e812b4593ce5ccbb3
Parents: 1241ee8
Author: jenkins 
Authored: Thu May 11 14:59:21 2017 +
Committer: jenkins 
Committed: Thu May 11 14:59:21 2017 +

--

--




[32/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1241ee85/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.PrefetchedHeader.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.PrefetchedHeader.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.PrefetchedHeader.html
index 3fedd0b..6414009 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.PrefetchedHeader.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.PrefetchedHeader.html
@@ -25,2075 +25,2063 @@
 017 */
 018package 
org.apache.hadoop.hbase.io.hfile;
 019
-020import java.io.DataInputStream;
-021import java.io.DataOutput;
-022import java.io.DataOutputStream;
-023import java.io.IOException;
-024import java.io.InputStream;
-025import java.nio.ByteBuffer;
-026import 
java.util.concurrent.atomic.AtomicReference;
-027import java.util.concurrent.locks.Lock;
-028import 
java.util.concurrent.locks.ReentrantLock;
-029
-030import org.apache.commons.logging.Log;
-031import 
org.apache.commons.logging.LogFactory;
-032import 
org.apache.hadoop.fs.FSDataInputStream;
-033import 
org.apache.hadoop.fs.FSDataOutputStream;
-034import org.apache.hadoop.fs.Path;
-035import org.apache.hadoop.hbase.Cell;
-036import 
org.apache.hadoop.hbase.HConstants;
-037import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
-038import 
org.apache.hadoop.hbase.fs.HFileSystem;
-039import 
org.apache.hadoop.hbase.io.ByteArrayOutputStream;
-040import 
org.apache.hadoop.hbase.io.ByteBuffInputStream;
-041import 
org.apache.hadoop.hbase.io.ByteBufferWriterDataOutputStream;
-042import 
org.apache.hadoop.hbase.io.FSDataInputStreamWrapper;
-043import 
org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
-044import 
org.apache.hadoop.hbase.io.encoding.HFileBlockDecodingContext;
-045import 
org.apache.hadoop.hbase.io.encoding.HFileBlockDefaultDecodingContext;
-046import 
org.apache.hadoop.hbase.io.encoding.HFileBlockDefaultEncodingContext;
-047import 
org.apache.hadoop.hbase.io.encoding.HFileBlockEncodingContext;
-048import 
org.apache.hadoop.hbase.nio.ByteBuff;
-049import 
org.apache.hadoop.hbase.nio.MultiByteBuff;
-050import 
org.apache.hadoop.hbase.nio.SingleByteBuff;
-051import 
org.apache.hadoop.hbase.util.Bytes;
-052import 
org.apache.hadoop.hbase.util.ChecksumType;
-053import 
org.apache.hadoop.hbase.util.ClassSize;
-054import org.apache.hadoop.io.IOUtils;
-055
-056import 
com.google.common.annotations.VisibleForTesting;
-057import 
com.google.common.base.Preconditions;
-058
-059/**
-060 * Reads {@link HFile} version 2 blocks to HFiles and via {@link Cacheable} Interface to caches.
-061 * Version 2 was introduced in hbase-0.92.0. No longer has support for version 1 blocks since
-062 * hbase-1.3.0.
-063 *
-064 * <p>Version 1 was the original file block. Version 2 was introduced when we changed the hbase file
-065 * format to support multi-level block indexes and compound bloom filters (HBASE-3857).
-066 *
-067 * <h3>HFileBlock: Version 2</h3>
-068 * In version 2, a block is structured as follows:
-069 * <ul>
-070 * <li><b>Header:</b> See Writer#putHeader() for where header is written; header total size is
-071 * HFILEBLOCK_HEADER_SIZE
-072 * <ul>
-073 * <li>0. blockType: Magic record identifying the {@link BlockType} (8 bytes):
-074 * e.g. <code>DATABLK*</code>
-075 * <li>1. onDiskSizeWithoutHeader: Compressed -- a.k.a 'on disk' -- block size, excluding header,
-076 * but including tailing checksum bytes (4 bytes)
-077 * <li>2. uncompressedSizeWithoutHeader: Uncompressed block size, excluding header, and excluding
-078 * checksum bytes (4 bytes)
-079 * <li>3. prevBlockOffset: The offset of the previous block of the same type (8 bytes). This is
-080 * used to navigate to the previous block without having to go to the block index
-081 * <li>4: For minorVersions &gt;=1, the ordinal describing checksum type (1 byte)
-082 * <li>5: For minorVersions &gt;=1, the number of data bytes/checksum chunk (4 bytes)
-083 * <li>6: onDiskDataSizeWithHeader: For minorVersions &gt;=1, the size of data 'on disk', including
-084 * header, excluding checksums (4 bytes)
-085 * </ul>
-086 * </li>
-087 * <li><b>Raw/Compressed/Encrypted/Encoded data:</b> The compression
-088 * algorithm is the same for all the blocks in an {@link HFile}. If compression is NONE, this is
-089 * just raw, serialized Cells.
-090 * <li><b>Tail:</b> For minorVersions &gt;=1, a series of 4 byte checksums, one each for
-091 * the number of bytes specified by bytesPerChecksum.
-092 * </ul>
-093 *
-094 * <h3>Caching</h3>
-095 * Caches cache whole blocks with trailing checksums if any. We then tag on some metadata, the
-096 * content of BLOCK_METADATA_SPACE which will be flag on if we are doing 'hbase'
-097 * checksums and then the offset into the file which is needed when we re-make a cache key
-098 * when we return the block to the cache as 'done'. See {@link 
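The field list above can be read back with plain ByteBuffer arithmetic. The sketch below assumes the 33-byte header used when hbase checksums are on (8-byte magic, two 4-byte sizes, an 8-byte previous-block offset, then the three checksum-era fields); it is an informal decoder with made-up sample values, not the HFileBlock code:

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

// Informal decoder for the v2 header fields described above.
final class HFileV2HeaderSketch {
    static void describe(ByteBuffer header) {
        byte[] magic = new byte[8];
        header.get(magic);                                   // field 0: block type magic
        int onDiskSizeWithoutHeader = header.getInt();       // field 1
        int uncompressedSizeWithoutHeader = header.getInt(); // field 2
        long prevBlockOffset = header.getLong();             // field 3
        byte checksumType = header.get();                    // field 4 (minor version >= 1)
        int bytesPerChecksum = header.getInt();              // field 5
        int onDiskDataSizeWithHeader = header.getInt();      // field 6
        System.out.printf("%s onDisk=%d uncompressed=%d prev=%d chk=%d/%d withHeader=%d%n",
                new String(magic, StandardCharsets.US_ASCII), onDiskSizeWithoutHeader,
                uncompressedSizeWithoutHeader, prevBlockOffset, checksumType,
                bytesPerChecksum, onDiskDataSizeWithHeader);
    }

    public static void main(String[] args) {
        ByteBuffer b = ByteBuffer.allocate(33);               // 33-byte header assumed
        b.put("DATABLK*".getBytes(StandardCharsets.US_ASCII));
        b.putInt(4096).putInt(8192).putLong(-1L).put((byte) 1).putInt(512).putInt(4125);
        b.flip();
        describe(b);
    }
}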

[02/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1241ee85/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/DelegatingKeyValueScanner.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/DelegatingKeyValueScanner.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/DelegatingKeyValueScanner.html
index 74ac17d..7f553ad 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/DelegatingKeyValueScanner.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/DelegatingKeyValueScanner.html
@@ -28,98 +28,104 @@
 020
 021import java.io.IOException;
 022
-023import org.apache.hadoop.hbase.Cell;
-024import 
org.apache.hadoop.hbase.client.Scan;
-025import 
org.apache.hadoop.hbase.regionserver.KeyValueScanner;
-026import 
org.apache.hadoop.hbase.regionserver.Store;
-027
-028public class DelegatingKeyValueScanner 
implements KeyValueScanner {
-029  protected KeyValueScanner delegate;
-030
-031  public 
DelegatingKeyValueScanner(KeyValueScanner delegate) {
-032this.delegate = delegate;
-033  }
-034
-035  @Override
-036  public void shipped() throws 
IOException {
-037delegate.shipped();
-038  }
-039
-040  @Override
-041  public Cell peek() {
-042return delegate.peek();
-043  }
-044
-045  @Override
-046  public Cell next() throws IOException 
{
-047return delegate.next();
-048  }
-049
-050  @Override
-051  public boolean seek(Cell key) throws 
IOException {
-052return delegate.seek(key);
-053  }
-054
-055  @Override
-056  public boolean reseek(Cell key) throws 
IOException {
-057return delegate.reseek(key);
-058  }
-059
-060  @Override
-061  public long getScannerOrder() {
-062return delegate.getScannerOrder();
-063  }
-064
-065  @Override
-066  public void close() {
-067delegate.close();
-068  }
-069
-070  @Override
-071  public boolean shouldUseScanner(Scan 
scan, Store store, long oldestUnexpiredTS) {
-072return 
delegate.shouldUseScanner(scan, store, oldestUnexpiredTS);
-073  }
-074
-075  @Override
-076  public boolean requestSeek(Cell kv, 
boolean forward, boolean useBloom) throws IOException {
-077return delegate.requestSeek(kv, 
forward, useBloom);
-078  }
-079
-080  @Override
-081  public boolean realSeekDone() {
-082return delegate.realSeekDone();
-083  }
-084
-085  @Override
-086  public void enforceSeek() throws 
IOException {
-087delegate.enforceSeek();
-088  }
-089
-090  @Override
-091  public boolean isFileScanner() {
-092return delegate.isFileScanner();
-093  }
-094
-095  @Override
-096  public boolean backwardSeek(Cell key) 
throws IOException {
-097return delegate.backwardSeek(key);
-098  }
-099
-100  @Override
-101  public boolean seekToPreviousRow(Cell 
key) throws IOException {
-102return 
delegate.seekToPreviousRow(key);
-103  }
-104
-105  @Override
-106  public boolean seekToLastRow() throws 
IOException {
-107return delegate.seekToLastRow();
-108  }
-109
-110  @Override
-111  public Cell getNextIndexedKey() {
-112return 
delegate.getNextIndexedKey();
-113  }
-114}
+023import org.apache.hadoop.fs.Path;
+024import org.apache.hadoop.hbase.Cell;
+025import 
org.apache.hadoop.hbase.client.Scan;
+026import 
org.apache.hadoop.hbase.regionserver.KeyValueScanner;
+027import 
org.apache.hadoop.hbase.regionserver.Store;
+028
+029public class DelegatingKeyValueScanner 
implements KeyValueScanner {
+030  protected KeyValueScanner delegate;
+031
+032  public 
DelegatingKeyValueScanner(KeyValueScanner delegate) {
+033this.delegate = delegate;
+034  }
+035
+036  @Override
+037  public void shipped() throws 
IOException {
+038delegate.shipped();
+039  }
+040
+041  @Override
+042  public Cell peek() {
+043return delegate.peek();
+044  }
+045
+046  @Override
+047  public Cell next() throws IOException 
{
+048return delegate.next();
+049  }
+050
+051  @Override
+052  public boolean seek(Cell key) throws 
IOException {
+053return delegate.seek(key);
+054  }
+055
+056  @Override
+057  public boolean reseek(Cell key) throws 
IOException {
+058return delegate.reseek(key);
+059  }
+060
+061  @Override
+062  public long getScannerOrder() {
+063return delegate.getScannerOrder();
+064  }
+065
+066  @Override
+067  public void close() {
+068delegate.close();
+069  }
+070
+071  @Override
+072  public boolean shouldUseScanner(Scan 
scan, Store store, long oldestUnexpiredTS) {
+073return 
delegate.shouldUseScanner(scan, store, oldestUnexpiredTS);
+074  }
+075
+076  @Override
+077  public boolean requestSeek(Cell kv, 
boolean forward, boolean useBloom) throws IOException {
+078return delegate.requestSeek(kv, 
forward, useBloom);
+079  }
+080
+081  @Override
+082  public boolean realSeekDone() {
+083return delegate.realSeekDone();
+084  }
+085
+086  @Override
+087  public void enforceSeek() throws 
IOException {
+088delegate.enforceSeek();
+089  }
+090

[18/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1241ee85/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/StoreScanner.StoreScannerCompactionRace.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/StoreScanner.StoreScannerCompactionRace.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/StoreScanner.StoreScannerCompactionRace.html
index 573de43..4e9c7c3 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/StoreScanner.StoreScannerCompactionRace.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/StoreScanner.StoreScannerCompactionRace.html
@@ -32,1012 +32,1081 @@
 024import java.io.IOException;
 025import java.io.InterruptedIOException;
 026import java.util.ArrayList;
-027import java.util.List;
-028import java.util.NavigableSet;
-029import 
java.util.concurrent.CountDownLatch;
-030import 
java.util.concurrent.locks.ReentrantLock;
-031
-032import org.apache.commons.logging.Log;
-033import 
org.apache.commons.logging.LogFactory;
-034import org.apache.hadoop.hbase.Cell;
-035import 
org.apache.hadoop.hbase.CellComparator;
-036import 
org.apache.hadoop.hbase.CellUtil;
-037import 
org.apache.hadoop.hbase.DoNotRetryIOException;
-038import 
org.apache.hadoop.hbase.HConstants;
-039import 
org.apache.hadoop.hbase.KeyValue;
-040import 
org.apache.hadoop.hbase.KeyValueUtil;
-041import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
-042import 
org.apache.hadoop.hbase.client.IsolationLevel;
-043import 
org.apache.hadoop.hbase.client.Scan;
-044import 
org.apache.hadoop.hbase.executor.ExecutorService;
-045import 
org.apache.hadoop.hbase.filter.Filter;
-046import 
org.apache.hadoop.hbase.regionserver.ScannerContext.LimitScope;
-047import 
org.apache.hadoop.hbase.regionserver.ScannerContext.NextState;
-048import 
org.apache.hadoop.hbase.regionserver.handler.ParallelSeekHandler;
-049import 
org.apache.hadoop.hbase.regionserver.querymatcher.CompactionScanQueryMatcher;
-050import 
org.apache.hadoop.hbase.regionserver.querymatcher.LegacyScanQueryMatcher;
-051import 
org.apache.hadoop.hbase.regionserver.querymatcher.ScanQueryMatcher;
-052import 
org.apache.hadoop.hbase.regionserver.querymatcher.UserScanQueryMatcher;
-053import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-054
-055/**
-056 * Scanner scans both the memstore and the Store. Coalesce KeyValue stream
-057 * into List&lt;KeyValue&gt; for a single row.
-058 */
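The class comment above (one scanner over the memstore plus the store files, coalescing the streams per row) is essentially a k-way merge across per-source scanners. A toy merge over plain string keys, ignoring versions, deletes and MVCC, purely to illustrate the coalescing step:

import java.util.Arrays;
import java.util.Iterator;
import java.util.List;
import java.util.PriorityQueue;

// Toy k-way merge standing in for the memstore/storefile coalescing; not StoreScanner.
final class CoalesceSketch {
    private static final class Source {
        final Iterator<String> it;
        String head;
        Source(Iterator<String> it) { this.it = it; advance(); }
        void advance() { head = it.hasNext() ? it.next() : null; }
    }

    static void merge(List<List<String>> sortedSources) {
        PriorityQueue<Source> heap =
            new PriorityQueue<>((a, b) -> a.head.compareTo(b.head));
        for (List<String> src : sortedSources) {
            Source s = new Source(src.iterator());
            if (s.head != null) heap.add(s);
        }
        while (!heap.isEmpty()) {
            Source top = heap.poll();
            System.out.println(top.head);   // next key in overall sorted order
            top.advance();
            if (top.head != null) heap.add(top);
        }
    }

    public static void main(String[] args) {
        merge(Arrays.asList(
            Arrays.asList("row1", "row4"),      // e.g. an older store file
            Arrays.asList("row2", "row3"),      // a newer store file
            Arrays.asList("row3", "row5")));    // the memstore
    }
}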
-059@InterfaceAudience.Private
-060public class StoreScanner extends 
NonReversedNonLazyKeyValueScanner
-061implements KeyValueScanner, 
InternalScanner, ChangedReadersObserver {
-062  private static final Log LOG = 
LogFactory.getLog(StoreScanner.class);
-063  // In unit tests, the store could be 
null
-064  protected final Store store;
-065  protected ScanQueryMatcher matcher;
-066  protected KeyValueHeap heap;
-067  protected boolean cacheBlocks;
-068
-069  protected long countPerRow = 0;
-070  protected int storeLimit = -1;
-071  protected int storeOffset = 0;
-072
-073  // Used to indicate that the scanner 
has closed (see HBASE-1107)
-074  // Doesnt need to be volatile because 
it's always accessed via synchronized methods
-075  protected boolean closing = false;
-076  protected final boolean get;
-077  protected final boolean 
explicitColumnQuery;
-078  protected final boolean 
useRowColBloom;
-079  /**
-080   * A flag that enables StoreFileScanner 
parallel-seeking
-081   */
-082  protected boolean parallelSeekEnabled = 
false;
-083  protected ExecutorService executor;
-084  protected final Scan scan;
-085  protected final 
NavigableSetbyte[] columns;
-086  protected final long 
oldestUnexpiredTS;
-087  protected final long now;
-088  protected final int minVersions;
-089  protected final long maxRowSize;
-090  protected final long 
cellsPerHeartbeatCheck;
-091
-092  // Collects all the KVHeap that are 
eagerly getting closed during the
-093  // course of a scan
-094  protected ListKeyValueHeap 
heapsForDelayedClose = new ArrayList();
-095
-096  /**
-097   * The number of KVs seen by the 
scanner. Includes explicitly skipped KVs, but not
-098   * KVs skipped via seeking to next 
row/column. TODO: estimate them?
-099   */
-100  private long kvsScanned = 0;
-101  private Cell prevCell = null;
-102
-103  /** We don't ever expect to change 
this, the constant is just for clarity. */
-104  static final boolean 
LAZY_SEEK_ENABLED_BY_DEFAULT = true;
-105  public static final String 
STORESCANNER_PARALLEL_SEEK_ENABLE =
-106  
"hbase.storescanner.parallel.seek.enable";
+027import java.util.HashMap;
+028import java.util.List;
+029import java.util.Map;
+030import java.util.NavigableSet;
+031import 
java.util.concurrent.CountDownLatch;
+032import 
java.util.concurrent.locks.ReentrantLock;
+033
+034import org.apache.commons.logging.Log;
+035import 
org.apache.commons.logging.LogFactory;
+036import org.apache.hadoop.hbase.Cell;

[51/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-11 Thread git-site-role
Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.


Project: http://git-wip-us.apache.org/repos/asf/hbase-site/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase-site/commit/1241ee85
Tree: http://git-wip-us.apache.org/repos/asf/hbase-site/tree/1241ee85
Diff: http://git-wip-us.apache.org/repos/asf/hbase-site/diff/1241ee85

Branch: refs/heads/asf-site
Commit: 1241ee85f5760b3fcab20856e2605714075e9953
Parents: cac6146
Author: jenkins 
Authored: Thu May 11 14:58:59 2017 +
Committer: jenkins 
Committed: Thu May 11 14:58:59 2017 +

--
 acid-semantics.html | 4 +-
 apache_hbase_reference_guide.pdf| 4 +-
 apache_hbase_reference_guide.pdfmarks   | 4 +-
 bulk-loads.html | 4 +-
 checkstyle-aggregate.html   | 10564 -
 checkstyle.rss  | 6 +-
 coc.html| 4 +-
 cygwin.html | 4 +-
 dependencies.html   | 4 +-
 dependency-convergence.html | 4 +-
 dependency-info.html| 4 +-
 dependency-management.html  | 4 +-
 devapidocs/constant-values.html |32 +-
 devapidocs/index-all.html   |56 +-
 .../hadoop/hbase/backup/package-tree.html   | 2 +-
 .../org/apache/hadoop/hbase/class-use/Cell.html |10 +-
 .../hadoop/hbase/class-use/CellComparator.html  | 3 +-
 .../hbase/class-use/KeepDeletedCells.html   | 3 +-
 .../hbase/classification/package-tree.html  | 4 +-
 .../hbase/client/class-use/Scan.ReadType.html   |22 +
 .../hadoop/hbase/client/class-use/Scan.html | 2 +-
 .../hadoop/hbase/client/package-tree.html   |28 +-
 .../apache/hadoop/hbase/client/package-use.html | 3 +
 .../executor/class-use/ExecutorService.html | 2 +-
 .../hadoop/hbase/executor/package-tree.html | 2 +-
 .../hadoop/hbase/filter/package-tree.html   | 6 +-
 .../hadoop/hbase/fs/class-use/HFileSystem.html  | 2 +-
 .../io/class-use/FSDataInputStreamWrapper.html  | 2 +-
 .../io/hfile/HFileBlock.BlockIterator.html  | 6 +-
 .../io/hfile/HFileBlock.BlockWritable.html  | 6 +-
 .../hbase/io/hfile/HFileBlock.FSReader.html |16 +-
 .../hbase/io/hfile/HFileBlock.FSReaderImpl.html |   119 +-
 .../hbase/io/hfile/HFileBlock.Header.html   |18 +-
 .../io/hfile/HFileBlock.PrefetchedHeader.html   |12 +-
 .../hbase/io/hfile/HFileBlock.Writer.State.html |12 +-
 .../hbase/io/hfile/HFileBlock.Writer.html   |80 +-
 .../hadoop/hbase/io/hfile/HFileBlock.html   |   146 +-
 .../hbase/io/hfile/class-use/HFileContext.html  | 2 +-
 .../hadoop/hbase/io/hfile/package-tree.html | 8 +-
 .../hadoop/hbase/mapreduce/package-tree.html| 4 +-
 .../hadoop/hbase/master/package-tree.html   | 4 +-
 .../hbase/master/procedure/package-tree.html| 2 +-
 .../org/apache/hadoop/hbase/package-tree.html   |12 +-
 .../hadoop/hbase/procedure2/package-tree.html   | 6 +-
 .../hadoop/hbase/quotas/package-tree.html   | 4 +-
 .../KeyValueHeap.KVScannerComparator.html   |12 +-
 .../hadoop/hbase/regionserver/KeyValueHeap.html |50 +-
 .../hbase/regionserver/KeyValueScanner.html |77 +-
 .../hbase/regionserver/MobStoreScanner.html | 8 +-
 .../regionserver/NonLazyKeyValueScanner.html|53 +-
 .../NonReversedNonLazyKeyValueScanner.html  | 2 +-
 .../regionserver/ReversedKeyValueHeap.html  | 4 +-
 .../regionserver/ReversedMobStoreScanner.html   | 8 +-
 .../regionserver/ReversedStoreScanner.html  |10 +-
 .../hadoop/hbase/regionserver/ScanInfo.html |87 +-
 .../hbase/regionserver/SegmentScanner.html  |   123 +-
 .../StoreFile.Comparators.GetBulkTime.html  | 6 +-
 .../StoreFile.Comparators.GetFileSize.html  | 6 +-
 .../StoreFile.Comparators.GetMaxTimestamp.html  | 6 +-
 .../StoreFile.Comparators.GetPathName.html  | 6 +-
 .../StoreFile.Comparators.GetSeqId.html | 6 +-
 .../regionserver/StoreFile.Comparators.html | 8 +-
 .../hadoop/hbase/regionserver/StoreFile.html|85 +-
 .../hbase/regionserver/StoreFileInfo.html   |68 +-
 .../hbase/regionserver/StoreFileReader.html |   104 +-
 .../hbase/regionserver/StoreFileScanner.html|   169 +-
 ...StoreScanner.StoreScannerCompactionRace.html |12 +-
 .../hadoop/hbase/regionserver/StoreScanner.html |   396 +-
 .../regionserver/class-use/KeyValueHeap.html| 2 +-
 .../regionserver/class-use/KeyValueScanner.html | 6 +-
 .../hbase/regionserver/class-use/StoreFile.html | 5 +-
 .../class-use/StoreFileScanner.html |15 +-
 

[09/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1241ee85/testdevapidocs/org/apache/hadoop/hbase/regionserver/class-use/TestSwitchToStreamRead.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/regionserver/class-use/TestSwitchToStreamRead.html
 
b/testdevapidocs/org/apache/hadoop/hbase/regionserver/class-use/TestSwitchToStreamRead.html
new file mode 100644
index 000..4b1b5a3
--- /dev/null
+++ 
b/testdevapidocs/org/apache/hadoop/hbase/regionserver/class-use/TestSwitchToStreamRead.html
@@ -0,0 +1,125 @@
+http://www.w3.org/TR/html4/loose.dtd;>
+
+
+
+
+
+Uses of Class org.apache.hadoop.hbase.regionserver.TestSwitchToStreamRead (Apache HBase 2.0.0-SNAPSHOT Test API)
+Uses of Class org.apache.hadoop.hbase.regionserver.TestSwitchToStreamRead
+
+No usage of org.apache.hadoop.hbase.regionserver.TestSwitchToStreamRead
+

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1241ee85/testdevapidocs/org/apache/hadoop/hbase/regionserver/package-frame.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/regionserver/package-frame.html 
b/testdevapidocs/org/apache/hadoop/hbase/regionserver/package-frame.html
index 98df571..fd744e3 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/regionserver/package-frame.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/regionserver/package-frame.html
@@ -221,6 +221,7 @@
 TestStripeStoreEngine
 TestStripeStoreEngine.TestStoreEngine
 TestStripeStoreFileManager
+TestSwitchToStreamRead
 TestTags
 TestTags.TestCoprocessorForTags
 TestTimeRangeTracker

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1241ee85/testdevapidocs/org/apache/hadoop/hbase/regionserver/package-summary.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/regionserver/package-summary.html 
b/testdevapidocs/org/apache/hadoop/hbase/regionserver/package-summary.html
index 6994a02..e0273a9 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/regionserver/package-summary.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/regionserver/package-summary.html
@@ -1144,45 +1144,49 @@
 
 
 
+TestSwitchToStreamRead
+
+
+
 TestTags
 
 Class that test tags
 
 
-
+
 TestTags.TestCoprocessorForTags
 
 
-
+
 TestTimeRangeTracker
 
 
-
+
 TestTimestampFilterSeekHint
 
 
-
+
 TestWalAndCompactingMemStoreFlush
 
 This test verifies the correctness of the Per Column Family 
flushing strategy
  when part of the memstores are compacted memstores
 
 
-
+
 TestWALLockup
 
 Testing for lock up of WAL subsystem.
 
 
-
+
 TestWALLockup.DummyServer
 
 
-
+
 TestWALLockup.DummyWALActionsListener
 
 
-
+
 TestWALMonotonicallyIncreasingSeqId
 
 Test for HBASE-17471
@@ -1190,7 +1194,7 @@
  path.
 
 
-
+
 TestWideScanner
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1241ee85/testdevapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html 
b/testdevapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html
index a7ceea0..65c0a76 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html
@@ -525,6 +525,7 @@
 org.apache.hadoop.hbase.regionserver.TestStoreScanner
 org.apache.hadoop.hbase.regionserver.TestStripeStoreEngine
 org.apache.hadoop.hbase.regionserver.TestStripeStoreFileManager
+org.apache.hadoop.hbase.regionserver.TestSwitchToStreamRead
 org.apache.hadoop.hbase.regionserver.TestTags
 

[19/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1241ee85/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/StoreFileScanner.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/StoreFileScanner.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/StoreFileScanner.html
index 1c88305..f81ea94 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/StoreFileScanner.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/StoreFileScanner.html
@@ -35,530 +35,538 @@
 027import java.util.List;
 028import 
java.util.concurrent.atomic.LongAdder;
 029
-030import org.apache.hadoop.hbase.Cell;
-031import 
org.apache.hadoop.hbase.CellComparator;
-032import 
org.apache.hadoop.hbase.CellUtil;
-033import 
org.apache.hadoop.hbase.HConstants;
-034import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
-035import 
org.apache.hadoop.hbase.client.Scan;
-036import 
org.apache.hadoop.hbase.io.TimeRange;
-037import 
org.apache.hadoop.hbase.io.hfile.HFileScanner;
-038import 
org.apache.hadoop.hbase.regionserver.querymatcher.ScanQueryMatcher;
-039
-040/**
-041 * KeyValueScanner adaptor over the Reader.  It also provides hooks into
-042 * bloom filter things.
-043 */
-044@InterfaceAudience.LimitedPrivate("Coprocessor")
-045public class StoreFileScanner implements 
KeyValueScanner {
-046  // the reader it comes from:
-047  private final StoreFileReader reader;
-048  private final HFileScanner hfs;
-049  private Cell cur = null;
-050  private boolean closed = false;
-051
-052  private boolean realSeekDone;
-053  private boolean delayedReseek;
-054  private Cell delayedSeekKV;
-055
-056  private final boolean enforceMVCC;
-057  private final boolean hasMVCCInfo;
-058  // A flag represents whether could stop 
skipping KeyValues for MVCC
-059  // if have encountered the next row. 
Only used for reversed scan
-060  private boolean 
stopSkippingKVsIfNextRow = false;
-061
-062  private static LongAdder seekCount;
-063
-064  private final boolean 
canOptimizeForNonNullColumn;
-065
-066  private final long readPt;
-067
-068  // Order of this scanner relative to 
other scanners when duplicate key-value is found.
-069  // Higher values means scanner has 
newer data.
-070  private final long scannerOrder;
-071
-072  /**
-073   * Implements a {@link KeyValueScanner} on top of the specified {@link HFileScanner}
-074   * @param useMVCC If true, scanner will filter out updates with MVCC larger than {@code readPt}.
-075   * @param readPt MVCC value to use to filter out the updates newer than this scanner.
-076   * @param hasMVCC Set to true if underlying store file reader has MVCC info.
-077   * @param scannerOrder Order of the scanner relative to other scanners. See
-078   *  {@link KeyValueScanner#getScannerOrder()}.
-079   * @param canOptimizeForNonNullColumn {@code true} if we can make sure there is no null column,
-080   *  otherwise {@code false}. This is a hint for optimization.
-081   */
-082  public StoreFileScanner(StoreFileReader 
reader, HFileScanner hfs, boolean useMVCC,
-083  boolean hasMVCC, long readPt, long 
scannerOrder, boolean canOptimizeForNonNullColumn) {
-084this.readPt = readPt;
-085this.reader = reader;
-086this.hfs = hfs;
-087this.enforceMVCC = useMVCC;
-088this.hasMVCCInfo = hasMVCC;
-089this.scannerOrder = scannerOrder;
-090this.canOptimizeForNonNullColumn = 
canOptimizeForNonNullColumn;
-091  }
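The useMVCC/readPt pair documented in the constructor javadoc above boils down to skipping any cell whose write number is newer than the scanner's read point. A simplified, self-contained filter over (key, mvcc) pairs, not the real skip logic in this class (Cell here is a stand-in class, not org.apache.hadoop.hbase.Cell):

import java.util.Arrays;
import java.util.List;

final class MvccFilterSketch {
    private static final class Cell {
        final String key;
        final long mvcc;   // write number assigned when the cell was committed
        Cell(String key, long mvcc) { this.key = key; this.mvcc = mvcc; }
    }

    // Keep only cells already visible at the scanner's read point.
    static void scan(List<Cell> cells, boolean enforceMvcc, long readPt) {
        for (Cell c : cells) {
            if (enforceMvcc && c.mvcc > readPt) {
                continue;   // written after this scanner started: invisible
            }
            System.out.println(c.key + " (mvcc=" + c.mvcc + ")");
        }
    }

    public static void main(String[] args) {
        List<Cell> cells = Arrays.asList(
            new Cell("row1", 10), new Cell("row2", 42), new Cell("row3", 7));
        scan(cells, true, 20);   // prints row1 and row3 only
    }
}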
-092
-093  boolean isPrimaryReplica() {
-094return 
reader.isPrimaryReplicaReader();
-095  }
-096
-097  /**
-098   * Return an array of scanners 
corresponding to the given
-099   * set of store files.
+030import org.apache.hadoop.fs.Path;
+031import org.apache.hadoop.hbase.Cell;
+032import 
org.apache.hadoop.hbase.CellComparator;
+033import 
org.apache.hadoop.hbase.CellUtil;
+034import 
org.apache.hadoop.hbase.HConstants;
+035import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
+036import 
org.apache.hadoop.hbase.client.Scan;
+037import 
org.apache.hadoop.hbase.io.TimeRange;
+038import 
org.apache.hadoop.hbase.io.hfile.HFileScanner;
+039import 
org.apache.hadoop.hbase.regionserver.querymatcher.ScanQueryMatcher;
+040
+041/**
+042 * KeyValueScanner adaptor over the 
Reader.  It also provides hooks into
+043 * bloom filter things.
+044 */
+045@InterfaceAudience.LimitedPrivate("Coprocessor")
+046public class StoreFileScanner implements 
KeyValueScanner {
+047  // the reader it comes from:
+048  private final StoreFileReader reader;
+049  private final HFileScanner hfs;
+050  private Cell cur = null;
+051  private boolean closed = false;
+052
+053  private boolean realSeekDone;
+054  private boolean delayedReseek;
+055  private Cell delayedSeekKV;
+056
+057  private final boolean enforceMVCC;
+058  private final boolean hasMVCCInfo;
+059  // A flag represents whether could stop 
skipping KeyValues for MVCC

[03/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1241ee85/testdevapidocs/src-html/org/apache/hadoop/hbase/io/hfile/TestHFileBlock.BlockReaderThread.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/io/hfile/TestHFileBlock.BlockReaderThread.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/io/hfile/TestHFileBlock.BlockReaderThread.html
index b535394..1b4407c 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/io/hfile/TestHFileBlock.BlockReaderThread.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/io/hfile/TestHFileBlock.BlockReaderThread.html
@@ -679,187 +679,188 @@
 671  while (System.currentTimeMillis() 
 endTime) {
 672int blockId = 
rand.nextInt(NUM_TEST_BLOCKS);
 673long offset = 
offsets.get(blockId);
-674boolean pread = 
rand.nextBoolean();
-675boolean withOnDiskSize = 
rand.nextBoolean();
-676long expectedSize =
-677  (blockId == NUM_TEST_BLOCKS - 1 
? fileSize
-678  : offsets.get(blockId + 1)) 
- offset;
-679
-680HFileBlock b;
-681try {
-682  long onDiskSizeArg = 
withOnDiskSize ? expectedSize : -1;
-683  b = hbr.readBlockData(offset, 
onDiskSizeArg, pread);
-684} catch (IOException ex) {
-685  LOG.error("Error in client " + 
clientId + " trying to read block at "
-686  + offset + ", pread=" + 
pread + ", withOnDiskSize=" +
-687  withOnDiskSize, ex);
-688  return false;
-689}
-690
-691assertEquals(types.get(blockId), 
b.getBlockType());
-692assertEquals(expectedSize, 
b.getOnDiskSizeWithHeader());
-693assertEquals(offset, 
b.getOffset());
-694
-695++numBlocksRead;
-696if (pread)
-697  ++numPositionalRead;
-698if (withOnDiskSize)
-699  ++numWithOnDiskSize;
-700  }
-701  LOG.info("Client " + clientId + " 
successfully read " + numBlocksRead +
-702" blocks (with pread: " + 
numPositionalRead + ", with onDiskSize " +
-703"specified: " + numWithOnDiskSize 
+ ")");
-704
-705  return true;
-706}
-707
-708  }
-709
-710  @Test
-711  public void testConcurrentReading() 
throws Exception {
-712testConcurrentReadingInternals();
-713  }
-714
-715  protected void 
testConcurrentReadingInternals() throws IOException,
-716  InterruptedException, 
ExecutionException {
-717for (Compression.Algorithm 
compressAlgo : COMPRESSION_ALGORITHMS) {
-718  Path path = new 
Path(TEST_UTIL.getDataTestDir(), "concurrent_reading");
-719  Random rand = defaultRandom();
-720  ListLong offsets = new 
ArrayList();
-721  ListBlockType types = new 
ArrayList();
-722  writeBlocks(rand, compressAlgo, 
path, offsets, null, types, null);
-723  FSDataInputStream is = 
fs.open(path);
-724  long fileSize = 
fs.getFileStatus(path).getLen();
-725  HFileContext meta = new 
HFileContextBuilder()
-726  
.withHBaseCheckSum(true)
-727  
.withIncludesMvcc(includesMemstoreTS)
-728  
.withIncludesTags(includesTag)
-729  
.withCompression(compressAlgo)
-730  .build();
-731  HFileBlock.FSReader hbr = new 
HFileBlock.FSReaderImpl(is, fileSize, meta);
-732
-733  Executor exec = 
Executors.newFixedThreadPool(NUM_READER_THREADS);
-734  
ExecutorCompletionServiceBoolean ecs = new 
ExecutorCompletionService(exec);
-735
-736  for (int i = 0; i  
NUM_READER_THREADS; ++i) {
-737ecs.submit(new 
BlockReaderThread("reader_" + (char) ('A' + i), hbr,
-738offsets, types, fileSize));
-739  }
-740
-741  for (int i = 0; i  
NUM_READER_THREADS; ++i) {
-742FutureBoolean result = 
ecs.take();
-743assertTrue(result.get());
-744if (detailedLogging) {
-745  LOG.info(String.valueOf(i + 
1)
-746+ " reader threads finished 
successfully (algo=" + compressAlgo
-747+ ")");
-748}
-749  }
-750
-751  is.close();
-752}
-753  }
-754
-755  private long writeBlocks(Random rand, 
Compression.Algorithm compressAlgo,
-756  Path path, ListLong 
expectedOffsets, ListLong expectedPrevOffsets,
-757  ListBlockType 
expectedTypes, ListByteBuffer expectedContents
-758  ) throws IOException {
-759boolean cacheOnWrite = 
expectedContents != null;
-760FSDataOutputStream os = 
fs.create(path);
-761HFileContext meta = new 
HFileContextBuilder()
-762
.withHBaseCheckSum(true)
-763
.withIncludesMvcc(includesMemstoreTS)
-764
.withIncludesTags(includesTag)
-765
.withCompression(compressAlgo)
-766
.withBytesPerCheckSum(HFile.DEFAULT_BYTES_PER_CHECKSUM)
-767.build();
-768HFileBlock.Writer hbw = new 

[41/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1241ee85/devapidocs/org/apache/hadoop/hbase/regionserver/StoreFileScanner.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/StoreFileScanner.html 
b/devapidocs/org/apache/hadoop/hbase/regionserver/StoreFileScanner.html
index 6928b70..d28a9f8 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/StoreFileScanner.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/StoreFileScanner.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":9,"i8":9,"i9":9,"i10":9,"i11":9,"i12":9,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":9,"i21":10,"i22":9,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10};
+var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":9,"i9":9,"i10":9,"i11":9,"i12":9,"i13":9,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":9,"i22":10,"i23":9,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10};
 var tabs = {65535:["t0","All Methods"],1:["t1","Static 
Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -114,7 +114,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.LimitedPrivate(value="Coprocessor")
-public class StoreFileScanner
+public class StoreFileScanner
 extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
 implements KeyValueScanner
 KeyValueScanner adaptor over the Reader.  It also provides 
hooks into
@@ -268,20 +268,24 @@ implements getComparator()
 
 
+org.apache.hadoop.fs.Path
+getFilePath()
+
+
 Cell
 getNextIndexedKey()
 
-
+
 (package private) StoreFileReader
 getReader()
 
-
+
 long
 getScannerOrder()
 Get the order of this KeyValueScanner.
 
 
-
+
 static http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListStoreFileScanner
 getScannersForCompaction(http://docs.oracle.com/javase/8/docs/api/java/util/Collection.html?is-external=true;
 title="class or interface in java.util">CollectionStoreFilefiles,
 booleancanUseDropBehind,
@@ -289,7 +293,7 @@ implements Get scanners for compaction.
 
 
-
+
 static http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListStoreFileScanner
 getScannersForStoreFiles(http://docs.oracle.com/javase/8/docs/api/java/util/Collection.html?is-external=true;
 title="class or interface in java.util">CollectionStoreFilefiles,
 booleancacheBlocks,
@@ -300,7 +304,7 @@ implements Return an array of scanners corresponding to the given set 
of store files.
 
 
-
+
 static http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListStoreFileScanner
 getScannersForStoreFiles(http://docs.oracle.com/javase/8/docs/api/java/util/Collection.html?is-external=true;
 title="class or interface in java.util">CollectionStoreFilefiles,
 booleancacheBlocks,
@@ -313,45 +317,44 @@ implements 
+
 static http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListStoreFileScanner
 getScannersForStoreFiles(http://docs.oracle.com/javase/8/docs/api/java/util/Collection.html?is-external=true;
 title="class or interface in java.util">CollectionStoreFilefiles,
 booleancacheBlocks,
 booleanusePread,
 longreadPt)
-Return an array of scanners corresponding to the given
- set of store files.
+Return an array of scanners corresponding to the given set 
of store files.
 
 
-
+
 (package private) static long
 getSeekCount()
 
-
+
 (package private) static void
 instrument()
 
-
+
 boolean
 isFileScanner()
 
-
+
 (package private) boolean
 isPrimaryReplica()
 
-
+
 Cell
 next()
 Return the next Cell in this scanner, iterating the 
scanner
 
 
-
+
 Cell
 peek()
 Look at the next Cell in this scanner, but do not iterate 
scanner.
 
 
-
+
 boolean
 realSeekDone()
 We optimize our store scanners by checking the most recent 
store file
@@ -359,7 +362,7 @@ implements 
+
 boolean
 requestSeek(Cellkv,
booleanforward,
@@ -367,52 +370,52 @@ implements Pretend we have done a seek but don't do it yet, if 
possible.
 
 
-
+
 boolean
 reseek(Cellkey)
 Reseek the scanner at or after the specified KeyValue.
 
 
-
+
 (package private) static boolean
 reseekAtOrAfter(HFileScanners,
Cellk)
 
-
+
 boolean
 seek(Cellkey)
 Seek the scanner at or after the specified KeyValue.
 
 
-
+
 static boolean
 seekAtOrAfter(HFileScanners,
  Cellk)
 
-
+
 boolean
 seekToLastRow()
 Seek the 

[37/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1241ee85/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.BlockIterator.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.BlockIterator.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.BlockIterator.html
index 3fedd0b..6414009 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.BlockIterator.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.BlockIterator.html
@@ -25,2075 +25,2063 @@
 017 */
 018package 
org.apache.hadoop.hbase.io.hfile;
 019
-020import java.io.DataInputStream;
-021import java.io.DataOutput;
-022import java.io.DataOutputStream;
-023import java.io.IOException;
-024import java.io.InputStream;
-025import java.nio.ByteBuffer;
-026import 
java.util.concurrent.atomic.AtomicReference;
-027import java.util.concurrent.locks.Lock;
-028import 
java.util.concurrent.locks.ReentrantLock;
-029
-030import org.apache.commons.logging.Log;
-031import 
org.apache.commons.logging.LogFactory;
-032import 
org.apache.hadoop.fs.FSDataInputStream;
-033import 
org.apache.hadoop.fs.FSDataOutputStream;
-034import org.apache.hadoop.fs.Path;
-035import org.apache.hadoop.hbase.Cell;
-036import org.apache.hadoop.hbase.HConstants;
-037import org.apache.hadoop.hbase.classification.InterfaceAudience;
-038import org.apache.hadoop.hbase.fs.HFileSystem;
-039import org.apache.hadoop.hbase.io.ByteArrayOutputStream;
-040import org.apache.hadoop.hbase.io.ByteBuffInputStream;
-041import org.apache.hadoop.hbase.io.ByteBufferWriterDataOutputStream;
-042import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper;
-043import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
-044import org.apache.hadoop.hbase.io.encoding.HFileBlockDecodingContext;
-045import org.apache.hadoop.hbase.io.encoding.HFileBlockDefaultDecodingContext;
-046import org.apache.hadoop.hbase.io.encoding.HFileBlockDefaultEncodingContext;
-047import org.apache.hadoop.hbase.io.encoding.HFileBlockEncodingContext;
-048import org.apache.hadoop.hbase.nio.ByteBuff;
-049import org.apache.hadoop.hbase.nio.MultiByteBuff;
-050import org.apache.hadoop.hbase.nio.SingleByteBuff;
-051import org.apache.hadoop.hbase.util.Bytes;
-052import org.apache.hadoop.hbase.util.ChecksumType;
-053import org.apache.hadoop.hbase.util.ClassSize;
-054import org.apache.hadoop.io.IOUtils;
-055
-056import com.google.common.annotations.VisibleForTesting;
-057import com.google.common.base.Preconditions;
-058
-059/**
-060 * Reads {@link HFile} version 2 blocks to HFiles and via {@link Cacheable} Interface to caches.
-061 * Version 2 was introduced in hbase-0.92.0. No longer has support for version 1 blocks since
-062 * hbase-1.3.0.
-063 *
-064 * <p>Version 1 was the original file block. Version 2 was introduced when we changed the hbase file
-065 * format to support multi-level block indexes and compound bloom filters (HBASE-3857).
-066 *
-067 * <h3>HFileBlock: Version 2</h3>
-068 * In version 2, a block is structured as follows:
-069 * <ul>
-070 * <li><b>Header:</b> See Writer#putHeader() for where header is written; header total size is
-071 * HFILEBLOCK_HEADER_SIZE
-072 * <ul>
-073 * <li>0. blockType: Magic record identifying the {@link BlockType} (8 bytes):
-074 * e.g. <code>DATABLK*</code>
-075 * <li>1. onDiskSizeWithoutHeader: Compressed -- a.k.a 'on disk' -- block size, excluding header,
-076 * but including tailing checksum bytes (4 bytes)
-077 * <li>2. uncompressedSizeWithoutHeader: Uncompressed block size, excluding header, and excluding
-078 * checksum bytes (4 bytes)
-079 * <li>3. prevBlockOffset: The offset of the previous block of the same type (8 bytes). This is
-080 * used to navigate to the previous block without having to go to the block index
-081 * <li>4: For minorVersions >=1, the ordinal describing checksum type (1 byte)
-082 * <li>5: For minorVersions >=1, the number of data bytes/checksum chunk (4 bytes)
-083 * <li>6: onDiskDataSizeWithHeader: For minorVersions >=1, the size of data 'on disk', including
-084 * header, excluding checksums (4 bytes)
-085 * </ul>
-086 * </li>
-087 * <li><b>Raw/Compressed/Encrypted/Encoded data:</b> The compression
-088 * algorithm is the same for all the blocks in an {@link HFile}. If compression is NONE, this is
-089 * just raw, serialized Cells.
-090 * <li><b>Tail:</b> For minorVersions >=1, a series of 4 byte checksums, one each for
-091 * the number of bytes specified by bytesPerChecksum.
-092 * </ul>
-093 *
-094 * <h3>Caching</h3>
-095 * Caches cache whole blocks with trailing checksums if any. We then tag on some metadata, the
-096 * content of BLOCK_METADATA_SPACE which will be flag on if we are doing 'hbase'
-097 * checksums and then the offset into the file which is needed when we re-make a cache key
-098 * when we return the block to the cache as 'done'. See {@link 
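
The seven header fields listed above are fixed-width and written in that order, so a reader can pull them straight off a buffer. The following is a minimal, hypothetical decode sketch (it is not HBase's own HFileBlock parser; the class name and MAGIC_LENGTH constant are illustrative only):

  import java.nio.ByteBuffer;

  public final class HFileV2HeaderSketch {
    static final int MAGIC_LENGTH = 8;                  // blockType magic, e.g. "DATABLK*"

    // Dump the version-2 header fields from a buffer positioned at the start of a block.
    public static void dumpHeader(ByteBuffer buf) {
      byte[] magic = new byte[MAGIC_LENGTH];
      buf.get(magic);                                    // 0: block type magic (8 bytes)
      int onDiskSizeWithoutHeader = buf.getInt();        // 1: on-disk size, excl. header, incl. checksums
      int uncompressedSizeWithoutHeader = buf.getInt();  // 2: uncompressed size, excl. header and checksums
      long prevBlockOffset = buf.getLong();              // 3: offset of previous block of same type
      byte checksumType = buf.get();                     // 4: checksum type ordinal (minorVersion >= 1)
      int bytesPerChecksum = buf.getInt();               // 5: data bytes per checksum chunk
      int onDiskDataSizeWithHeader = buf.getInt();       // 6: on-disk data size incl. header, excl. checksums
      System.out.printf("type=%s onDisk=%d uncompressed=%d prev=%d checksumType=%d chunk=%d dataSize=%d%n",
          new String(magic), onDiskSizeWithoutHeader, uncompressedSizeWithoutHeader,
          prevBlockOffset, checksumType, bytesPerChecksum, onDiskDataSizeWithHeader);
    }
  }

The field widths sum to 33 bytes, which matches the checksummed HFILEBLOCK_HEADER_SIZE the javadoc refers to.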

[33/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1241ee85/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.Header.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.Header.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.Header.html
index 3fedd0b..6414009 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.Header.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.Header.html
@@ -25,2075 +25,2063 @@
 017 */
 018package 
org.apache.hadoop.hbase.io.hfile;
 019
[25/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1241ee85/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/SegmentScanner.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/SegmentScanner.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/SegmentScanner.html
index 11e5e0a..bb27967 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/SegmentScanner.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/SegmentScanner.html
@@ -31,381 +31,386 @@
 023import java.util.SortedSet;
 024
 025import org.apache.commons.lang.NotImplementedException;
-026import org.apache.hadoop.hbase.Cell;
-027import org.apache.hadoop.hbase.CellUtil;
-028import org.apache.hadoop.hbase.classification.InterfaceAudience;
-029import org.apache.hadoop.hbase.client.Scan;
-030
-031/**
-032 * A scanner of a single memstore segment.
-033 */
-034@InterfaceAudience.Private
-035public class SegmentScanner implements KeyValueScanner {
-036
-037  /**
-038   * Order of this scanner relative to other scanners. See
-039   * {@link KeyValueScanner#getScannerOrder()}.
-040   */
-041  private long scannerOrder;
-042  private static final long DEFAULT_SCANNER_ORDER = Long.MAX_VALUE;
-043
-044  // the observed structure
-045  protected final Segment segment;
-046  // the highest relevant MVCC
-047  private long readPoint;
-048  // the current iterator that can be reinitialized by
-049  // seek(), backwardSeek(), or reseek()
-050  protected Iterator<Cell> iter;
-051  // the pre-calculated cell to be returned by peek()
-052  protected Cell current = null;
-053  // or next()
-054  // A flag represents whether could stop skipping KeyValues for MVCC
-055  // if have encountered the next row. Only used for reversed scan
-056  private boolean stopSkippingKVsIfNextRow = false;
-057  // last iterated KVs by seek (to restore the iterator state after reseek)
-058  private Cell last = null;
-059
-060  // flag to indicate if this scanner is closed
-061  protected boolean closed = false;
-062
-063  protected SegmentScanner(Segment segment, long readPoint) {
-064    this(segment, readPoint, DEFAULT_SCANNER_ORDER);
-065  }
-066
-067  /**
-068   * @param scannerOrder see {@link KeyValueScanner#getScannerOrder()}.
-069   * Scanners are ordered from 0 (oldest) to newest in increasing order.
-070   */
-071  protected SegmentScanner(Segment segment, long readPoint, long scannerOrder) {
-072    this.segment = segment;
-073    this.readPoint = readPoint;
-074    //increase the reference count so the underlying structure will not be de-allocated
-075    this.segment.incScannerCount();
-076    iter = segment.iterator();
-077    // the initialization of the current is required for working with heap of SegmentScanners
-078    updateCurrent();
-079    this.scannerOrder = scannerOrder;
-080    if (current == null) {
-081      // nothing to fetch from this scanner
-082      close();
-083    }
-084  }
-085
-086  /**
-087   * Look at the next Cell in this scanner, but do not iterate the scanner
-088   * @return the currently observed Cell
-089   */
-090  @Override
-091  public Cell peek() {    // sanity check, the current should be always valid
-092    if (closed) {
-093      return null;
-094    }
-095    if (current!=null && current.getSequenceId() > readPoint) {
-096      throw new RuntimeException("current is invalid: read point is "+readPoint+", " +
-097          "while current sequence id is " +current.getSequenceId());
-098    }
-099    return current;
-100  }
-101
-102  /**
-103   * Return the next Cell in this scanner, iterating the scanner
-104   * @return the next Cell or null if end of scanner
-105   */
-106  @Override
-107  public Cell next() throws IOException {
-108    if (closed) {
-109      return null;
-110    }
-111    Cell oldCurrent = current;
-112    updateCurrent();          // update the currently observed Cell
-113    return oldCurrent;
-114  }
-115
-116  /**
-117   * Seek the scanner at or after the specified Cell.
-118   * @param cell seek value
-119   * @return true if scanner has values left, false if end of scanner
-120   */
-121  @Override
-122  public boolean seek(Cell cell) throws IOException {
-123    if (closed) {
-124      return false;
-125    }
-126    if(cell == null) {
-127      close();
-128      return false;
-129    }
-130    // restart the iterator from new key
-131    iter = getIterator(cell);
-132    // last is going to be reinitialized in the next getNext() call
-133    last = null;
-134    updateCurrent();
-135    return (current != null);
-136  }
-137
-138  protected Iterator<Cell> getIterator(Cell cell) {
-139    return segment.tailSet(cell).iterator();
-140  }
-141
-142  /**
-143   * Reseek the scanner at or after the specified KeyValue.
-144   * This method is guaranteed to seek at or after the required key only if the
-145   * key 
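
Taken together, seek()/peek()/next() above give the usual scanner contract: seek positions at or after a cell, peek shows the current cell without advancing, and next returns the current cell and advances. A hedged usage sketch, not code from the HBase tree; 'scanner', 'startCell' and process() are assumed placeholders:

  static void drain(KeyValueScanner scanner, Cell startCell) throws IOException {
    try {
      if (!scanner.seek(startCell)) {    // position at or after startCell; false means nothing left
        return;
      }
      for (Cell c; (c = scanner.next()) != null; ) {
        process(c);                      // next() hands back the current cell and advances
      }
    } finally {
      scanner.close();                   // releases the reference taken via segment.incScannerCount()
    }
  }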

[30/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1241ee85/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.Writer.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.Writer.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.Writer.html
index 3fedd0b..6414009 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.Writer.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.Writer.html
@@ -25,2075 +25,2063 @@
 017 */
 018package 
org.apache.hadoop.hbase.io.hfile;
 019

[39/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1241ee85/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/KeyValueHeap.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/KeyValueHeap.html 
b/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/KeyValueHeap.html
index 3774bbc..95e703e 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/KeyValueHeap.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/KeyValueHeap.html
@@ -146,7 +146,7 @@
 
 
 
-protected List<KeyValueHeap>
+private List<KeyValueHeap>
 StoreScanner.heapsForDelayedClose
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1241ee85/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/KeyValueScanner.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/KeyValueScanner.html
 
b/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/KeyValueScanner.html
index e1bf7eb..7a417ea 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/KeyValueScanner.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/KeyValueScanner.html
@@ -429,7 +429,7 @@
 
 
 
-protected List<KeyValueScanner>
+private List<KeyValueScanner>
 StoreScanner.currentScanners
 
 
@@ -669,7 +669,7 @@
 long order)
 
 
-protected List<KeyValueScanner>
+private List<KeyValueScanner>
 StoreScanner.getScannersNoCompaction()
 Get a filtered list of scanners.
 
@@ -732,7 +732,7 @@
 
 
 
-protected void
+private void
 StoreScanner.addCurrentScanners(List<? extends KeyValueScanner> scanners)
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1241ee85/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/StoreFile.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/StoreFile.html 
b/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/StoreFile.html
index 1276669..c0b4e80 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/StoreFile.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/StoreFile.html
@@ -534,7 +534,7 @@
 
 
 
-protected List<StoreFile>
+private List<StoreFile>
 StoreScanner.flushedStoreFiles
 
 
@@ -1213,8 +1213,7 @@
 boolean cacheBlocks,
 boolean usePread,
 long readPt)
-Return an array of scanners corresponding to the given
- set of store files.
+Return an array of scanners corresponding to the given set of store files.
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1241ee85/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/StoreFileScanner.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/StoreFileScanner.html
 
b/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/StoreFileScanner.html
index 7223e90..c29046a 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/StoreFileScanner.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/StoreFileScanner.html
@@ -132,6 +132,13 @@
 
 
 StoreFileScanner
+StoreFile.getPreadScanner(boolean cacheBlocks,
+    long readPt,
+    long scannerOrder,
+    boolean canOptimizeForNonNullColumn)
+
+
+StoreFileScanner
 StoreFileReader.getStoreFileScanner(boolean cacheBlocks,
     boolean pread,
     boolean isCompaction,
@@ -141,11 +148,10 @@
 Get a scanner to scan over this StoreFile.
 
 
-
+
 StoreFileScanner
-StoreFile.getStreamScanner(boolean canUseDropBehind,
+StoreFile.getStreamScanner(boolean canUseDropBehind,
     boolean cacheBlocks,
-
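
getPreadScanner is new in this hunk and sits alongside the reworked getStreamScanner; the full getStreamScanner signature appears in the StoreFile hunk elsewhere in this digest. A hedged sketch of how a caller might choose between the two factories (the flags and variables are assumed caller-side inputs, not StoreScanner code, and exception handling is omitted):

  StoreFileScanner scanner = usePread
      ? storeFile.getPreadScanner(cacheBlocks, readPt, order, canOptimizeForNonNullColumn)
      : storeFile.getStreamScanner(canUseDropBehind, cacheBlocks, false /* pread */,
          isCompaction, readPt, order, canOptimizeForNonNullColumn);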

[34/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1241ee85/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.FSReaderImpl.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.FSReaderImpl.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.FSReaderImpl.html
index 3fedd0b..6414009 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.FSReaderImpl.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.FSReaderImpl.html
@@ -25,2075 +25,2063 @@
 017 */
 018package 
org.apache.hadoop.hbase.io.hfile;
 019

[27/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1241ee85/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/KeyValueHeap.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/KeyValueHeap.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/KeyValueHeap.html
index d84c4f8..a4fc8e5 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/KeyValueHeap.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/KeyValueHeap.html
@@ -27,435 +27,438 @@
 019
 020package org.apache.hadoop.hbase.regionserver;
 021
-022import java.io.IOException;
-023import java.util.ArrayList;
-024import java.util.Comparator;
-025import java.util.List;
-026import java.util.PriorityQueue;
-027
-028import org.apache.commons.logging.Log;
-029import org.apache.commons.logging.LogFactory;
-030import org.apache.hadoop.hbase.Cell;
-031import org.apache.hadoop.hbase.CellComparator;
-032import org.apache.hadoop.hbase.classification.InterfaceAudience;
-033import org.apache.hadoop.hbase.regionserver.ScannerContext.NextState;
-034
-035/**
-036 * Implements a heap merge across any number of KeyValueScanners.
-037 * <p>
-038 * Implements KeyValueScanner itself.
+022import com.google.common.annotations.VisibleForTesting;
+023
+024import java.io.IOException;
+025import java.util.ArrayList;
+026import java.util.Comparator;
+027import java.util.List;
+028import java.util.PriorityQueue;
+029
+030import org.apache.commons.logging.Log;
+031import org.apache.commons.logging.LogFactory;
+032import org.apache.hadoop.hbase.Cell;
+033import org.apache.hadoop.hbase.CellComparator;
+034import org.apache.hadoop.hbase.classification.InterfaceAudience;
+035import org.apache.hadoop.hbase.regionserver.ScannerContext.NextState;
+036
+037/**
+038 * Implements a heap merge across any number of KeyValueScanners.
 039 * <p>
-040 * This class is used at the Region level to merge across Stores
-041 * and at the Store level to merge across the memstore and StoreFiles.
-042 * <p>
-043 * In the Region case, we also need InternalScanner.next(List), so this class
-044 * also implements InternalScanner.  WARNING: As is, if you try to use this
-045 * as an InternalScanner at the Store level, you will get runtime exceptions.
-046 */
-047@InterfaceAudience.Private
-048public class KeyValueHeap extends NonReversedNonLazyKeyValueScanner
-049    implements KeyValueScanner, InternalScanner {
-050  private static final Log LOG = LogFactory.getLog(KeyValueHeap.class);
-051  protected PriorityQueue<KeyValueScanner> heap = null;
-052  // Holds the scanners when a ever a eager close() happens.  All such eagerly closed
-053  // scans are collected and when the final scanner.close() happens will perform the
-054  // actual close.
-055  protected List<KeyValueScanner> scannersForDelayedClose = null;
-056
-057  /**
-058   * The current sub-scanner, i.e. the one that contains the next key/value
-059   * to return to the client. This scanner is NOT included in {@link #heap}
-060   * (but we frequently add it back to the heap and pull the new winner out).
-061   * We maintain an invariant that the current sub-scanner has already done
-062   * a real seek, and that current.peek() is always a real key/value (or null)
-063   * except for the fake last-key-on-row-column supplied by the multi-column
-064   * Bloom filter optimization, which is OK to propagate to StoreScanner. In
-065   * order to ensure that, always use {@link #pollRealKV()} to update current.
-066   */
-067  protected KeyValueScanner current = null;
-068
-069  protected KVScannerComparator comparator;
+040 * Implements KeyValueScanner itself.
+041 * <p>
+042 * This class is used at the Region level to merge across Stores
+043 * and at the Store level to merge across the memstore and StoreFiles.
+044 * <p>
+045 * In the Region case, we also need InternalScanner.next(List), so this class
+046 * also implements InternalScanner.  WARNING: As is, if you try to use this
+047 * as an InternalScanner at the Store level, you will get runtime exceptions.
+048 */
+049@InterfaceAudience.Private
+050public class KeyValueHeap extends NonReversedNonLazyKeyValueScanner
+051    implements KeyValueScanner, InternalScanner {
+052  private static final Log LOG = LogFactory.getLog(KeyValueHeap.class);
+053  protected PriorityQueue<KeyValueScanner> heap = null;
+054  // Holds the scanners when a ever a eager close() happens.  All such eagerly closed
+055  // scans are collected and when the final scanner.close() happens will perform the
+056  // actual close.
+057  protected List<KeyValueScanner> scannersForDelayedClose = null;
+058
+059  /**
+060   * The current sub-scanner, i.e. the one that contains the next key/value
+061   * to return to the client. This scanner is NOT included in {@link #heap}
+062   * (but we frequently add it back to the heap and pull the new winner out).
+063  
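
The class comment above describes the merge: each sub-scanner sits in a priority queue keyed by its current cell, the winner is polled, consumed, and pushed back so a new winner can be pulled out. A standalone sketch of the same idea over plain Iterators; KVScannerComparator itself is not shown in this hunk, so this is illustrative only:

  import java.util.Comparator;
  import java.util.Iterator;
  import java.util.List;
  import java.util.PriorityQueue;
  import java.util.function.Consumer;

  final class HeapMergeSketch {
    // Merge already-sorted sources into one sorted stream, the way KeyValueHeap merges the
    // memstore and StoreFile scanners: the queue is ordered by each source's current element.
    static <T> void merge(List<Iterator<T>> sources, Comparator<T> cmp, Consumer<T> out) {
      PriorityQueue<Head<T>> heap = new PriorityQueue<>((a, b) -> cmp.compare(a.value, b.value));
      for (Iterator<T> it : sources) {
        if (it.hasNext()) {
          heap.add(new Head<>(it));
        }
      }
      while (!heap.isEmpty()) {
        Head<T> top = heap.poll();       // the "current" sub-source: smallest head wins
        out.accept(top.value);
        if (top.advance()) {
          heap.add(top);                 // push it back and let the heap pick the next winner
        }
      }
    }

    private static final class Head<T> {
      final Iterator<T> it;
      T value;
      Head(Iterator<T> it) { this.it = it; this.value = it.next(); }
      boolean advance() {
        if (it.hasNext()) { this.value = it.next(); return true; }
        return false;
      }
    }
  }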

[36/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1241ee85/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.BlockWritable.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.BlockWritable.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.BlockWritable.html
index 3fedd0b..6414009 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.BlockWritable.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.BlockWritable.html
@@ -25,2075 +25,2063 @@
 017 */
 018package 
org.apache.hadoop.hbase.io.hfile;
 019

[31/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1241ee85/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.Writer.State.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.Writer.State.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.Writer.State.html
index 3fedd0b..6414009 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.Writer.State.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.Writer.State.html
@@ -25,2075 +25,2063 @@
 017 */
 018package 
org.apache.hadoop.hbase.io.hfile;
 019

[11/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1241ee85/testdevapidocs/org/apache/hadoop/hbase/io/hfile/TestChecksum.CorruptedFSReaderImpl.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/io/hfile/TestChecksum.CorruptedFSReaderImpl.html
 
b/testdevapidocs/org/apache/hadoop/hbase/io/hfile/TestChecksum.CorruptedFSReaderImpl.html
index 54a3b17..2953370 100644
--- 
a/testdevapidocs/org/apache/hadoop/hbase/io/hfile/TestChecksum.CorruptedFSReaderImpl.html
+++ 
b/testdevapidocs/org/apache/hadoop/hbase/io/hfile/TestChecksum.CorruptedFSReaderImpl.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10};
+var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10};
 var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -147,33 +147,9 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 
-static int
-DEFAULT_BUFFER_SIZE
-
-
-protected 
org.apache.hadoop.hbase.io.hfile.HFileContext
-fileContext
-
-
-protected long
-fileSize
-
-
 protected int
 hdrSize
 
-
-protected 
org.apache.hadoop.hbase.fs.HFileSystem
-hfs
-
-
-protected http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
-pathName
-
-
-protected 
org.apache.hadoop.hbase.io.FSDataInputStreamWrapper
-streamWrapper
-
 
 
 
@@ -263,12 +239,6 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
 toString()
 
-
-protected boolean
-validateChecksum(longarg0,
-http://docs.oracle.com/javase/8/docs/api/java/nio/ByteBuffer.html?is-external=true;
 title="class or interface in java.nio">ByteBufferarg1,
-intarg2)
-
 
 
 
@@ -301,73 +271,15 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 If set to true, corrupt reads using readAtOffset(...).
 
 
-
-
-
-
-
-streamWrapper
-protectedorg.apache.hadoop.hbase.io.FSDataInputStreamWrapper streamWrapper
-
-
-
-
-
-
-
-fileSize
-protectedlong fileSize
-
-
 
 
 
-
+
 
 hdrSize
 protected finalint hdrSize
 
 
-
-
-
-
-
-hfs
-protectedorg.apache.hadoop.hbase.fs.HFileSystem hfs
-
-
-
-
-
-
-
-DEFAULT_BUFFER_SIZE
-public static finalint DEFAULT_BUFFER_SIZE
-
-See Also:
-Constant
 Field Values
-
-
-
-
-
-
-
-
-fileContext
-protectedorg.apache.hadoop.hbase.io.hfile.HFileContext fileContext
-
-
-
-
-
-
-
-pathName
-protectedhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String pathName
-
-
 
 
 
@@ -502,22 +414,6 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
publicorg.apache.hadoop.hbase.io.encoding.HFileBlockDecodingContextgetDefaultBlockDecodingContext()
 
 
-
-
-
-
-
-validateChecksum
-protectedbooleanvalidateChecksum(longarg0,
-   http://docs.oracle.com/javase/8/docs/api/java/nio/ByteBuffer.html?is-external=true;
 title="class or interface in java.nio">ByteBufferarg1,
-   intarg2)
-throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
-
-Throws:
-http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
-
-
-
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1241ee85/testdevapidocs/org/apache/hadoop/hbase/io/hfile/TestHFileBlock.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/io/hfile/TestHFileBlock.html 
b/testdevapidocs/org/apache/hadoop/hbase/io/hfile/TestHFileBlock.html
index 2c08ac7..4b716fc 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/io/hfile/TestHFileBlock.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/io/hfile/TestHFileBlock.html
@@ -795,7 +795,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 testConcurrentReading
-public void testConcurrentReading()
+public void testConcurrentReading()
 throws Exception
 
 Throws:
@@ -809,7 +809,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 testConcurrentReadingInternals
-protected void testConcurrentReadingInternals()
+protected void testConcurrentReadingInternals()
 throws IOException

[38/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1241ee85/devapidocs/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.ImplData.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.ImplData.html
 
b/devapidocs/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.ImplData.html
index 19716c1..302cfb3 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.ImplData.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.ImplData.html
@@ -293,22 +293,22 @@ extends org.jamon.AbstractTemplateProxy.ImplData
 privateHRegionServer m_regionServer
 
 
-
+
 
 
 
 
-m_format
-privatehttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String m_format
+m_filter
+privatehttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String m_filter
 
 
-
+
 
 
 
 
-m_format__IsNotDefault
-privateboolean m_format__IsNotDefault
+m_filter__IsNotDefault
+privateboolean m_filter__IsNotDefault
 
 
 
@@ -329,40 +329,40 @@ extends org.jamon.AbstractTemplateProxy.ImplData
 privateboolean m_bcv__IsNotDefault
 
 
-
+
 
 
 
 
-m_bcn
-privatehttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String m_bcn
+m_format
+privatehttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String m_format
 
 
-
+
 
 
 
 
-m_bcn__IsNotDefault
-privateboolean m_bcn__IsNotDefault
+m_format__IsNotDefault
+privateboolean m_format__IsNotDefault
 
 
-
+
 
 
 
 
-m_filter
-privatehttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String m_filter
+m_bcn
+privatehttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String m_bcn
 
 
-
+
 
 
 
 
-m_filter__IsNotDefault
-privateboolean m_filter__IsNotDefault
+m_bcn__IsNotDefault
+privateboolean m_bcn__IsNotDefault
 
 
 
@@ -408,31 +408,31 @@ extends org.jamon.AbstractTemplateProxy.ImplData
 publicHRegionServergetRegionServer()
 
 
-
+
 
 
 
 
-setFormat
-publicvoidsetFormat(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringformat)
+setFilter
+publicvoidsetFilter(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringfilter)
 
 
-
+
 
 
 
 
-getFormat
-publichttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringgetFormat()
+getFilter
+publichttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringgetFilter()
 
 
-
+
 
 
 
 
-getFormat__IsNotDefault
-publicbooleangetFormat__IsNotDefault()
+getFilter__IsNotDefault
+publicbooleangetFilter__IsNotDefault()
 
 
 
@@ -462,58 +462,58 @@ extends org.jamon.AbstractTemplateProxy.ImplData
 publicbooleangetBcv__IsNotDefault()
 
 
-
+
 
 
 
 
-setBcn
-publicvoidsetBcn(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringbcn)
+setFormat
+publicvoidsetFormat(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringformat)
 
 
-
+
 
 
 
 
-getBcn
-publichttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringgetBcn()
+getFormat
+publichttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringgetFormat()
 
 
-
+
 
 
 
 
-getBcn__IsNotDefault
-publicbooleangetBcn__IsNotDefault()
+getFormat__IsNotDefault
+publicbooleangetFormat__IsNotDefault()
 
 
-
+
 
 
 
 
-setFilter
-publicvoidsetFilter(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringfilter)
+setBcn
+publicvoidsetBcn(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringbcn)
 
 
-
+
 
 
 
 
-getFilter
-publichttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringgetFilter()
+getBcn
+publichttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringgetBcn()
 
 
-
+
 
 
 
 
-getFilter__IsNotDefault
-publicbooleangetFilter__IsNotDefault()
+getBcn__IsNotDefault
+publicbooleangetBcn__IsNotDefault()
 
 
 


[35/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1241ee85/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.FSReader.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.FSReader.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.FSReader.html
index 3fedd0b..6414009 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.FSReader.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.FSReader.html
@@ -25,2075 +25,2063 @@
 017 */
 018package 
org.apache.hadoop.hbase.io.hfile;
 019

[24/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1241ee85/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/StoreFile.Comparators.GetBulkTime.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/StoreFile.Comparators.GetBulkTime.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/StoreFile.Comparators.GetBulkTime.html
index b01aa5a..8090868 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/StoreFile.Comparators.GetBulkTime.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/StoreFile.Comparators.GetBulkTime.html
@@ -597,215 +597,221 @@
 589return reader;
 590  }
 591
-592  public StoreFileScanner 
getStreamScanner(boolean canUseDropBehind, boolean cacheBlocks,
-593  boolean pread, boolean 
isCompaction, long readPt, long scannerOrder,
-594  boolean 
canOptimizeForNonNullColumn) throws IOException {
-595return 
createStreamReader(canUseDropBehind).getStoreFileScanner(
-596  cacheBlocks, pread, isCompaction, 
readPt, scannerOrder, canOptimizeForNonNullColumn);
-597  }
-598
-599  /**
-600   * @return Current reader.  Must call 
initReader first else returns null.
-601   * @see #initReader()
-602   */
-603  public StoreFileReader getReader() {
-604return this.reader;
-605  }
-606
-607  /**
-608   * @param evictOnClose whether to evict 
blocks belonging to this file
-609   * @throws IOException
-610   */
-611  public synchronized void 
closeReader(boolean evictOnClose)
-612  throws IOException {
-613if (this.reader != null) {
-614  this.reader.close(evictOnClose);
-615  this.reader = null;
-616}
-617  }
-618
-619  /**
-620   * Marks the status of the file as 
compactedAway.
-621   */
-622  public void markCompactedAway() {
-623this.compactedAway = true;
-624  }
-625
-626  /**
-627   * Delete this file
-628   * @throws IOException
-629   */
-630  public void deleteReader() throws 
IOException {
-631boolean evictOnClose =
-632cacheConf != null? 
cacheConf.shouldEvictOnClose(): true;
-633closeReader(evictOnClose);
-634this.fs.delete(getPath(), true);
-635  }
-636
-637  @Override
-638  public String toString() {
-639return this.fileInfo.toString();
-640  }
-641
-642  /**
-643   * @return a length description of this 
StoreFile, suitable for debug output
-644   */
-645  public String toStringDetailed() {
-646StringBuilder sb = new 
StringBuilder();
-647
sb.append(this.getPath().toString());
-648sb.append(", 
isReference=").append(isReference());
-649sb.append(", 
isBulkLoadResult=").append(isBulkLoadResult());
-650if (isBulkLoadResult()) {
-651  sb.append(", 
bulkLoadTS=").append(getBulkLoadTimestamp());
-652} else {
-653  sb.append(", 
seqid=").append(getMaxSequenceId());
-654}
-655sb.append(", 
majorCompaction=").append(isMajorCompaction());
-656
-657return sb.toString();
-658  }
-659
-660  /**
-661   * Gets whether to skip resetting the 
sequence id for cells.
-662   * @param skipResetSeqId The byte array 
of boolean.
-663   * @return Whether to skip resetting 
the sequence id.
-664   */
-665  private boolean isSkipResetSeqId(byte[] 
skipResetSeqId) {
-666if (skipResetSeqId != null && skipResetSeqId.length == 1) {
-667  return 
Bytes.toBoolean(skipResetSeqId);
-668}
-669return false;
-670  }
-671
-672  /**
-673   * @param fs
-674   * @param dir Directory to create file 
in.
-675   * @return random filename inside passed <code>dir</code>
-676   */
-677  public static Path getUniqueFile(final 
FileSystem fs, final Path dir)
-678  throws IOException {
-679if 
(!fs.getFileStatus(dir).isDirectory()) {
-680  throw new IOException("Expecting " 
+ dir.toString() +
-681" to be a directory");
-682}
-683return new Path(dir, 
UUID.randomUUID().toString().replaceAll("-", ""));
-684  }
-685
-686  public Long getMinimumTimestamp() {
-687return getReader().timeRange == null? 
null: getReader().timeRange.getMin();
-688  }
-689
-690  public Long getMaximumTimestamp() {
-691return getReader().timeRange == null? 
null: getReader().timeRange.getMax();
-692  }
-693
-694
-695  /**
-696   * Gets the approximate mid-point of 
this file that is optimal for use in splitting it.
-697   * @param comparator Comparator used to 
compare KVs.
-698   * @return The split point row, or null 
if splitting is not possible, or reader is null.
-699   */
-700  byte[] getFileSplitPoint(CellComparator 
comparator) throws IOException {
-701if (this.reader == null) {
-702  LOG.warn("Storefile " + this + " 
Reader is null; cannot get split point");
-703  return null;
-704}
-705// Get first, last, and mid keys.  
Midkey is the key that starts block
-706// in middle of hfile.  Has column 
and timestamp.  Need to return just
-707// the row we want to split on as 
midkey.
-708Cell midkey = this.reader.midkey();
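For readers skimming this hunk, a minimal sketch of the getUniqueFile() logic quoted above, assuming only the Hadoop FileSystem/Path API; the class name UniqueFileNameSketch is invented for illustration and is not part of HBase.

import java.io.IOException;
import java.util.UUID;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class UniqueFileNameSketch {

  // Mirrors the StoreFile#getUniqueFile behaviour shown in the diff above:
  // verify the target is a directory, then use a dash-less UUID as the name.
  public static Path uniqueFileUnder(FileSystem fs, Path dir) throws IOException {
    if (!fs.getFileStatus(dir).isDirectory()) {
      throw new IOException("Expecting " + dir + " to be a directory");
    }
    return new Path(dir, UUID.randomUUID().toString().replaceAll("-", ""));
  }

  private UniqueFileNameSketch() {
  }
}

Stripping the dashes keeps the file name short and hex-only, which is all the store layout needs.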

[29/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1241ee85/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.html
index 3fedd0b..6414009 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.html
@@ -25,2075 +25,2063 @@
 017 */
 018package 
org.apache.hadoop.hbase.io.hfile;
 019
-020import java.io.DataInputStream;
-021import java.io.DataOutput;
-022import java.io.DataOutputStream;
-023import java.io.IOException;
-024import java.io.InputStream;
-025import java.nio.ByteBuffer;
-026import 
java.util.concurrent.atomic.AtomicReference;
-027import java.util.concurrent.locks.Lock;
-028import 
java.util.concurrent.locks.ReentrantLock;
-029
-030import org.apache.commons.logging.Log;
-031import 
org.apache.commons.logging.LogFactory;
-032import 
org.apache.hadoop.fs.FSDataInputStream;
-033import 
org.apache.hadoop.fs.FSDataOutputStream;
-034import org.apache.hadoop.fs.Path;
-035import org.apache.hadoop.hbase.Cell;
-036import 
org.apache.hadoop.hbase.HConstants;
-037import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
-038import 
org.apache.hadoop.hbase.fs.HFileSystem;
-039import 
org.apache.hadoop.hbase.io.ByteArrayOutputStream;
-040import 
org.apache.hadoop.hbase.io.ByteBuffInputStream;
-041import 
org.apache.hadoop.hbase.io.ByteBufferWriterDataOutputStream;
-042import 
org.apache.hadoop.hbase.io.FSDataInputStreamWrapper;
-043import 
org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
-044import 
org.apache.hadoop.hbase.io.encoding.HFileBlockDecodingContext;
-045import 
org.apache.hadoop.hbase.io.encoding.HFileBlockDefaultDecodingContext;
-046import 
org.apache.hadoop.hbase.io.encoding.HFileBlockDefaultEncodingContext;
-047import 
org.apache.hadoop.hbase.io.encoding.HFileBlockEncodingContext;
-048import 
org.apache.hadoop.hbase.nio.ByteBuff;
-049import 
org.apache.hadoop.hbase.nio.MultiByteBuff;
-050import 
org.apache.hadoop.hbase.nio.SingleByteBuff;
-051import 
org.apache.hadoop.hbase.util.Bytes;
-052import 
org.apache.hadoop.hbase.util.ChecksumType;
-053import 
org.apache.hadoop.hbase.util.ClassSize;
-054import org.apache.hadoop.io.IOUtils;
-055
-056import 
com.google.common.annotations.VisibleForTesting;
-057import 
com.google.common.base.Preconditions;
-058
-059/**
-060 * Reads {@link HFile} version 2 blocks to HFiles and via {@link Cacheable} Interface to caches.
-061 * Version 2 was introduced in hbase-0.92.0. No longer has support for version 1 blocks since
-062 * hbase-1.3.0.
-063 *
-064 * <p>Version 1 was the original file block. Version 2 was introduced when we changed the hbase file
-065 * format to support multi-level block indexes and compound bloom filters (HBASE-3857).
-066 *
-067 * <h3>HFileBlock: Version 2</h3>
-068 * In version 2, a block is structured as follows:
-069 * <ul>
-070 * <li><b>Header:</b> See Writer#putHeader() for where header is written; header total size is
-071 * HFILEBLOCK_HEADER_SIZE
-072 * <ul>
-073 * <li>0. blockType: Magic record identifying the {@link BlockType} (8 bytes):
-074 * e.g. <code>DATABLK*</code>
-075 * <li>1. onDiskSizeWithoutHeader: Compressed -- a.k.a 'on disk' -- block size, excluding header,
-076 * but including tailing checksum bytes (4 bytes)
-077 * <li>2. uncompressedSizeWithoutHeader: Uncompressed block size, excluding header, and excluding
-078 * checksum bytes (4 bytes)
-079 * <li>3. prevBlockOffset: The offset of the previous block of the same type (8 bytes). This is
-080 * used to navigate to the previous block without having to go to the block index
-081 * <li>4: For minorVersions >=1, the ordinal describing checksum type (1 byte)
-082 * <li>5: For minorVersions >=1, the number of data bytes/checksum chunk (4 bytes)
-083 * <li>6: onDiskDataSizeWithHeader: For minorVersions >=1, the size of data 'on disk', including
-084 * header, excluding checksums (4 bytes)
-085 * </ul>
-086 * </li>
-087 * <li><b>Raw/Compressed/Encrypted/Encoded data:</b> The compression
-088 * algorithm is the same for all the blocks in an {@link HFile}. If compression is NONE, this is
-089 * just raw, serialized Cells.
-090 * <li><b>Tail:</b> For minorVersions >=1, a series of 4 byte checksums, one each for
-091 * the number of bytes specified by bytesPerChecksum.
-092 * </ul>
-093 *
-094 * <h3>Caching</h3>
-095 * Caches cache whole blocks with trailing checksums if any. We then tag on some metadata, the
-096 * content of BLOCK_METADATA_SPACE which will be flag on if we are doing 'hbase'
-097 * checksums and then the offset into the file which is needed when we re-make a cache key
-098 * when we return the block to the cache as 'done'. See {@link Cacheable#serialize(ByteBuffer)} and
-099 * {@link Cacheable#getDeserializer()}.
-100 *
-101 * <p>TODO:
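To make the header layout above concrete, here is a small, hedged sketch that reads the fixed header fields of a version-2 block (minor version >= 1) in the order and sizes listed above (8 + 4 + 4 + 8 + 1 + 4 + 4 = 33 bytes). HFileV2BlockHeader is a made-up holder class for the example, not an HBase type.

import java.io.DataInputStream;
import java.io.IOException;

// Illustrative holder for the header fields listed above; not an HBase class.
final class HFileV2BlockHeader {
  byte[] blockTypeMagic = new byte[8]; // e.g. "DATABLK*"
  int onDiskSizeWithoutHeader;         // includes trailing checksum bytes
  int uncompressedSizeWithoutHeader;   // excludes checksum bytes
  long prevBlockOffset;                // offset of previous block of same type
  byte checksumType;                   // ordinal of the checksum type
  int bytesPerChecksum;                // data bytes per checksum chunk
  int onDiskDataSizeWithHeader;        // header + data, excluding checksums

  static HFileV2BlockHeader read(DataInputStream in) throws IOException {
    HFileV2BlockHeader h = new HFileV2BlockHeader();
    in.readFully(h.blockTypeMagic);                  // 8 bytes
    h.onDiskSizeWithoutHeader = in.readInt();        // 4 bytes
    h.uncompressedSizeWithoutHeader = in.readInt();  // 4 bytes
    h.prevBlockOffset = in.readLong();               // 8 bytes
    h.checksumType = in.readByte();                  // 1 byte (minor version >= 1)
    h.bytesPerChecksum = in.readInt();               // 4 bytes (minor version >= 1)
    h.onDiskDataSizeWithHeader = in.readInt();       // 4 bytes (minor version >= 1)
    return h;
  }
}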

[16/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1241ee85/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.ImplData.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.ImplData.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.ImplData.html
index e9107e6..1f0030b 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.ImplData.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.ImplData.html
@@ -69,15 +69,15 @@
 061  requiredArguments = {
 062@org.jamon.annotations.Argument(name 
= "master", type = "HMaster")},
 063  optionalArguments = {
-064@org.jamon.annotations.Argument(name = "serverManager", type = "ServerManager"),
-065@org.jamon.annotations.Argument(name = "metaLocation", type = "ServerName"),
-066@org.jamon.annotations.Argument(name = "deadServers", type = "Set<ServerName>"),
+064@org.jamon.annotations.Argument(name = "catalogJanitorEnabled", type = "boolean"),
+065@org.jamon.annotations.Argument(name = "filter", type = "String"),
+066@org.jamon.annotations.Argument(name = "serverManager", type = "ServerManager"),
 067@org.jamon.annotations.Argument(name = "assignmentManager", type = "AssignmentManager"),
-068@org.jamon.annotations.Argument(name = "frags", type = "Map<String,Integer>"),
+068@org.jamon.annotations.Argument(name = "deadServers", type = "Set<ServerName>"),
 069@org.jamon.annotations.Argument(name = "servers", type = "List<ServerName>"),
-070@org.jamon.annotations.Argument(name = "catalogJanitorEnabled", type = "boolean"),
-071@org.jamon.annotations.Argument(name = "filter", type = "String"),
-072@org.jamon.annotations.Argument(name = "format", type = "String")})
+070@org.jamon.annotations.Argument(name = "format", type = "String"),
+071@org.jamon.annotations.Argument(name = "frags", type = "Map<String,Integer>"),
+072@org.jamon.annotations.Argument(name = "metaLocation", type = "ServerName")})
 073public class MasterStatusTmpl
 074  extends 
org.jamon.AbstractTemplateProxy
 075{
@@ -118,57 +118,57 @@
 110  return m_master;
 111}
 112private HMaster m_master;
-113// 28, 1
-114public void 
setServerManager(ServerManager serverManager)
+113// 25, 1
+114public void 
setCatalogJanitorEnabled(boolean catalogJanitorEnabled)
 115{
-116  // 28, 1
-117  m_serverManager = serverManager;
-118  m_serverManager__IsNotDefault = 
true;
+116  // 25, 1
+117  m_catalogJanitorEnabled = 
catalogJanitorEnabled;
+118  
m_catalogJanitorEnabled__IsNotDefault = true;
 119}
-120public ServerManager 
getServerManager()
+120public boolean 
getCatalogJanitorEnabled()
 121{
-122  return m_serverManager;
+122  return m_catalogJanitorEnabled;
 123}
-124private ServerManager 
m_serverManager;
-125public boolean 
getServerManager__IsNotDefault()
+124private boolean 
m_catalogJanitorEnabled;
+125public boolean 
getCatalogJanitorEnabled__IsNotDefault()
 126{
-127  return 
m_serverManager__IsNotDefault;
+127  return 
m_catalogJanitorEnabled__IsNotDefault;
 128}
-129private boolean 
m_serverManager__IsNotDefault;
-130// 22, 1
-131public void 
setMetaLocation(ServerName metaLocation)
+129private boolean 
m_catalogJanitorEnabled__IsNotDefault;
+130// 26, 1
+131public void setFilter(String 
filter)
 132{
-133  // 22, 1
-134  m_metaLocation = metaLocation;
-135  m_metaLocation__IsNotDefault = 
true;
+133  // 26, 1
+134  m_filter = filter;
+135  m_filter__IsNotDefault = true;
 136}
-137public ServerName getMetaLocation()
+137public String getFilter()
 138{
-139  return m_metaLocation;
+139  return m_filter;
 140}
-141private ServerName m_metaLocation;
-142public boolean 
getMetaLocation__IsNotDefault()
+141private String m_filter;
+142public boolean 
getFilter__IsNotDefault()
 143{
-144  return 
m_metaLocation__IsNotDefault;
+144  return m_filter__IsNotDefault;
 145}
-146private boolean 
m_metaLocation__IsNotDefault;
-147// 24, 1
-148public void setDeadServers(Set<ServerName> deadServers)
+146private boolean 
m_filter__IsNotDefault;
+147// 28, 1
+148public void 
setServerManager(ServerManager serverManager)
 149{
-150  // 24, 1
-151  m_deadServers = deadServers;
-152  m_deadServers__IsNotDefault = 
true;
+150  // 28, 1
+151  m_serverManager = serverManager;
+152  m_serverManager__IsNotDefault = 
true;
 153}
-154public Set<ServerName> getDeadServers()
+154public ServerManager 
getServerManager()
 155{
-156  return m_deadServers;
+156  return m_serverManager;
 157}
-158private Set<ServerName> m_deadServers;
-159

[22/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1241ee85/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/StoreFile.Comparators.GetSeqId.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/StoreFile.Comparators.GetSeqId.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/StoreFile.Comparators.GetSeqId.html
index b01aa5a..8090868 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/StoreFile.Comparators.GetSeqId.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/StoreFile.Comparators.GetSeqId.html
@@ -597,215 +597,221 @@
 589return reader;
 590  }
 591
-592  public StoreFileScanner 
getStreamScanner(boolean canUseDropBehind, boolean cacheBlocks,
-593  boolean pread, boolean 
isCompaction, long readPt, long scannerOrder,
-594  boolean 
canOptimizeForNonNullColumn) throws IOException {
-595return 
createStreamReader(canUseDropBehind).getStoreFileScanner(
-596  cacheBlocks, pread, isCompaction, 
readPt, scannerOrder, canOptimizeForNonNullColumn);
-597  }
-598
-599  /**
-600   * @return Current reader.  Must call 
initReader first else returns null.
-601   * @see #initReader()
-602   */
-603  public StoreFileReader getReader() {
-604return this.reader;
-605  }
-606
-607  /**
-608   * @param evictOnClose whether to evict 
blocks belonging to this file
-609   * @throws IOException
-610   */
-611  public synchronized void 
closeReader(boolean evictOnClose)
-612  throws IOException {
-613if (this.reader != null) {
-614  this.reader.close(evictOnClose);
-615  this.reader = null;
-616}
-617  }
-618
-619  /**
-620   * Marks the status of the file as 
compactedAway.
-621   */
-622  public void markCompactedAway() {
-623this.compactedAway = true;
-624  }
-625
-626  /**
-627   * Delete this file
-628   * @throws IOException
-629   */
-630  public void deleteReader() throws 
IOException {
-631boolean evictOnClose =
-632cacheConf != null? 
cacheConf.shouldEvictOnClose(): true;
-633closeReader(evictOnClose);
-634this.fs.delete(getPath(), true);
-635  }
-636
-637  @Override
-638  public String toString() {
-639return this.fileInfo.toString();
-640  }
-641
-642  /**
-643   * @return a length description of this 
StoreFile, suitable for debug output
-644   */
-645  public String toStringDetailed() {
-646StringBuilder sb = new 
StringBuilder();
-647
sb.append(this.getPath().toString());
-648sb.append(", 
isReference=").append(isReference());
-649sb.append(", 
isBulkLoadResult=").append(isBulkLoadResult());
-650if (isBulkLoadResult()) {
-651  sb.append(", 
bulkLoadTS=").append(getBulkLoadTimestamp());
-652} else {
-653  sb.append(", 
seqid=").append(getMaxSequenceId());
-654}
-655sb.append(", 
majorCompaction=").append(isMajorCompaction());
-656
-657return sb.toString();
-658  }
-659
-660  /**
-661   * Gets whether to skip resetting the 
sequence id for cells.
-662   * @param skipResetSeqId The byte array 
of boolean.
-663   * @return Whether to skip resetting 
the sequence id.
-664   */
-665  private boolean isSkipResetSeqId(byte[] 
skipResetSeqId) {
-666if (skipResetSeqId != null && skipResetSeqId.length == 1) {
-667  return 
Bytes.toBoolean(skipResetSeqId);
-668}
-669return false;
-670  }
-671
-672  /**
-673   * @param fs
-674   * @param dir Directory to create file 
in.
-675   * @return random filename inside passed <code>dir</code>
-676   */
-677  public static Path getUniqueFile(final 
FileSystem fs, final Path dir)
-678  throws IOException {
-679if 
(!fs.getFileStatus(dir).isDirectory()) {
-680  throw new IOException("Expecting " 
+ dir.toString() +
-681" to be a directory");
-682}
-683return new Path(dir, 
UUID.randomUUID().toString().replaceAll("-", ""));
-684  }
-685
-686  public Long getMinimumTimestamp() {
-687return getReader().timeRange == null? 
null: getReader().timeRange.getMin();
-688  }
-689
-690  public Long getMaximumTimestamp() {
-691return getReader().timeRange == null? 
null: getReader().timeRange.getMax();
-692  }
-693
-694
-695  /**
-696   * Gets the approximate mid-point of 
this file that is optimal for use in splitting it.
-697   * @param comparator Comparator used to 
compare KVs.
-698   * @return The split point row, or null 
if splitting is not possible, or reader is null.
-699   */
-700  byte[] getFileSplitPoint(CellComparator 
comparator) throws IOException {
-701if (this.reader == null) {
-702  LOG.warn("Storefile " + this + " 
Reader is null; cannot get split point");
-703  return null;
-704}
-705// Get first, last, and mid keys.  
Midkey is the key that starts block
-706// in middle of hfile.  Has column 
and timestamp.  Need to return just
-707// the row we want to split on as 
midkey.
-708Cell midkey = this.reader.midkey();
-709if 
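(The hunk is truncated here in this partial commit mail.) A small illustrative fragment of the step the comment above describes -- reducing the mid key, which still carries column and timestamp, to the bare row used as the split point. The real getFileSplitPoint() performs additional checks that this truncated hunk does not show; the class and method names below are invented for the example.

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;

// Illustrative only: a split point should be just the row portion of the mid key.
final class SplitPointSketch {
  static byte[] splitRowOf(Cell midKey) {
    return midKey == null ? null : CellUtil.cloneRow(midKey);
  }
}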

[14/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1241ee85/export_control.html
--
diff --git a/export_control.html b/export_control.html
index 43aeaf4..87fbc95 100644
--- a/export_control.html
+++ b/export_control.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  
   Export Control
@@ -336,7 +336,7 @@ for more details.
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2017-05-10
+  Last Published: 
2017-05-11
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1241ee85/hbase-annotations/checkstyle.html
--
diff --git a/hbase-annotations/checkstyle.html 
b/hbase-annotations/checkstyle.html
index 77c752a..1479b91 100644
--- a/hbase-annotations/checkstyle.html
+++ b/hbase-annotations/checkstyle.html
@@ -1,5 +1,5 @@
 http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd;>
-
+
 http://www.w3.org/1999/xhtml; xml:lang="en" lang="en">
   
 
@@ -10,7 +10,7 @@
   @import url("./css/site.css");
 
 
-
+
 
 
 
@@ -27,7 +27,7 @@
 
 
 
-Last Published: 2017-05-10
+Last Published: 2017-05-11
   | Version: 
2.0.0-SNAPSHOT
   
 Apache HBase - Annotations

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1241ee85/hbase-annotations/dependencies.html
--
diff --git a/hbase-annotations/dependencies.html 
b/hbase-annotations/dependencies.html
index c96c509..14f2f78 100644
--- a/hbase-annotations/dependencies.html
+++ b/hbase-annotations/dependencies.html
@@ -1,5 +1,5 @@
 http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd;>
-
+
 http://www.w3.org/1999/xhtml; xml:lang="en" lang="en">
   
 
@@ -10,7 +10,7 @@
   @import url("./css/site.css");
 
 
-
+
 
 
 
@@ -27,7 +27,7 @@
 
 
 
-Last Published: 2017-05-10
+Last Published: 2017-05-11
   | Version: 
2.0.0-SNAPSHOT
   
 Apache HBase - Annotations

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1241ee85/hbase-annotations/dependency-convergence.html
--
diff --git a/hbase-annotations/dependency-convergence.html 
b/hbase-annotations/dependency-convergence.html
index 020ee4b..dd3c8ab 100644
--- a/hbase-annotations/dependency-convergence.html
+++ b/hbase-annotations/dependency-convergence.html
@@ -1,5 +1,5 @@
 http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd;>
-
+
 http://www.w3.org/1999/xhtml; xml:lang="en" lang="en">
   
 
@@ -10,7 +10,7 @@
   @import url("./css/site.css");
 
 
-
+
 
 
 
@@ -27,7 +27,7 @@
 
 
 
-Last Published: 2017-05-10
+Last Published: 2017-05-11
   | Version: 
2.0.0-SNAPSHOT
   
 Apache HBase - Annotations

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1241ee85/hbase-annotations/dependency-info.html
--
diff --git a/hbase-annotations/dependency-info.html 
b/hbase-annotations/dependency-info.html
index aa01886..390900b 100644
--- a/hbase-annotations/dependency-info.html
+++ b/hbase-annotations/dependency-info.html
@@ -1,5 +1,5 @@
 http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd;>
-
+
 http://www.w3.org/1999/xhtml; xml:lang="en" lang="en">
   
 
@@ -10,7 +10,7 @@
   @import url("./css/site.css");
 
 
-
+
 
 
 
@@ -27,7 +27,7 @@
 
 
 
-Last Published: 2017-05-10
+Last Published: 2017-05-11
   | Version: 
2.0.0-SNAPSHOT
   
 Apache HBase - Annotations

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1241ee85/hbase-annotations/dependency-management.html
--
diff --git a/hbase-annotations/dependency-management.html 
b/hbase-annotations/dependency-management.html
index 02d0520..2736e70 100644
--- a/hbase-annotations/dependency-management.html
+++ b/hbase-annotations/dependency-management.html
@@ -1,5 +1,5 @@
 http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd;>
-
+
 http://www.w3.org/1999/xhtml; xml:lang="en" lang="en">
   
 
@@ -10,7 +10,7 @@
   @import url("./css/site.css");
 
 
-
+
 
   

[06/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1241ee85/testdevapidocs/src-html/org/apache/hadoop/hbase/TestIOFencing.BlockCompactionsInPrepRegion.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/TestIOFencing.BlockCompactionsInPrepRegion.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/TestIOFencing.BlockCompactionsInPrepRegion.html
index 157e668..e3058d6 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/TestIOFencing.BlockCompactionsInPrepRegion.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/TestIOFencing.BlockCompactionsInPrepRegion.html
@@ -25,350 +25,367 @@
 017 */
 018package org.apache.hadoop.hbase;
 019
-020import java.io.IOException;
-021import java.util.Collection;
-022import java.util.List;
-023import 
java.util.concurrent.CountDownLatch;
+020import static 
org.junit.Assert.assertEquals;
+021import static 
org.junit.Assert.assertTrue;
+022
+023import com.google.common.collect.Lists;
 024
-025import com.google.common.collect.Lists;
-026import org.apache.commons.logging.Log;
-027import 
org.apache.commons.logging.LogFactory;
-028import 
org.apache.hadoop.conf.Configuration;
-029import org.apache.hadoop.fs.FileSystem;
-030import org.apache.hadoop.fs.Path;
-031import 
org.apache.hadoop.hbase.client.Admin;
-032import 
org.apache.hadoop.hbase.client.Table;
-033import 
org.apache.hadoop.hbase.regionserver.CompactingMemStore;
-034import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-035import 
org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.CompactionDescriptor;
-036import 
org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy;
-037import 
org.apache.hadoop.hbase.regionserver.HRegion;
-038import 
org.apache.hadoop.hbase.regionserver.HRegionServer;
-039import 
org.apache.hadoop.hbase.regionserver.HStore;
-040import 
org.apache.hadoop.hbase.regionserver.Region;
-041import 
org.apache.hadoop.hbase.regionserver.RegionServerServices;
-042import 
org.apache.hadoop.hbase.regionserver.Store;
-043import 
org.apache.hadoop.hbase.regionserver.StoreFile;
-044import 
org.apache.hadoop.hbase.regionserver.compactions.CompactionContext;
-045import 
org.apache.hadoop.hbase.regionserver.throttle.ThroughputController;
-046import 
org.apache.hadoop.hbase.regionserver.wal.WALUtil;
-047import 
org.apache.hadoop.hbase.security.User;
-048import 
org.apache.hadoop.hbase.testclassification.MediumTests;
-049import 
org.apache.hadoop.hbase.testclassification.MiscTests;
-050import 
org.apache.hadoop.hbase.util.Bytes;
-051import 
org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread;
-052import org.apache.hadoop.hbase.wal.WAL;
-053import org.junit.Test;
-054import 
org.junit.experimental.categories.Category;
-055
-056import static 
org.junit.Assert.assertEquals;
-057import static 
org.junit.Assert.assertTrue;
-058
-059/**
-060 * Test for the case where a regionserver going down has enough cycles to do damage to regions
-061 * that have actually been assigned elsewhere.
-062 *
-063 * <p>If we happen to assign a region before it is fully done with its old location -- i.e. it is on two servers at the
-064 * same time -- all can work fine until the case where the region on the dying server decides to compact or otherwise
-065 * change the region file set.  The region in its new location will then get a surprise when it tries to do something
-066 * w/ a file removed by the region in its old location on the dying server.
-067 *
-068 * <p>Making a test for this case is a little tough in that even if a file is deleted up on the namenode,
-069 * if the file was opened before the delete, it will continue to let reads happen until something changes the
-070 * state of cached blocks in the dfsclient that was already open (a block from the deleted file is cleaned
-071 * from the datanode by the NN).
-072 *
-073 * <p>What we will do below is an explicit check for existence of the files listed in the region that
-074 * has had some files removed because of a compaction.  This hurries things along and makes certain what is otherwise a chance
-075 * occurrence.
-076 */
-077@Category({MiscTests.class, 
MediumTests.class})
-078public class TestIOFencing {
-079  private static final Log LOG = 
LogFactory.getLog(TestIOFencing.class);
-080  static {
-081// Uncomment the following lines if 
more verbosity is needed for
-082// debugging (see HBASE-12285 for 
details).
-083
//((Log4JLogger)FSNamesystem.LOG).getLogger().setLevel(Level.ALL);
-084
//((Log4JLogger)DataNode.LOG).getLogger().setLevel(Level.ALL);
-085
//((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.ALL);
-086
//((Log4JLogger)LogFactory.getLog("org.apache.hadoop.hdfs.server.namenode.FSNamesystem"))
-087//
.getLogger().setLevel(Level.ALL);
-088
//((Log4JLogger)DFSClient.LOG).getLogger().setLevel(Level.ALL);
-089  }
-090
-091  public abstract static class 
CompactionBlockerRegion extends 
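The "explicit check for existence" the class comment above describes boils down to something like the following sketch; the class and method names are invented for illustration and only the Hadoop FileSystem/Path API is assumed.

import java.io.IOException;
import java.util.Collection;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Illustrative only: walk the store file paths a region still lists and ask
// the filesystem whether each one is actually there.
final class StoreFileExistenceCheckSketch {
  static boolean allFilesExist(FileSystem fs, Collection<Path> storeFilePaths) throws IOException {
    for (Path p : storeFilePaths) {
      if (!fs.exists(p)) {
        return false; // a compaction in the "old" location already removed it
      }
    }
    return true;
  }
}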

[20/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1241ee85/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/StoreFileReader.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/StoreFileReader.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/StoreFileReader.html
index 17e050f..a226489 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/StoreFileReader.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/StoreFileReader.html
@@ -84,562 +84,563 @@
 076
 077  // indicate that whether this 
StoreFileReader is shared, i.e., used for pread. If not, we will
 078  // close the internal reader when 
readCompleted is called.
-079  private final boolean shared;
-080
-081  private StoreFileReader(HFile.Reader 
reader, AtomicInteger refCount, boolean shared) {
-082this.reader = reader;
-083bloomFilterType = BloomType.NONE;
-084this.refCount = refCount;
-085this.shared = shared;
-086  }
-087
-088  public StoreFileReader(FileSystem fs, 
Path path, CacheConfig cacheConf,
-089  boolean primaryReplicaStoreFile, 
AtomicInteger refCount, boolean shared, Configuration conf)
-090  throws IOException {
-091this(HFile.createReader(fs, path, 
cacheConf, primaryReplicaStoreFile, conf), refCount, shared);
-092  }
-093
-094  public StoreFileReader(FileSystem fs, 
Path path, FSDataInputStreamWrapper in, long size,
-095  CacheConfig cacheConf, boolean 
primaryReplicaStoreFile, AtomicInteger refCount,
-096  boolean shared, Configuration conf) 
throws IOException {
-097this(HFile.createReader(fs, path, in, 
size, cacheConf, primaryReplicaStoreFile, conf), refCount,
-098shared);
-099  }
-100
-101  void copyFields(StoreFileReader reader) 
{
-102this.generalBloomFilter = 
reader.generalBloomFilter;
-103this.deleteFamilyBloomFilter = 
reader.deleteFamilyBloomFilter;
-104this.bloomFilterType = 
reader.bloomFilterType;
-105this.sequenceID = 
reader.sequenceID;
-106this.timeRange = reader.timeRange;
-107this.lastBloomKey = 
reader.lastBloomKey;
-108this.bulkLoadResult = 
reader.bulkLoadResult;
-109this.lastBloomKeyOnlyKV = 
reader.lastBloomKeyOnlyKV;
-110this.skipResetSeqId = 
reader.skipResetSeqId;
-111  }
-112
-113  public boolean isPrimaryReplicaReader() 
{
-114return 
reader.isPrimaryReplicaReader();
-115  }
-116
-117  /**
-118   * ONLY USE DEFAULT CONSTRUCTOR FOR 
UNIT TESTS
-119   */
-120  @VisibleForTesting
-121  StoreFileReader() {
-122this.refCount = new 
AtomicInteger(0);
-123this.reader = null;
-124this.shared = false;
-125  }
-126
-127  public CellComparator getComparator() 
{
-128return reader.getComparator();
-129  }
-130
-131  /**
-132   * Get a scanner to scan over this 
StoreFile.
-133   * @param cacheBlocks should this 
scanner cache blocks?
-134   * @param pread use pread (for highly 
concurrent small readers)
-135   * @param isCompaction is scanner being 
used for compaction?
-136   * @param scannerOrder Order of this 
scanner relative to other scanners. See
-137   *  {@link 
KeyValueScanner#getScannerOrder()}.
-138   * @param canOptimizeForNonNullColumn 
{@code true} if we can make sure there is no null column,
-139   *  otherwise {@code false}. 
This is a hint for optimization.
-140   * @return a scanner
-141   */
-142  public StoreFileScanner 
getStoreFileScanner(boolean cacheBlocks, boolean pread,
-143  boolean isCompaction, long readPt, 
long scannerOrder, boolean canOptimizeForNonNullColumn) {
-144// Increment the ref count
-145refCount.incrementAndGet();
-146return new StoreFileScanner(this, 
getScanner(cacheBlocks, pread, isCompaction),
-147!isCompaction, 
reader.hasMVCCInfo(), readPt, scannerOrder, canOptimizeForNonNullColumn);
-148  }
-149
-150  /**
-151   * Indicate that the scanner has 
finished reading with this reader. We need to decrement the ref
-152   * count, and also, if this is not the 
common pread reader, we should close it.
-153   */
-154  void readCompleted() {
-155refCount.decrementAndGet();
-156if (!shared) {
-157  try {
-158reader.close(false);
-159  } catch (IOException e) {
-160LOG.warn("failed to close stream 
reader", e);
-161  }
-162}
-163  }
-164
-165  /**
-166   * @deprecated Do not write further 
code which depends on this call. Instead
-167   *   use getStoreFileScanner() which 
uses the StoreFileScanner class/interface
-168   *   which is the preferred way to scan 
a store with higher level concepts.
-169   *
-170   * @param cacheBlocks should we cache 
the blocks?
-171   * @param pread use pread (for 
concurrent small readers)
-172   * @return the underlying 
HFileScanner
-173   */
-174  @Deprecated
-175  public HFileScanner getScanner(boolean 
cacheBlocks, boolean pread) {
-176return getScanner(cacheBlocks, pread, 
false);
-177  }
-178
-179  /**
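A stripped-down sketch of the reference-counting pattern visible in this hunk, with invented names; the real StoreFileReader pairs getStoreFileScanner() (increment) with readCompleted() (decrement, and close non-shared stream readers) the same way.

import java.util.concurrent.atomic.AtomicInteger;

// Illustrative only: every scanner open bumps the count, every completed read
// drops it, and a non-shared (stream) reader is closed after its single read.
final class RefCountedReaderSketch {
  private final AtomicInteger refCount = new AtomicInteger(0);
  private final boolean shared; // false for one-off stream readers

  RefCountedReaderSketch(boolean shared) {
    this.shared = shared;
  }

  void openScanner() {
    refCount.incrementAndGet();
  }

  void readCompleted() {
    refCount.decrementAndGet();
    if (!shared) {
      closeUnderlyingReader(); // pread readers stay open; stream readers do not
    }
  }

  private void closeUnderlyingReader() {
    // stand-in for reader.close(false) in the real class
  }
}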

[23/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1241ee85/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/StoreFile.Comparators.GetMaxTimestamp.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/StoreFile.Comparators.GetMaxTimestamp.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/StoreFile.Comparators.GetMaxTimestamp.html
index b01aa5a..8090868 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/StoreFile.Comparators.GetMaxTimestamp.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/StoreFile.Comparators.GetMaxTimestamp.html
@@ -597,215 +597,221 @@
 589return reader;
 590  }
 591
-592  public StoreFileScanner 
getStreamScanner(boolean canUseDropBehind, boolean cacheBlocks,
-593  boolean pread, boolean 
isCompaction, long readPt, long scannerOrder,
-594  boolean 
canOptimizeForNonNullColumn) throws IOException {
-595return 
createStreamReader(canUseDropBehind).getStoreFileScanner(
-596  cacheBlocks, pread, isCompaction, 
readPt, scannerOrder, canOptimizeForNonNullColumn);
-597  }
-598
-599  /**
-600   * @return Current reader.  Must call 
initReader first else returns null.
-601   * @see #initReader()
-602   */
-603  public StoreFileReader getReader() {
-604return this.reader;
-605  }
-606
-607  /**
-608   * @param evictOnClose whether to evict 
blocks belonging to this file
-609   * @throws IOException
-610   */
-611  public synchronized void 
closeReader(boolean evictOnClose)
-612  throws IOException {
-613if (this.reader != null) {
-614  this.reader.close(evictOnClose);
-615  this.reader = null;
-616}
-617  }
-618
-619  /**
-620   * Marks the status of the file as 
compactedAway.
-621   */
-622  public void markCompactedAway() {
-623this.compactedAway = true;
-624  }
-625
-626  /**
-627   * Delete this file
-628   * @throws IOException
-629   */
-630  public void deleteReader() throws 
IOException {
-631boolean evictOnClose =
-632cacheConf != null? 
cacheConf.shouldEvictOnClose(): true;
-633closeReader(evictOnClose);
-634this.fs.delete(getPath(), true);
-635  }
-636
-637  @Override
-638  public String toString() {
-639return this.fileInfo.toString();
-640  }
-641
-642  /**
-643   * @return a length description of this 
StoreFile, suitable for debug output
-644   */
-645  public String toStringDetailed() {
-646StringBuilder sb = new 
StringBuilder();
-647
sb.append(this.getPath().toString());
-648sb.append(", 
isReference=").append(isReference());
-649sb.append(", 
isBulkLoadResult=").append(isBulkLoadResult());
-650if (isBulkLoadResult()) {
-651  sb.append(", 
bulkLoadTS=").append(getBulkLoadTimestamp());
-652} else {
-653  sb.append(", 
seqid=").append(getMaxSequenceId());
-654}
-655sb.append(", 
majorCompaction=").append(isMajorCompaction());
-656
-657return sb.toString();
-658  }
-659
-660  /**
-661   * Gets whether to skip resetting the 
sequence id for cells.
-662   * @param skipResetSeqId The byte array 
of boolean.
-663   * @return Whether to skip resetting 
the sequence id.
-664   */
-665  private boolean isSkipResetSeqId(byte[] 
skipResetSeqId) {
-666if (skipResetSeqId != null && skipResetSeqId.length == 1) {
-667  return 
Bytes.toBoolean(skipResetSeqId);
-668}
-669return false;
-670  }
-671
-672  /**
-673   * @param fs
-674   * @param dir Directory to create file 
in.
-675   * @return random filename inside passed <code>dir</code>
-676   */
-677  public static Path getUniqueFile(final 
FileSystem fs, final Path dir)
-678  throws IOException {
-679if 
(!fs.getFileStatus(dir).isDirectory()) {
-680  throw new IOException("Expecting " 
+ dir.toString() +
-681" to be a directory");
-682}
-683return new Path(dir, 
UUID.randomUUID().toString().replaceAll("-", ""));
-684  }
-685
-686  public Long getMinimumTimestamp() {
-687return getReader().timeRange == null? 
null: getReader().timeRange.getMin();
-688  }
-689
-690  public Long getMaximumTimestamp() {
-691return getReader().timeRange == null? 
null: getReader().timeRange.getMax();
-692  }
-693
-694
-695  /**
-696   * Gets the approximate mid-point of 
this file that is optimal for use in splitting it.
-697   * @param comparator Comparator used to 
compare KVs.
-698   * @return The split point row, or null 
if splitting is not possible, or reader is null.
-699   */
-700  byte[] getFileSplitPoint(CellComparator 
comparator) throws IOException {
-701if (this.reader == null) {
-702  LOG.warn("Storefile " + this + " 
Reader is null; cannot get split point");
-703  return null;
-704}
-705// Get first, last, and mid keys.  
Midkey is the key that starts block
-706// in middle of hfile.  Has column 
and timestamp.  Need to return just
-707// the row we want to split on as 
midkey.
-708Cell midkey = 

[08/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1241ee85/testdevapidocs/src-html/org/apache/hadoop/hbase/TestIOFencing.BlockCompactionsInCompletionHStore.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/TestIOFencing.BlockCompactionsInCompletionHStore.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/TestIOFencing.BlockCompactionsInCompletionHStore.html
index 157e668..e3058d6 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/TestIOFencing.BlockCompactionsInCompletionHStore.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/TestIOFencing.BlockCompactionsInCompletionHStore.html
@@ -25,350 +25,367 @@
 017 */
 018package org.apache.hadoop.hbase;
 019
-020import java.io.IOException;
-021import java.util.Collection;
-022import java.util.List;
-023import 
java.util.concurrent.CountDownLatch;
+020import static 
org.junit.Assert.assertEquals;
+021import static 
org.junit.Assert.assertTrue;
+022
+023import com.google.common.collect.Lists;
 024
-025import com.google.common.collect.Lists;
-026import org.apache.commons.logging.Log;
-027import 
org.apache.commons.logging.LogFactory;
-028import 
org.apache.hadoop.conf.Configuration;
-029import org.apache.hadoop.fs.FileSystem;
-030import org.apache.hadoop.fs.Path;
-031import 
org.apache.hadoop.hbase.client.Admin;
-032import 
org.apache.hadoop.hbase.client.Table;
-033import 
org.apache.hadoop.hbase.regionserver.CompactingMemStore;
-034import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-035import 
org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.CompactionDescriptor;
-036import 
org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy;
-037import 
org.apache.hadoop.hbase.regionserver.HRegion;
-038import 
org.apache.hadoop.hbase.regionserver.HRegionServer;
-039import 
org.apache.hadoop.hbase.regionserver.HStore;
-040import 
org.apache.hadoop.hbase.regionserver.Region;
-041import 
org.apache.hadoop.hbase.regionserver.RegionServerServices;
-042import 
org.apache.hadoop.hbase.regionserver.Store;
-043import 
org.apache.hadoop.hbase.regionserver.StoreFile;
-044import 
org.apache.hadoop.hbase.regionserver.compactions.CompactionContext;
-045import 
org.apache.hadoop.hbase.regionserver.throttle.ThroughputController;
-046import 
org.apache.hadoop.hbase.regionserver.wal.WALUtil;
-047import 
org.apache.hadoop.hbase.security.User;
-048import 
org.apache.hadoop.hbase.testclassification.MediumTests;
-049import 
org.apache.hadoop.hbase.testclassification.MiscTests;
-050import 
org.apache.hadoop.hbase.util.Bytes;
-051import 
org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread;
-052import org.apache.hadoop.hbase.wal.WAL;
-053import org.junit.Test;
-054import 
org.junit.experimental.categories.Category;
-055
-056import static 
org.junit.Assert.assertEquals;
-057import static 
org.junit.Assert.assertTrue;
-058
-059/**
-060 * Test for the case where a regionserver going down has enough cycles to do damage to regions
-061 * that have actually been assigned elsewhere.
-062 *
-063 * <p>If we happen to assign a region before it is fully done with its old location -- i.e. it is on two servers at the
-064 * same time -- all can work fine until the case where the region on the dying server decides to compact or otherwise
-065 * change the region file set.  The region in its new location will then get a surprise when it tries to do something
-066 * w/ a file removed by the region in its old location on the dying server.
-067 *
-068 * <p>Making a test for this case is a little tough in that even if a file is deleted up on the namenode,
-069 * if the file was opened before the delete, it will continue to let reads happen until something changes the
-070 * state of cached blocks in the dfsclient that was already open (a block from the deleted file is cleaned
-071 * from the datanode by the NN).
-072 *
-073 * <p>What we will do below is an explicit check for existence of the files listed in the region that
-074 * has had some files removed because of a compaction.  This hurries things along and makes certain what is otherwise a chance
-075 * occurrence.
-076 */
-077@Category({MiscTests.class, 
MediumTests.class})
-078public class TestIOFencing {
-079  private static final Log LOG = 
LogFactory.getLog(TestIOFencing.class);
-080  static {
-081// Uncomment the following lines if 
more verbosity is needed for
-082// debugging (see HBASE-12285 for 
details).
-083
//((Log4JLogger)FSNamesystem.LOG).getLogger().setLevel(Level.ALL);
-084
//((Log4JLogger)DataNode.LOG).getLogger().setLevel(Level.ALL);
-085
//((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.ALL);
-086
//((Log4JLogger)LogFactory.getLog("org.apache.hadoop.hdfs.server.namenode.FSNamesystem"))
-087//
.getLogger().setLevel(Level.ALL);
-088
//((Log4JLogger)DFSClient.LOG).getLogger().setLevel(Level.ALL);
-089  }
-090
-091  public abstract static class 

[10/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1241ee85/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.html 
b/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.html
index 038e85b..a90f309 100644
--- 
a/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.html
+++ 
b/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.html
@@ -653,7 +653,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 testScanAcrossSnapshot2
-public void testScanAcrossSnapshot2()
+public void testScanAcrossSnapshot2()
  throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException,
 http://docs.oracle.com/javase/8/docs/api/java/lang/CloneNotSupportedException.html?is-external=true;
 title="class or interface in java.lang">CloneNotSupportedException
 A simple test which verifies the 3 possible states when 
scanning across snapshot.
@@ -670,7 +670,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 verifyScanAcrossSnapshot2
-protected void verifyScanAcrossSnapshot2(org.apache.hadoop.hbase.KeyValue kv1,
+protected void verifyScanAcrossSnapshot2(org.apache.hadoop.hbase.KeyValue kv1,
  
org.apache.hadoop.hbase.KeyValuekv2)
   throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 
@@ -685,7 +685,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 assertScannerResults
-protected void assertScannerResults(org.apache.hadoop.hbase.regionserver.KeyValueScanner scanner,
+protected void assertScannerResults(org.apache.hadoop.hbase.regionserver.KeyValueScanner scanner,
 
org.apache.hadoop.hbase.KeyValue[]expected)
  throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 
@@ -700,7 +700,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 testMemstoreConcurrentControl
-public void testMemstoreConcurrentControl()
+public void testMemstoreConcurrentControl()
throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 
 Throws:
@@ -714,7 +714,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 testMemstoreEditsVisibilityWithSameKey
-public void testMemstoreEditsVisibilityWithSameKey()
+public void testMemstoreEditsVisibilityWithSameKey()
 throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 Regression test for HBASE-2616, HBASE-2670.
  When we insert a higher-memstoreTS version of a cell but with
@@ -732,7 +732,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 testMemstoreDeletesVisibilityWithSameKey
-public void testMemstoreDeletesVisibilityWithSameKey()
+public void testMemstoreDeletesVisibilityWithSameKey()
   throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 When we insert a higher-memstoreTS deletion of a cell but 
with
  the same timestamp, we still need to provide consistent reads
@@ -749,7 +749,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 testReadOwnWritesUnderConcurrency
-public void testReadOwnWritesUnderConcurrency()
+public void testReadOwnWritesUnderConcurrency()
throws http://docs.oracle.com/javase/8/docs/api/java/lang/Throwable.html?is-external=true;
 title="class or interface in java.lang">Throwable
 
 Throws:
@@ -763,7 +763,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 testSnapshotting
-public void testSnapshotting()
+public void testSnapshotting()
   throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 Test memstore snapshots
 
@@ -778,7 +778,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 testMultipleVersionsSimple
-public void testMultipleVersionsSimple()
+public void testMultipleVersionsSimple()
 throws http://docs.oracle.com/javase/8/docs/api/java/lang/Exception.html?is-external=true;
 title="class or interface in 

[17/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1241ee85/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/StoreScanner.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/StoreScanner.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/StoreScanner.html
index 573de43..4e9c7c3 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/StoreScanner.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/StoreScanner.html
@@ -32,1012 +32,1081 @@
 024import java.io.IOException;
 025import java.io.InterruptedIOException;
 026import java.util.ArrayList;
-027import java.util.List;
-028import java.util.NavigableSet;
-029import 
java.util.concurrent.CountDownLatch;
-030import 
java.util.concurrent.locks.ReentrantLock;
-031
-032import org.apache.commons.logging.Log;
-033import 
org.apache.commons.logging.LogFactory;
-034import org.apache.hadoop.hbase.Cell;
-035import 
org.apache.hadoop.hbase.CellComparator;
-036import 
org.apache.hadoop.hbase.CellUtil;
-037import 
org.apache.hadoop.hbase.DoNotRetryIOException;
-038import 
org.apache.hadoop.hbase.HConstants;
-039import 
org.apache.hadoop.hbase.KeyValue;
-040import 
org.apache.hadoop.hbase.KeyValueUtil;
-041import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
-042import 
org.apache.hadoop.hbase.client.IsolationLevel;
-043import 
org.apache.hadoop.hbase.client.Scan;
-044import 
org.apache.hadoop.hbase.executor.ExecutorService;
-045import 
org.apache.hadoop.hbase.filter.Filter;
-046import 
org.apache.hadoop.hbase.regionserver.ScannerContext.LimitScope;
-047import 
org.apache.hadoop.hbase.regionserver.ScannerContext.NextState;
-048import 
org.apache.hadoop.hbase.regionserver.handler.ParallelSeekHandler;
-049import 
org.apache.hadoop.hbase.regionserver.querymatcher.CompactionScanQueryMatcher;
-050import 
org.apache.hadoop.hbase.regionserver.querymatcher.LegacyScanQueryMatcher;
-051import 
org.apache.hadoop.hbase.regionserver.querymatcher.ScanQueryMatcher;
-052import 
org.apache.hadoop.hbase.regionserver.querymatcher.UserScanQueryMatcher;
-053import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-054
-055/**
-056 * Scanner scans both the memstore and the Store. Coalesce KeyValue stream
-057 * into List<KeyValue> for a single row.
-058 */
-059@InterfaceAudience.Private
-060public class StoreScanner extends 
NonReversedNonLazyKeyValueScanner
-061implements KeyValueScanner, 
InternalScanner, ChangedReadersObserver {
-062  private static final Log LOG = 
LogFactory.getLog(StoreScanner.class);
-063  // In unit tests, the store could be 
null
-064  protected final Store store;
-065  protected ScanQueryMatcher matcher;
-066  protected KeyValueHeap heap;
-067  protected boolean cacheBlocks;
-068
-069  protected long countPerRow = 0;
-070  protected int storeLimit = -1;
-071  protected int storeOffset = 0;
-072
-073  // Used to indicate that the scanner 
has closed (see HBASE-1107)
-074  // Doesnt need to be volatile because 
it's always accessed via synchronized methods
-075  protected boolean closing = false;
-076  protected final boolean get;
-077  protected final boolean 
explicitColumnQuery;
-078  protected final boolean 
useRowColBloom;
-079  /**
-080   * A flag that enables StoreFileScanner 
parallel-seeking
-081   */
-082  protected boolean parallelSeekEnabled = 
false;
-083  protected ExecutorService executor;
-084  protected final Scan scan;
-085  protected final 
NavigableSetbyte[] columns;
-086  protected final long 
oldestUnexpiredTS;
-087  protected final long now;
-088  protected final int minVersions;
-089  protected final long maxRowSize;
-090  protected final long 
cellsPerHeartbeatCheck;
-091
-092  // Collects all the KVHeap that are 
eagerly getting closed during the
-093  // course of a scan
-094  protected List<KeyValueHeap> heapsForDelayedClose = new ArrayList<>();
-095
-096  /**
-097   * The number of KVs seen by the 
scanner. Includes explicitly skipped KVs, but not
-098   * KVs skipped via seeking to next 
row/column. TODO: estimate them?
-099   */
-100  private long kvsScanned = 0;
-101  private Cell prevCell = null;
-102
-103  /** We don't ever expect to change 
this, the constant is just for clarity. */
-104  static final boolean 
LAZY_SEEK_ENABLED_BY_DEFAULT = true;
-105  public static final String 
STORESCANNER_PARALLEL_SEEK_ENABLE =
-106  
"hbase.storescanner.parallel.seek.enable";
+027import java.util.HashMap;
+028import java.util.List;
+029import java.util.Map;
+030import java.util.NavigableSet;
+031import 
java.util.concurrent.CountDownLatch;
+032import 
java.util.concurrent.locks.ReentrantLock;
+033
+034import org.apache.commons.logging.Log;
+035import 
org.apache.commons.logging.LogFactory;
+036import org.apache.hadoop.hbase.Cell;
+037import 
org.apache.hadoop.hbase.CellComparator;
+038import 
org.apache.hadoop.hbase.CellUtil;
+039import 
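(Hunk truncated here.) The STORESCANNER_PARALLEL_SEEK_ENABLE constant quoted in this StoreScanner source is an ordinary configuration key; a hedged example of turning it on follows. Whether parallel seeking actually helps depends on the executor wired into StoreScanner and on the workload; the class name is invented for illustration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Illustrative only: flip the parallel-seek switch shown in the source above.
final class ParallelSeekConfigSketch {
  static Configuration withParallelSeek() {
    Configuration conf = HBaseConfiguration.create();
    conf.setBoolean("hbase.storescanner.parallel.seek.enable", true);
    return conf;
  }
}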

[13/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1241ee85/hbase-archetypes/hbase-client-project/source-repository.html
--
diff --git a/hbase-archetypes/hbase-client-project/source-repository.html 
b/hbase-archetypes/hbase-client-project/source-repository.html
index 885be92..13b182f 100644
--- a/hbase-archetypes/hbase-client-project/source-repository.html
+++ b/hbase-archetypes/hbase-client-project/source-repository.html
@@ -1,5 +1,5 @@
 http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd;>
-
+
 http://www.w3.org/1999/xhtml; xml:lang="en" lang="en">
   
 
@@ -10,7 +10,7 @@
   @import url("./css/site.css");
 
 
-
+
 
 
 
@@ -27,7 +27,7 @@
 
 
 
-Last Published: 2017-05-10
+Last Published: 2017-05-11
   | Version: 
2.0.0-SNAPSHOT
   
 Apache HBase - Exemplar for 
hbase-client archetype

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1241ee85/hbase-archetypes/hbase-client-project/team-list.html
--
diff --git a/hbase-archetypes/hbase-client-project/team-list.html 
b/hbase-archetypes/hbase-client-project/team-list.html
index ac27d9b..522a947 100644
--- a/hbase-archetypes/hbase-client-project/team-list.html
+++ b/hbase-archetypes/hbase-client-project/team-list.html
@@ -1,5 +1,5 @@
 http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd;>
-
+
 http://www.w3.org/1999/xhtml; xml:lang="en" lang="en">
   
 
@@ -10,7 +10,7 @@
   @import url("./css/site.css");
 
 
-
+
 
 
 
@@ -27,7 +27,7 @@
 
 
 
-Last Published: 2017-05-10
+Last Published: 2017-05-11
   | Version: 
2.0.0-SNAPSHOT
   
 Apache HBase - Exemplar for 
hbase-client archetype

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1241ee85/hbase-archetypes/hbase-shaded-client-project/checkstyle.html
--
diff --git a/hbase-archetypes/hbase-shaded-client-project/checkstyle.html 
b/hbase-archetypes/hbase-shaded-client-project/checkstyle.html
index 7280f22..27795be 100644
--- a/hbase-archetypes/hbase-shaded-client-project/checkstyle.html
+++ b/hbase-archetypes/hbase-shaded-client-project/checkstyle.html
@@ -1,5 +1,5 @@
 http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd;>
-
+
 http://www.w3.org/1999/xhtml; xml:lang="en" lang="en">
   
 
@@ -10,7 +10,7 @@
   @import url("./css/site.css");
 
 
-
+
 
 
 
@@ -27,7 +27,7 @@
 
 
 
-Last Published: 2017-05-10
+Last Published: 2017-05-11
   | Version: 
2.0.0-SNAPSHOT
   
 Apache HBase - Exemplar for 
hbase-shaded-client archetype

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1241ee85/hbase-archetypes/hbase-shaded-client-project/dependencies.html
--
diff --git a/hbase-archetypes/hbase-shaded-client-project/dependencies.html 
b/hbase-archetypes/hbase-shaded-client-project/dependencies.html
index 34f1436..119f0dc 100644
--- a/hbase-archetypes/hbase-shaded-client-project/dependencies.html
+++ b/hbase-archetypes/hbase-shaded-client-project/dependencies.html
@@ -1,5 +1,5 @@
 http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd;>
-
+
 http://www.w3.org/1999/xhtml; xml:lang="en" lang="en">
   
 
@@ -10,7 +10,7 @@
   @import url("./css/site.css");
 
 
-
+
 
 
 
@@ -27,7 +27,7 @@
 
 
 
-Last Published: 2017-05-10
+Last Published: 2017-05-11
   | Version: 
2.0.0-SNAPSHOT
   
 Apache HBase - Exemplar for 
hbase-shaded-client archetype
@@ -3509,7 +3509,7 @@ These include: bzip2, gzip, pack200, xz and ar, cpio, 
jar, tar, zip, dump.
 -
 
 hbase-hadoop-compat-2.0.0-SNAPSHOT-tests.jar
-22.29 kB
+22.30 kB
 -
 -
 -
@@ -4094,7 +4094,7 @@ These include: bzip2, gzip, pack200, xz and ar, cpio, 
jar, tar, zip, dump.
 Sealed
 
 151
-70.55 MB
+70.56 MB
 35,651
 31,647
 1,529

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1241ee85/hbase-archetypes/hbase-shaded-client-project/dependency-convergence.html
--
diff --git 
a/hbase-archetypes/hbase-shaded-client-project/dependency-convergence.html 
b/hbase-archetypes/hbase-shaded-client-project/dependency-convergence.html
index 60e0c3c..6930437 100644
--- a/hbase-archetypes/hbase-shaded-client-project/dependency-convergence.html
+++ 

[44/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1241ee85/devapidocs/org/apache/hadoop/hbase/regionserver/NonReversedNonLazyKeyValueScanner.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/NonReversedNonLazyKeyValueScanner.html
 
b/devapidocs/org/apache/hadoop/hbase/regionserver/NonReversedNonLazyKeyValueScanner.html
index 9d22211..e88db1f 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/regionserver/NonReversedNonLazyKeyValueScanner.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/regionserver/NonReversedNonLazyKeyValueScanner.html
@@ -207,7 +207,7 @@ extends NonLazyKeyValueScanner
-doRealSeek,
 enforceSeek,
 getNextIndexedKey,
 isFileScanner,
 realSeekDone,
 requestSeek,
 shipped,
 shouldUseScanner
+doRealSeek,
 enforceSeek,
 getFilePath,
 getNextIndexedKey,
 isFileScanner,
 realSeekDone,
 requestSeek, shipped,
 shouldUseScanner
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1241ee85/devapidocs/org/apache/hadoop/hbase/regionserver/ReversedKeyValueHeap.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/ReversedKeyValueHeap.html 
b/devapidocs/org/apache/hadoop/hbase/regionserver/ReversedKeyValueHeap.html
index 7799488..736c357 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/ReversedKeyValueHeap.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/ReversedKeyValueHeap.html
@@ -288,7 +288,7 @@ extends NonLazyKeyValueScanner
-doRealSeek,
 enforceSeek,
 isFileScanner,
 realSeekDone,
 shouldUseScanner
+doRealSeek,
 enforceSeek,
 getFilePath,
 isFileScanner,
 realSeekDone,
 shouldUseScanner
 
 
 
@@ -302,7 +302,7 @@ extends KeyValueScanner
-enforceSeek,
 isFileScanner,
 realSeekDone,
 shouldUseScanner
+enforceSeek,
 getFilePath,
 isFileScanner,
 realSeekDone,
 shouldUseScanner
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1241ee85/devapidocs/org/apache/hadoop/hbase/regionserver/ReversedMobStoreScanner.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/ReversedMobStoreScanner.html 
b/devapidocs/org/apache/hadoop/hbase/regionserver/ReversedMobStoreScanner.html
index 6da46d3..9a698ff 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/regionserver/ReversedMobStoreScanner.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/regionserver/ReversedMobStoreScanner.html
@@ -193,7 +193,7 @@ extends StoreScanner
-cacheBlocks,
 cellsPerHeartbeatCheck,
 closing,
 columns,
 countPerRow,
 currentScanners,
 DEFAULT_HBASE_CELLS_SCANNED_PER_HEARTBEAT_CHECK,
 executor,
 explicitColumnQuery,
 flushed,
 flushedStoreFiles,
 get,
 HBASE_CELLS_SCANNED_PER_HEARTBEAT_CHECK,
 heap,
 heapsForDelayedClose,
 lastTop,
 LAZY_SEEK_ENABLED_BY_DEFAULT,
 lazySeekEnabledGlobally,
 matcher,
 maxRowSize,
 minVersions,
 now,
 oldestUnexpiredTS,
 parallelSeekEnabled,
 readPt,
 scan,
 store,
 storeLimit,
 storeOffset,
 STORESCANNER_PARALLEL_SEEK_ENABLE,
 useRowColBloom
+DEFAULT_HBASE_CELLS_SCANNED_PER_HEARTBEAT_CHECK,
 HBASE_CELLS_SCANNED_PER_HEARTBEAT_CHECK,
 heap,
 LAZY_SEEK_ENABLED_BY_DEFAULT,
 readPt,
 store,
 STORESCANNER_PARALLEL_SEEK_ENABLE,
 STORESCANNER_PREAD_MAX_BYTES
 
 
 
@@ -257,7 +257,7 @@ extends StoreScanner
-addCurrentScanners,
 checkFlushed,
 checkReseek,
 close,
 enableLazySeekGlobally,
 getAllScannersForTesting,
 getEstimatedNumberOfKvsScanned,
 getNextIndexedKey, getScannerOrder,
 getScannersNoCompaction,
 next,
 next,
 peek,
 resetScannerStack,
 selectScannersFrom,
 shipped,
 trySkipToNextColumn,
 trySkipToNextRow,
 updateReaders
+checkFlushed,
 close,
 enableLazySeekGlobally,
 getAllScannersForTesting,
 getEstimatedNumberOfKvsScanned,
 getNextIndexedKey,
 getScannerOrder,
 next,
 next,
 peek,
 reopenAfterFlush,
 selectScannersFrom,
 shipped,
 trySkipToNextColumn,
 trySkipToNextRow,
 updateReaders
 
 
 
@@ -271,7 +271,7 @@ extends NonLazyKeyValueScanner
-doRealSeek,
 enforceSeek,
 isFileScanner,
 realSeekDone,
 requestSeek,
 shouldUseScanner
+doRealSeek,
 enforceSeek,
 getFilePath,
 isFileScanner,
 realSeekDone,
 requestSeek,
 shouldUseScanner
 
 
 
@@ -285,7 +285,7 @@ extends KeyValueScanner
-close,
 

[07/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1241ee85/testdevapidocs/src-html/org/apache/hadoop/hbase/TestIOFencing.BlockCompactionsInCompletionRegion.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/TestIOFencing.BlockCompactionsInCompletionRegion.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/TestIOFencing.BlockCompactionsInCompletionRegion.html
index 157e668..e3058d6 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/TestIOFencing.BlockCompactionsInCompletionRegion.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/TestIOFencing.BlockCompactionsInCompletionRegion.html
@@ -25,350 +25,367 @@
 017 */
 018package org.apache.hadoop.hbase;
 019
-020import java.io.IOException;
-021import java.util.Collection;
-022import java.util.List;
-023import 
java.util.concurrent.CountDownLatch;
+020import static 
org.junit.Assert.assertEquals;
+021import static 
org.junit.Assert.assertTrue;
+022
+023import com.google.common.collect.Lists;
 024
-025import com.google.common.collect.Lists;
-026import org.apache.commons.logging.Log;
-027import 
org.apache.commons.logging.LogFactory;
-028import 
org.apache.hadoop.conf.Configuration;
-029import org.apache.hadoop.fs.FileSystem;
-030import org.apache.hadoop.fs.Path;
-031import 
org.apache.hadoop.hbase.client.Admin;
-032import 
org.apache.hadoop.hbase.client.Table;
-033import 
org.apache.hadoop.hbase.regionserver.CompactingMemStore;
-034import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-035import 
org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.CompactionDescriptor;
-036import 
org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy;
-037import 
org.apache.hadoop.hbase.regionserver.HRegion;
-038import 
org.apache.hadoop.hbase.regionserver.HRegionServer;
-039import 
org.apache.hadoop.hbase.regionserver.HStore;
-040import 
org.apache.hadoop.hbase.regionserver.Region;
-041import 
org.apache.hadoop.hbase.regionserver.RegionServerServices;
-042import 
org.apache.hadoop.hbase.regionserver.Store;
-043import 
org.apache.hadoop.hbase.regionserver.StoreFile;
-044import 
org.apache.hadoop.hbase.regionserver.compactions.CompactionContext;
-045import 
org.apache.hadoop.hbase.regionserver.throttle.ThroughputController;
-046import 
org.apache.hadoop.hbase.regionserver.wal.WALUtil;
-047import 
org.apache.hadoop.hbase.security.User;
-048import 
org.apache.hadoop.hbase.testclassification.MediumTests;
-049import 
org.apache.hadoop.hbase.testclassification.MiscTests;
-050import 
org.apache.hadoop.hbase.util.Bytes;
-051import 
org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread;
-052import org.apache.hadoop.hbase.wal.WAL;
-053import org.junit.Test;
-054import 
org.junit.experimental.categories.Category;
-055
-056import static 
org.junit.Assert.assertEquals;
-057import static 
org.junit.Assert.assertTrue;
-058
-059/**
-060 * Test for the case where a regionserver 
going down has enough cycles to do damage to regions
-061 * that have actually been assigned 
elsehwere.
-062 *
-063 * <p>If we happen to assign a 
region before it fully done with in its old location -- i.e. it is on two 
servers at the
-064 * same time -- all can work fine until 
the case where the region on the dying server decides to compact or otherwise
-065 * change the region file set.  The 
region in its new location will then get a surprise when it tries to do 
something
-066 * w/ a file removed by the region in its 
old location on dying server.
-067 *
-068 * <p>Making a test for this case 
is a little tough in that even if a file is deleted up on the namenode,
-069 * if the file was opened before the 
delete, it will continue to let reads happen until something changes the
-070 * state of cached blocks in the 
dfsclient that was already open (a block from the deleted file is cleaned
-071 * from the datanode by NN).
-072 *
-073 * <p>What we will do below is do 
an explicit check for existence on the files listed in the region that
-074 * has had some files removed because of 
a compaction.  This sort of hurry's along and makes certain what is a chance
-075 * occurance.
-076 */
-077@Category({MiscTests.class, 
MediumTests.class})
-078public class TestIOFencing {
-079  private static final Log LOG = 
LogFactory.getLog(TestIOFencing.class);
-080  static {
-081// Uncomment the following lines if 
more verbosity is needed for
-082// debugging (see HBASE-12285 for 
details).
-083
//((Log4JLogger)FSNamesystem.LOG).getLogger().setLevel(Level.ALL);
-084
//((Log4JLogger)DataNode.LOG).getLogger().setLevel(Level.ALL);
-085
//((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.ALL);
-086
//((Log4JLogger)LogFactory.getLog("org.apache.hadoop.hdfs.server.namenode.FSNamesystem"))
-087//
.getLogger().setLevel(Level.ALL);
-088
//((Log4JLogger)DFSClient.LOG).getLogger().setLevel(Level.ALL);
-089  }
-090
-091  public abstract static class 
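
The removed class comment above boils down to an explicit existence check: after the region's old host compacts away store files, the paths the relocated region still lists may no longer exist. A minimal sketch of such a check (class and method names here are illustrative, not taken from TestIOFencing):

import java.io.IOException;
import java.util.Collection;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

final class StoreFilePresenceCheck {
  private StoreFilePresenceCheck() {}

  // True only if every listed store file path is still present on the filesystem.
  static boolean allStoreFilesPresent(FileSystem fs, Collection<Path> storeFilePaths)
      throws IOException {
    for (Path path : storeFilePaths) {
      if (!fs.exists(path)) {   // a compaction on the old host may have deleted it
        return false;
      }
    }
    return true;
  }
}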

[12/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1241ee85/plugin-management.html
--
diff --git a/plugin-management.html b/plugin-management.html
index 12df7de..23766d3 100644
--- a/plugin-management.html
+++ b/plugin-management.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  Project Plugin Management
 
@@ -441,7 +441,7 @@
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2017-05-10
+  Last Published: 
2017-05-11
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1241ee85/plugins.html
--
diff --git a/plugins.html b/plugins.html
index 176d801..e368782 100644
--- a/plugins.html
+++ b/plugins.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  Project Build Plugins
 
@@ -376,7 +376,7 @@
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2017-05-10
+  Last Published: 
2017-05-11
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1241ee85/poweredbyhbase.html
--
diff --git a/poweredbyhbase.html b/poweredbyhbase.html
index 09f70a1..509533d 100644
--- a/poweredbyhbase.html
+++ b/poweredbyhbase.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  Powered By Apache HBase™
 
@@ -774,7 +774,7 @@ under the License. -->
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2017-05-10
+  Last Published: 
2017-05-11
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1241ee85/project-info.html
--
diff --git a/project-info.html b/project-info.html
index 39b402c..ca40564 100644
--- a/project-info.html
+++ b/project-info.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  Project Information
 
@@ -340,7 +340,7 @@
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2017-05-10
+  Last Published: 
2017-05-11
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1241ee85/project-reports.html
--
diff --git a/project-reports.html b/project-reports.html
index 6d12103..dcb56cc 100644
--- a/project-reports.html
+++ b/project-reports.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  Generated Reports
 
@@ -310,7 +310,7 @@
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2017-05-10
+  Last Published: 
2017-05-11
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1241ee85/project-summary.html
--
diff --git a/project-summary.html b/project-summary.html
index 391fdf0..7f8c7da 100644
--- a/project-summary.html
+++ b/project-summary.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  Project Summary
 
@@ -336,7 +336,7 @@
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2017-05-10
+  Last Published: 
2017-05-11
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1241ee85/pseudo-distributed.html
--
diff --git a/pseudo-distributed.html b/pseudo-distributed.html
index 25b0137..6e4216e 100644
--- a/pseudo-distributed.html
+++ b/pseudo-distributed.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase   
 Running Apache HBase (TM) in pseudo-distributed mode
@@ -313,7 +313,7 @@ under the License. -->
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2017-05-10
+  Last Published: 
2017-05-11
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1241ee85/replication.html

[01/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-11 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site cac6146f8 -> 1241ee85f


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1241ee85/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.EnvironmentEdgeForMemstoreTest.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.EnvironmentEdgeForMemstoreTest.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.EnvironmentEdgeForMemstoreTest.html
index 6db2655..e2c4f9d 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.EnvironmentEdgeForMemstoreTest.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.EnvironmentEdgeForMemstoreTest.html
@@ -170,939 +170,938 @@
 162Scan scan = new Scan();
 163ListCell result = new 
ArrayList();
 164Configuration conf = 
HBaseConfiguration.create();
-165ScanInfo scanInfo =
-166new ScanInfo(conf, null, 0, 1, 
HConstants.LATEST_TIMESTAMP, KeepDeletedCells.FALSE, 0,
-167
this.memstore.getComparator());
-168ScanType scanType = 
ScanType.USER_SCAN;
-169StoreScanner s = new 
StoreScanner(scan, scanInfo, scanType, null, memstorescanners);
-170int count = 0;
-171try {
-172  while (s.next(result)) {
-173LOG.info(result);
-174count++;
-175// Row count is same as column 
count.
-176assertEquals(rowCount, 
result.size());
-177result.clear();
-178  }
-179} finally {
-180  s.close();
-181}
-182assertEquals(rowCount, count);
-183for (KeyValueScanner scanner : 
memstorescanners) {
-184  scanner.close();
-185}
-186
-187memstorescanners = 
this.memstore.getScanners(mvcc.getReadPoint());
-188// Now assert can count same number 
even if a snapshot mid-scan.
-189s = new StoreScanner(scan, scanInfo, 
scanType, null, memstorescanners);
-190count = 0;
-191try {
-192  while (s.next(result)) {
-193LOG.info(result);
-194// Assert the stuff is coming out 
in right order.
-195
assertTrue(CellUtil.matchingRow(result.get(0), Bytes.toBytes(count)));
-196count++;
-197// Row count is same as column 
count.
-198assertEquals(rowCount, 
result.size());
-199if (count == 2) {
-200  this.memstore.snapshot();
-201  LOG.info("Snapshotted");
-202}
-203result.clear();
-204  }
-205} finally {
-206  s.close();
-207}
-208assertEquals(rowCount, count);
-209for (KeyValueScanner scanner : 
memstorescanners) {
-210  scanner.close();
-211}
-212memstorescanners = 
this.memstore.getScanners(mvcc.getReadPoint());
-213// Assert that new values are seen in 
kvset as we scan.
-214long ts = 
System.currentTimeMillis();
-215s = new StoreScanner(scan, scanInfo, 
scanType, null, memstorescanners);
-216count = 0;
-217int snapshotIndex = 5;
-218try {
-219  while (s.next(result)) {
-220LOG.info(result);
-221// Assert the stuff is coming out 
in right order.
-222
assertTrue(CellUtil.matchingRow(result.get(0), Bytes.toBytes(count)));
-223// Row count is same as column 
count.
-224assertEquals("count=" + count + 
", result=" + result, rowCount, result.size());
-225count++;
-226if (count == snapshotIndex) {
-227  MemStoreSnapshot snapshot = 
this.memstore.snapshot();
-228  
this.memstore.clearSnapshot(snapshot.getId());
-229  // Added more rows into kvset.  
But the scanner wont see these rows.
-230  addRows(this.memstore, ts);
-231  LOG.info("Snapshotted, cleared 
it and then added values (which wont be seen)");
-232}
-233result.clear();
-234  }
-235} finally {
-236  s.close();
-237}
-238assertEquals(rowCount, count);
-239  }
-240
-241  /**
-242   * A simple test which verifies the 3 
possible states when scanning across snapshot.
-243   * @throws IOException
-244   * @throws CloneNotSupportedException
-245   */
-246  @Test
-247  public void testScanAcrossSnapshot2() 
throws IOException, CloneNotSupportedException {
-248// we are going to the scanning 
across snapshot with two kvs
-249// kv1 should always be returned 
before kv2
-250final byte[] one = 
Bytes.toBytes(1);
-251final byte[] two = 
Bytes.toBytes(2);
-252final byte[] f = 
Bytes.toBytes("f");
-253final byte[] q = 
Bytes.toBytes("q");
-254final byte[] v = Bytes.toBytes(3);
-255
-256final KeyValue kv1 = new 
KeyValue(one, f, q, v);
-257final KeyValue kv2 = new 
KeyValue(two, f, q, v);
-258
-259// use case 1: both kvs in kvset
-260this.memstore.add(kv1.clone(), 
null);
-261this.memstore.add(kv2.clone(), 
null);
-262verifyScanAcrossSnapshot2(kv1, 
kv2);
-263
-264// use case 2: both 
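
The removed test code above drives a StoreScanner over memstore scanners and re-counts rows before and after a mid-scan snapshot. A condensed sketch of that counting loop follows, reusing the constructor and next(List)/close() calls exactly as they appear in those lines (signatures are taken from this listing, not verified against other releases; it sits in the same package as the test because the constructor may not be public):

package org.apache.hadoop.hbase.regionserver;   // same package as TestDefaultMemStore, see note above

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.client.Scan;

final class MemStoreScanCounter {
  private MemStoreScanCounter() {}

  // Counts rows seen by a StoreScanner built over the given memstore scanners.
  static int countRows(Scan scan, ScanInfo scanInfo, List<KeyValueScanner> scanners)
      throws IOException {
    StoreScanner s = new StoreScanner(scan, scanInfo, ScanType.USER_SCAN, null, scanners);
    int count = 0;
    List<Cell> row = new ArrayList<>();
    try {
      while (s.next(row)) {   // each next() fills one row's cells, as in the test
        count++;
        row.clear();
      }
    } finally {
      s.close();
    }
    return count;
  }
}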

[21/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1241ee85/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/StoreFile.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/StoreFile.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/StoreFile.html
index b01aa5a..8090868 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/StoreFile.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/StoreFile.html
@@ -597,215 +597,221 @@
 589return reader;
 590  }
 591
-592  public StoreFileScanner 
getStreamScanner(boolean canUseDropBehind, boolean cacheBlocks,
-593  boolean pread, boolean 
isCompaction, long readPt, long scannerOrder,
-594  boolean 
canOptimizeForNonNullColumn) throws IOException {
-595return 
createStreamReader(canUseDropBehind).getStoreFileScanner(
-596  cacheBlocks, pread, isCompaction, 
readPt, scannerOrder, canOptimizeForNonNullColumn);
-597  }
-598
-599  /**
-600   * @return Current reader.  Must call 
initReader first else returns null.
-601   * @see #initReader()
-602   */
-603  public StoreFileReader getReader() {
-604return this.reader;
-605  }
-606
-607  /**
-608   * @param evictOnClose whether to evict 
blocks belonging to this file
-609   * @throws IOException
-610   */
-611  public synchronized void 
closeReader(boolean evictOnClose)
-612  throws IOException {
-613if (this.reader != null) {
-614  this.reader.close(evictOnClose);
-615  this.reader = null;
-616}
-617  }
-618
-619  /**
-620   * Marks the status of the file as 
compactedAway.
-621   */
-622  public void markCompactedAway() {
-623this.compactedAway = true;
-624  }
-625
-626  /**
-627   * Delete this file
-628   * @throws IOException
-629   */
-630  public void deleteReader() throws 
IOException {
-631boolean evictOnClose =
-632cacheConf != null? 
cacheConf.shouldEvictOnClose(): true;
-633closeReader(evictOnClose);
-634this.fs.delete(getPath(), true);
-635  }
-636
-637  @Override
-638  public String toString() {
-639return this.fileInfo.toString();
-640  }
-641
-642  /**
-643   * @return a length description of this 
StoreFile, suitable for debug output
-644   */
-645  public String toStringDetailed() {
-646StringBuilder sb = new 
StringBuilder();
-647
sb.append(this.getPath().toString());
-648sb.append(", 
isReference=").append(isReference());
-649sb.append(", 
isBulkLoadResult=").append(isBulkLoadResult());
-650if (isBulkLoadResult()) {
-651  sb.append(", 
bulkLoadTS=").append(getBulkLoadTimestamp());
-652} else {
-653  sb.append(", 
seqid=").append(getMaxSequenceId());
-654}
-655sb.append(", 
majorCompaction=").append(isMajorCompaction());
-656
-657return sb.toString();
-658  }
-659
-660  /**
-661   * Gets whether to skip resetting the 
sequence id for cells.
-662   * @param skipResetSeqId The byte array 
of boolean.
-663   * @return Whether to skip resetting 
the sequence id.
-664   */
-665  private boolean isSkipResetSeqId(byte[] 
skipResetSeqId) {
-666if (skipResetSeqId != null && skipResetSeqId.length == 1) {
-667  return 
Bytes.toBoolean(skipResetSeqId);
-668}
-669return false;
-670  }
-671
-672  /**
-673   * @param fs
-674   * @param dir Directory to create file 
in.
-675   * @return random filename inside 
passed <code>dir</code>
-676   */
-677  public static Path getUniqueFile(final 
FileSystem fs, final Path dir)
-678  throws IOException {
-679if 
(!fs.getFileStatus(dir).isDirectory()) {
-680  throw new IOException("Expecting " 
+ dir.toString() +
-681" to be a directory");
-682}
-683return new Path(dir, 
UUID.randomUUID().toString().replaceAll("-", ""));
-684  }
-685
-686  public Long getMinimumTimestamp() {
-687return getReader().timeRange == null? 
null: getReader().timeRange.getMin();
-688  }
-689
-690  public Long getMaximumTimestamp() {
-691return getReader().timeRange == null? 
null: getReader().timeRange.getMax();
-692  }
-693
-694
-695  /**
-696   * Gets the approximate mid-point of 
this file that is optimal for use in splitting it.
-697   * @param comparator Comparator used to 
compare KVs.
-698   * @return The split point row, or null 
if splitting is not possible, or reader is null.
-699   */
-700  byte[] getFileSplitPoint(CellComparator 
comparator) throws IOException {
-701if (this.reader == null) {
-702  LOG.warn("Storefile " + this + " 
Reader is null; cannot get split point");
-703  return null;
-704}
-705// Get first, last, and mid keys.  
Midkey is the key that starts block
-706// in middle of hfile.  Has column 
and timestamp.  Need to return just
-707// the row we want to split on as 
midkey.
-708Cell midkey = this.reader.midkey();
-709if (midkey != null) {
-710  Cell firstKey = 
this.reader.getFirstKey();
-711  Cell lastKey = 
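
The hunk above is cut off, but its comment states the idea: the split point is the row of the HFile's mid key. A hedged sketch of that rule follows; the edge-row comparison is filled in for illustration and is an assumption, not the file's actual continuation:

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.CellUtil;

final class SplitPointSketch {
  private SplitPointSketch() {}

  // Row to split on, or null when no sensible split point exists.
  static byte[] splitRow(Cell firstKey, Cell midKey, Cell lastKey, CellComparator comparator) {
    if (midKey == null) {
      return null;                                   // nothing to split on
    }
    if (comparator.compareRows(midKey, firstKey) == 0
        || comparator.compareRows(midKey, lastKey) == 0) {
      return null;                                   // splitting here would leave an empty daughter
    }
    return CellUtil.cloneRow(midKey);                // mid key has column and timestamp; keep only the row
  }
}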

[05/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1241ee85/testdevapidocs/src-html/org/apache/hadoop/hbase/TestIOFencing.CompactionBlockerRegion.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/TestIOFencing.CompactionBlockerRegion.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/TestIOFencing.CompactionBlockerRegion.html
index 157e668..e3058d6 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/TestIOFencing.CompactionBlockerRegion.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/TestIOFencing.CompactionBlockerRegion.html
@@ -25,350 +25,367 @@
 017 */
 018package org.apache.hadoop.hbase;
 019
-020import java.io.IOException;
-021import java.util.Collection;
-022import java.util.List;
-023import 
java.util.concurrent.CountDownLatch;
+020import static 
org.junit.Assert.assertEquals;
+021import static 
org.junit.Assert.assertTrue;
+022
+023import com.google.common.collect.Lists;
 024
-025import com.google.common.collect.Lists;
-026import org.apache.commons.logging.Log;
-027import 
org.apache.commons.logging.LogFactory;
-028import 
org.apache.hadoop.conf.Configuration;
-029import org.apache.hadoop.fs.FileSystem;
-030import org.apache.hadoop.fs.Path;
-031import 
org.apache.hadoop.hbase.client.Admin;
-032import 
org.apache.hadoop.hbase.client.Table;
-033import 
org.apache.hadoop.hbase.regionserver.CompactingMemStore;
-034import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-035import 
org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.CompactionDescriptor;
-036import 
org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy;
-037import 
org.apache.hadoop.hbase.regionserver.HRegion;
-038import 
org.apache.hadoop.hbase.regionserver.HRegionServer;
-039import 
org.apache.hadoop.hbase.regionserver.HStore;
-040import 
org.apache.hadoop.hbase.regionserver.Region;
-041import 
org.apache.hadoop.hbase.regionserver.RegionServerServices;
-042import 
org.apache.hadoop.hbase.regionserver.Store;
-043import 
org.apache.hadoop.hbase.regionserver.StoreFile;
-044import 
org.apache.hadoop.hbase.regionserver.compactions.CompactionContext;
-045import 
org.apache.hadoop.hbase.regionserver.throttle.ThroughputController;
-046import 
org.apache.hadoop.hbase.regionserver.wal.WALUtil;
-047import 
org.apache.hadoop.hbase.security.User;
-048import 
org.apache.hadoop.hbase.testclassification.MediumTests;
-049import 
org.apache.hadoop.hbase.testclassification.MiscTests;
-050import 
org.apache.hadoop.hbase.util.Bytes;
-051import 
org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread;
-052import org.apache.hadoop.hbase.wal.WAL;
-053import org.junit.Test;
-054import 
org.junit.experimental.categories.Category;
-055
-056import static 
org.junit.Assert.assertEquals;
-057import static 
org.junit.Assert.assertTrue;
-058
-059/**
-060 * Test for the case where a regionserver 
going down has enough cycles to do damage to regions
-061 * that have actually been assigned 
elsehwere.
-062 *
-063 * <p>If we happen to assign a 
region before it fully done with in its old location -- i.e. it is on two 
servers at the
-064 * same time -- all can work fine until 
the case where the region on the dying server decides to compact or otherwise
-065 * change the region file set.  The 
region in its new location will then get a surprise when it tries to do 
something
-066 * w/ a file removed by the region in its 
old location on dying server.
-067 *
-068 * <p>Making a test for this case 
is a little tough in that even if a file is deleted up on the namenode,
-069 * if the file was opened before the 
delete, it will continue to let reads happen until something changes the
-070 * state of cached blocks in the 
dfsclient that was already open (a block from the deleted file is cleaned
-071 * from the datanode by NN).
-072 *
-073 * <p>What we will do below is do 
an explicit check for existence on the files listed in the region that
-074 * has had some files removed because of 
a compaction.  This sort of hurry's along and makes certain what is a chance
-075 * occurance.
-076 */
-077@Category({MiscTests.class, 
MediumTests.class})
-078public class TestIOFencing {
-079  private static final Log LOG = 
LogFactory.getLog(TestIOFencing.class);
-080  static {
-081// Uncomment the following lines if 
more verbosity is needed for
-082// debugging (see HBASE-12285 for 
details).
-083
//((Log4JLogger)FSNamesystem.LOG).getLogger().setLevel(Level.ALL);
-084
//((Log4JLogger)DataNode.LOG).getLogger().setLevel(Level.ALL);
-085
//((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.ALL);
-086
//((Log4JLogger)LogFactory.getLog("org.apache.hadoop.hdfs.server.namenode.FSNamesystem"))
-087//
.getLogger().setLevel(Level.ALL);
-088
//((Log4JLogger)DFSClient.LOG).getLogger().setLevel(Level.ALL);
-089  }
-090
-091  public abstract static class 
CompactionBlockerRegion extends HRegion {
-092volatile 

[15/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1241ee85/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.html
index e9107e6..1f0030b 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.html
@@ -69,15 +69,15 @@
 061  requiredArguments = {
 062@org.jamon.annotations.Argument(name 
= "master", type = "HMaster")},
 063  optionalArguments = {
-064@org.jamon.annotations.Argument(name 
= "serverManager", type = "ServerManager"),
-065@org.jamon.annotations.Argument(name 
= "metaLocation", type = "ServerName"),
-066@org.jamon.annotations.Argument(name 
= "deadServers", type = "SetServerName"),
+064@org.jamon.annotations.Argument(name 
= "catalogJanitorEnabled", type = "boolean"),
+065@org.jamon.annotations.Argument(name 
= "filter", type = "String"),
+066@org.jamon.annotations.Argument(name 
= "serverManager", type = "ServerManager"),
 067@org.jamon.annotations.Argument(name 
= "assignmentManager", type = "AssignmentManager"),
-068@org.jamon.annotations.Argument(name 
= "frags", type = "MapString,Integer"),
+068@org.jamon.annotations.Argument(name 
= "deadServers", type = "SetServerName"),
 069@org.jamon.annotations.Argument(name 
= "servers", type = "ListServerName"),
-070@org.jamon.annotations.Argument(name 
= "catalogJanitorEnabled", type = "boolean"),
-071@org.jamon.annotations.Argument(name 
= "filter", type = "String"),
-072@org.jamon.annotations.Argument(name 
= "format", type = "String")})
+070@org.jamon.annotations.Argument(name 
= "format", type = "String"),
+071@org.jamon.annotations.Argument(name 
= "frags", type = "MapString,Integer"),
+072@org.jamon.annotations.Argument(name 
= "metaLocation", type = "ServerName")})
 073public class MasterStatusTmpl
 074  extends 
org.jamon.AbstractTemplateProxy
 075{
@@ -118,57 +118,57 @@
 110  return m_master;
 111}
 112private HMaster m_master;
-113// 28, 1
-114public void 
setServerManager(ServerManager serverManager)
+113// 25, 1
+114public void 
setCatalogJanitorEnabled(boolean catalogJanitorEnabled)
 115{
-116  // 28, 1
-117  m_serverManager = serverManager;
-118  m_serverManager__IsNotDefault = 
true;
+116  // 25, 1
+117  m_catalogJanitorEnabled = 
catalogJanitorEnabled;
+118  
m_catalogJanitorEnabled__IsNotDefault = true;
 119}
-120public ServerManager 
getServerManager()
+120public boolean 
getCatalogJanitorEnabled()
 121{
-122  return m_serverManager;
+122  return m_catalogJanitorEnabled;
 123}
-124private ServerManager 
m_serverManager;
-125public boolean 
getServerManager__IsNotDefault()
+124private boolean 
m_catalogJanitorEnabled;
+125public boolean 
getCatalogJanitorEnabled__IsNotDefault()
 126{
-127  return 
m_serverManager__IsNotDefault;
+127  return 
m_catalogJanitorEnabled__IsNotDefault;
 128}
-129private boolean 
m_serverManager__IsNotDefault;
-130// 22, 1
-131public void 
setMetaLocation(ServerName metaLocation)
+129private boolean 
m_catalogJanitorEnabled__IsNotDefault;
+130// 26, 1
+131public void setFilter(String 
filter)
 132{
-133  // 22, 1
-134  m_metaLocation = metaLocation;
-135  m_metaLocation__IsNotDefault = 
true;
+133  // 26, 1
+134  m_filter = filter;
+135  m_filter__IsNotDefault = true;
 136}
-137public ServerName getMetaLocation()
+137public String getFilter()
 138{
-139  return m_metaLocation;
+139  return m_filter;
 140}
-141private ServerName m_metaLocation;
-142public boolean 
getMetaLocation__IsNotDefault()
+141private String m_filter;
+142public boolean 
getFilter__IsNotDefault()
 143{
-144  return 
m_metaLocation__IsNotDefault;
+144  return m_filter__IsNotDefault;
 145}
-146private boolean 
m_metaLocation__IsNotDefault;
-147// 24, 1
-148public void 
setDeadServers(Set<ServerName> deadServers)
+146private boolean 
m_filter__IsNotDefault;
+147// 28, 1
+148public void 
setServerManager(ServerManager serverManager)
 149{
-150  // 24, 1
-151  m_deadServers = deadServers;
-152  m_deadServers__IsNotDefault = 
true;
+150  // 28, 1
+151  m_serverManager = serverManager;
+152  m_serverManager__IsNotDefault = 
true;
 153}
-154public Set<ServerName> 
getDeadServers()
+154public ServerManager 
getServerManager()
 155{
-156  return m_deadServers;
+156  return m_serverManager;
 157}
-158private Set<ServerName> 
m_deadServers;
-159public boolean 
getDeadServers__IsNotDefault()
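
The reshuffled setters above are Jamon-generated proxy code: every optional template argument gets a backing field, a setter that also flips an __IsNotDefault flag, and matching getters. A small illustration of that pattern (names mirror the generated code for the filter argument; this is a sketch, not the generated class itself):

public class OptionalArgumentSlot {
  private String m_filter;
  private boolean m_filter__IsNotDefault;

  public void setFilter(String filter) {
    m_filter = filter;                 // remember the caller-supplied value
    m_filter__IsNotDefault = true;     // record that the template default should not be used
  }

  public String getFilter() {
    return m_filter;
  }

  public boolean getFilter__IsNotDefault() {
    return m_filter__IsNotDefault;
  }
}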