hbase git commit: HBASE-18155 TestMasterProcedureWalLease is flakey

2017-06-03 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/master 118429cba -> e65d8653e


HBASE-18155 TestMasterProcedureWalLease is flakey


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/e65d8653
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/e65d8653
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/e65d8653

Branch: refs/heads/master
Commit: e65d8653e566bbbae03578a1f9ad858cabcb48bc
Parents: 118429cb
Author: Michael Stack 
Authored: Sat Jun 3 12:55:14 2017 -0700
Committer: Michael Stack 
Committed: Sat Jun 3 12:55:18 2017 -0700

--
 .../hadoop/hbase/master/procedure/TestMasterProcedureWalLease.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/e65d8653/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureWalLease.java
--
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureWalLease.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureWalLease.java
index e3c8a17..4c63318 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureWalLease.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureWalLease.java
@@ -76,7 +76,7 @@ public class TestMasterProcedureWalLease {
   @Before
   public void setup() throws Exception {
     setupConf(UTIL.getConfiguration());
-    UTIL.startMiniCluster(2, 2);
+    UTIL.startMiniCluster(2, 3);
   }
 
   @After
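
The one-line change above bumps the mini cluster from two region servers to
three. The test kills a region server to force WAL lease recovery, so with
only two servers a kill could leave too little capacity to reassign regions,
which is the likely source of the flakiness. For illustration only (a hedged
sketch, not code from this commit), the usual mini-cluster lifecycle around
such a test looks like:

    import org.apache.hadoop.hbase.HBaseTestingUtility;
    import org.junit.After;
    import org.junit.Before;

    public class MiniClusterLifecycleSketch {
      private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();

      @Before
      public void setup() throws Exception {
        // Two masters (so a backup can take over) and three region servers
        // (so regions can still be hosted after one server is killed).
        UTIL.startMiniCluster(2, 3);
      }

      @After
      public void tearDown() throws Exception {
        UTIL.shutdownMiniCluster(); // tear the whole mini cluster down
      }
    }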



[2/2] hbase git commit: HBASE-15995 Separate replication WAL reading from shipping

2017-06-03 Thread tedyu
HBASE-15995 Separate replication WAL reading from shipping

Signed-off-by: tedyu 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/3cf44332
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/3cf44332
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/3cf44332

Branch: refs/heads/branch-1
Commit: 3cf4433260b60a0e0455090628cf60a9d5a180f3
Parents: b66a478
Author: Vincent 
Authored: Thu Jun 1 16:50:41 2017 -0700
Committer: tedyu 
Committed: Sat Jun 3 09:48:57 2017 -0700

--
 .../replication/ClusterMarkingEntryFilter.java  |  70 ++
 .../regionserver/ReplicationSource.java         | 826 +--
 .../ReplicationSourceWALReaderThread.java       | 471 +++
 .../ReplicationWALReaderManager.java            | 155 
 .../regionserver/WALEntryStream.java            | 411 +
 .../TestReplicationWALReaderManager.java        | 228 -
 .../regionserver/TestWALEntryStream.java        | 440 ++
 7 files changed, 1610 insertions(+), 991 deletions(-)
--
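
The diffstat shows the shape of the change: WAL reading moves into the new
ReplicationSourceWALReaderThread and WALEntryStream, ReplicationSource keeps
the shipping side, and ReplicationWALReaderManager is retired. As a hedged
sketch of the resulting producer/consumer pattern (simplified names;
readNextBatchFromWALs and shipToPeer are hypothetical helpers, not code from
this commit):

    import java.util.List;
    import java.util.concurrent.BlockingQueue;
    import java.util.concurrent.LinkedBlockingQueue;
    import org.apache.hadoop.hbase.wal.WAL.Entry;

    // Reader thread batches WAL entries onto a bounded queue; the shipper
    // drains it. The bound applies back-pressure when the peer is slow.
    final BlockingQueue<List<Entry>> batches = new LinkedBlockingQueue<>(1);

    Thread reader = new Thread(() -> {
      try {
        while (!Thread.currentThread().isInterrupted()) {
          List<Entry> batch = readNextBatchFromWALs(); // hypothetical helper
          batches.put(batch); // blocks when the shipper falls behind
        }
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
      }
    });
    reader.start();

    // Shipper side: take the next batch and replicate it to the peer.
    List<Entry> next = batches.take();
    shipToPeer(next); // hypothetical helper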


http://git-wip-us.apache.org/repos/asf/hbase/blob/3cf44332/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ClusterMarkingEntryFilter.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ClusterMarkingEntryFilter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ClusterMarkingEntryFilter.java
new file mode 100644
index 000..7cd3fed
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ClusterMarkingEntryFilter.java
@@ -0,0 +1,70 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.replication;
+
+import java.util.UUID;
+
+import org.apache.hadoop.hbase.HBaseInterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
+import org.apache.hadoop.hbase.wal.WALKey;
+import org.apache.hadoop.hbase.wal.WAL.Entry;
+
+
+/**
+ * Filters out entries with our peerClusterId (i.e. already replicated)
+ * and marks all other entries with our clusterID
+ */
+@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.REPLICATION)
+@InterfaceStability.Evolving
+public class ClusterMarkingEntryFilter implements WALEntryFilter {
+  private UUID clusterId;
+  private UUID peerClusterId;
+  private ReplicationEndpoint replicationEndpoint;
+
+  /**
+   * @param clusterId id of this cluster
+   * @param peerClusterId of the other cluster
+   * @param replicationEndpoint ReplicationEndpoint which will handle the actual replication
+   */
+  public ClusterMarkingEntryFilter(UUID clusterId, UUID peerClusterId,
+      ReplicationEndpoint replicationEndpoint) {
+    this.clusterId = clusterId;
+    this.peerClusterId = peerClusterId;
+    this.replicationEndpoint = replicationEndpoint;
+  }
+  @Override
+  public Entry filter(Entry entry) {
+    // don't replicate if the log entries have already been consumed by the cluster
+    if (replicationEndpoint.canReplicateToSameCluster()
+        || !entry.getKey().getClusterIds().contains(peerClusterId)) {
+      WALEdit edit = entry.getEdit();
+      WALKey logKey = entry.getKey();
+
+      if (edit != null && !edit.isEmpty()) {
+        // Mark that the current cluster has the change
+        logKey.addClusterId(clusterId);
+        // We need to set the CC to null else it will be compressed when sent to the sink
+        entry.setCompressionContext(null);
+        return entry;
+      }
+    }
+    return null;
+  }
+}
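
As a hedged usage sketch (not part of the commit): the filter is meant to sit
in the WALEntryFilter chain of a replication source. ChainWALEntryFilter is
the existing HBase helper that ANDs several filters together; endpoint and
scopeFilter below stand in for whatever the surrounding source provides.

    import java.util.UUID;
    import org.apache.hadoop.hbase.replication.ChainWALEntryFilter;
    import org.apache.hadoop.hbase.replication.ClusterMarkingEntryFilter;
    import org.apache.hadoop.hbase.replication.ReplicationEndpoint;
    import org.apache.hadoop.hbase.replication.WALEntryFilter;
    import org.apache.hadoop.hbase.wal.WAL.Entry;

    // Illustration only: build the chain and apply it to one entry.
    Entry filterOne(UUID clusterId, UUID peerClusterId,
        ReplicationEndpoint endpoint, WALEntryFilter scopeFilter, Entry entry) {
      WALEntryFilter marking =
          new ClusterMarkingEntryFilter(clusterId, peerClusterId, endpoint);
      // ChainWALEntryFilter ANDs its members: an entry survives only if
      // every filter in the chain returns non-null.
      WALEntryFilter chain = new ChainWALEntryFilter(scopeFilter, marking);
      // null means "do not ship"; otherwise the returned entry now carries
      // the local cluster id in its WALKey.
      return chain.filter(entry);
    }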

http://git-wip-us.apache.org/repos/asf/hbase/blob/3cf44332/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
--
diff --git 

[1/2] hbase git commit: HBASE-15995 Separate replication WAL reading from shipping

2017-06-03 Thread tedyu
Repository: hbase
Updated Branches:
  refs/heads/branch-1 b66a478e7 -> 3cf443326


http://git-wip-us.apache.org/repos/asf/hbase/blob/3cf44332/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALEntryStream.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALEntryStream.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALEntryStream.java
new file mode 100644
index 000..c4d552c
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALEntryStream.java
@@ -0,0 +1,411 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.replication.regionserver;
+
+import java.io.Closeable;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.util.Iterator;
+import java.util.NoSuchElementException;
+import java.util.concurrent.PriorityBlockingQueue;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.hbase.regionserver.wal.ProtobufLogReader;
+import org.apache.hadoop.hbase.util.CancelableProgressable;
+import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.hbase.util.LeaseNotRecoveredException;
+import org.apache.hadoop.hbase.wal.WAL.Entry;
+import org.apache.hadoop.hbase.wal.WAL.Reader;
+import org.apache.hadoop.hbase.wal.WALFactory;
+
+/**
+ * Streaming access to WAL entries. This class is given a queue of WAL {@link Path}, and continually
+ * iterates through all the WAL {@link Entry} in the queue. When it's done reading from a Path, it
+ * dequeues it and starts reading from the next.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class WALEntryStream implements Iterator<Entry>, Closeable, Iterable<Entry> {
+  private static final Log LOG = LogFactory.getLog(WALEntryStream.class);
+
+  private Reader reader;
+  private Path currentPath;
+  // cache of next entry for hasNext()
+  private Entry currentEntry;
+  // position after reading current entry
+  private long currentPosition = 0;
+  private PriorityBlockingQueue<Path> logQueue;
+  private FileSystem fs;
+  private Configuration conf;
+  private MetricsSource metrics;
+
+  /**
+   * Create an entry stream over the given queue
+   * @param logQueue the queue of WAL paths
+   * @param fs {@link FileSystem} to use to create {@link Reader} for this stream
+   * @param conf {@link Configuration} to use to create {@link Reader} for this stream
+   * @param metrics replication metrics
+   * @throws IOException
+   */
+  public WALEntryStream(PriorityBlockingQueue<Path> logQueue, FileSystem fs, Configuration conf,
+      MetricsSource metrics)
+      throws IOException {
+    this(logQueue, fs, conf, 0, metrics);
+  }
+
+  /**
+   * Create an entry stream over the given queue at the given start position
+   * @param logQueue the queue of WAL paths
+   * @param fs {@link FileSystem} to use to create {@link Reader} for this stream
+   * @param conf {@link Configuration} to use to create {@link Reader} for this stream
+   * @param startPosition the position in the first WAL to start reading at
+   * @param metrics replication metrics
+   * @throws IOException
+   */
+  public WALEntryStream(PriorityBlockingQueue<Path> logQueue, FileSystem fs, Configuration conf,
+      long startPosition, MetricsSource metrics) throws IOException {
+    this.logQueue = logQueue;
+    this.fs = fs;
+    this.conf = conf;
+    this.currentPosition = startPosition;
+    this.metrics = metrics;
+  }
+
+  /**
+   * @return true if there is another WAL {@link Entry}
+   * @throws WALEntryStreamRuntimeException if there was an Exception while reading
+   */
+  @Override
+  public boolean hasNext() {
+    if (currentEntry == null) {
+      try {
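
Although the new file is truncated here, the API above is enough to see the
intended usage. A hedged sketch of how the reader side might drain the stream
(the setup values are assumed to come from the surrounding replication
source; ship() is a hypothetical hook, not code from this commit):

    // WALEntryStream is an Iterator<Entry> and Closeable: drain it with
    // hasNext()/next(), and let close() release the underlying WAL Reader.
    try (WALEntryStream entryStream =
        new WALEntryStream(logQueue, fs, conf, startPosition, metrics)) {
      while (entryStream.hasNext()) {
        Entry entry = entryStream.next();
        ship(entry); // hand the entry to the shipping side
      }
    }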

[47/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c9d35424/devapidocs/org/apache/hadoop/hbase/classification/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/classification/package-tree.html b/devapidocs/org/apache/hadoop/hbase/classification/package-tree.html
index 05f0ead..638fe69 100644
--- a/devapidocs/org/apache/hadoop/hbase/classification/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/classification/package-tree.html
@@ -89,11 +89,11 @@
 Annotation Type Hierarchy
 
 org.apache.hadoop.hbase.classification.InterfaceStability.Stable (implements java.lang.annotation.Annotation)
+org.apache.hadoop.hbase.classification.InterfaceStability.Evolving (implements java.lang.annotation.Annotation)
 org.apache.hadoop.hbase.classification.InterfaceAudience.Private (implements java.lang.annotation.Annotation)
 org.apache.hadoop.hbase.classification.InterfaceAudience.Public (implements java.lang.annotation.Annotation)
-org.apache.hadoop.hbase.classification.InterfaceAudience.LimitedPrivate (implements java.lang.annotation.Annotation)
-org.apache.hadoop.hbase.classification.InterfaceStability.Evolving (implements java.lang.annotation.Annotation)
 org.apache.hadoop.hbase.classification.InterfaceStability.Unstable (implements java.lang.annotation.Annotation)
+org.apache.hadoop.hbase.classification.InterfaceAudience.LimitedPrivate (implements java.lang.annotation.Annotation)
[Generated javadoc; the long external links to the JDK Annotation docs were
stripped by the archive and are condensed above.]

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c9d35424/devapidocs/org/apache/hadoop/hbase/client/ConnectionImplementation.MasterServiceState.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/ConnectionImplementation.MasterServiceState.html b/devapidocs/org/apache/hadoop/hbase/client/ConnectionImplementation.MasterServiceState.html
index 85d7b38..2869cea 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/ConnectionImplementation.MasterServiceState.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/ConnectionImplementation.MasterServiceState.html
[Generated-javadoc diff, HTML markup stripped by the archive: only the
source-line anchors behind the MasterServiceState class header and its
connection, stub and userCount fields, the MasterServiceState(Connection)
constructor, and toString() changed; the declarations themselves are
identical on both sides.]

[46/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c9d35424/devapidocs/org/apache/hadoop/hbase/client/RpcRetryingCallerWithReadReplicas.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/RpcRetryingCallerWithReadReplicas.html b/devapidocs/org/apache/hadoop/hbase/client/RpcRetryingCallerWithReadReplicas.html
index 2ac9f51..5bf0ad2 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/RpcRetryingCallerWithReadReplicas.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/RpcRetryingCallerWithReadReplicas.html
[Generated-javadoc diff, HTML markup stripped by the archive: only source-line
anchors changed for throwEnrichedException(ExecutionException, int),
addCallsForReplica(...), and getRegionLocations(...); signatures and
descriptions are unchanged.]

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c9d35424/devapidocs/org/apache/hadoop/hbase/client/ScannerCallableWithReplicas.RetryingRPC.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/ScannerCallableWithReplicas.RetryingRPC.html b/devapidocs/org/apache/hadoop/hbase/client/ScannerCallableWithReplicas.RetryingRPC.html
index c809928..64ec36a 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/ScannerCallableWithReplicas.RetryingRPC.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/ScannerCallableWithReplicas.RetryingRPC.html
[Generated-javadoc diff, HTML markup stripped by the archive: only source-line
anchors changed for the RetryingRPC class header, its callable, caller and
cancelled fields, the RetryingRPC(ScannerCallable) constructor, and the
call(int), prepare(boolean) and throwable(Throwable, boolean) methods;
signatures and descriptions are unchanged.]

hbase-site git commit: INFRA-10751 Empty commit

2017-06-03 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site c9d354248 -> 82707ed52


INFRA-10751 Empty commit


Project: http://git-wip-us.apache.org/repos/asf/hbase-site/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase-site/commit/82707ed5
Tree: http://git-wip-us.apache.org/repos/asf/hbase-site/tree/82707ed5
Diff: http://git-wip-us.apache.org/repos/asf/hbase-site/diff/82707ed5

Branch: refs/heads/asf-site
Commit: 82707ed523b50faf9444ddfe15d7b63fdaed0cc2
Parents: c9d3542
Author: jenkins 
Authored: Sat Jun 3 14:59:48 2017 +
Committer: jenkins 
Committed: Sat Jun 3 14:59:48 2017 +

--

--




[41/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c9d35424/devapidocs/org/apache/hadoop/hbase/io/hfile/HFileBlock.Writer.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/io/hfile/HFileBlock.Writer.html b/devapidocs/org/apache/hadoop/hbase/io/hfile/HFileBlock.Writer.html
index caa05f5..6ef3c33 100644
--- a/devapidocs/org/apache/hadoop/hbase/io/hfile/HFileBlock.Writer.html
+++ b/devapidocs/org/apache/hadoop/hbase/io/hfile/HFileBlock.Writer.html
[Generated-javadoc diff, HTML markup stripped by the archive: only the
source-line anchors behind the HFileBlock.Writer class header and its fields
(state, dataBlockEncoder, dataBlockEncodingCtx, defaultBlockEncodingCtx,
baosInMemory, blockType, userDataStream, unencodedDataSizeWritten,
encodedDataSizeWritten, onDiskBlockBytesWithHeader, onDiskChecksum,
startOffset, prevOffsetByType) changed; the declarations and field
descriptions are identical on both sides.]

[25/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c9d35424/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFile.Reader.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFile.Reader.html b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFile.Reader.html
index b6c2fe3..1765903 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFile.Reader.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFile.Reader.html
@@ -60,892 +60,917 @@
[Generated src-html diff, markup stripped by the archive: two imports were
added to HFile.java (org.apache.hadoop.hbase.io.MetricsIO and
org.apache.hadoop.hbase.io.MetricsIOWrapperImpl), shifting the rendered line
numbers of everything below. The HFile class javadoc visible in the hunk
(the file format description, the index memory-footprint estimate of
(56+AvgKeySize)*NumBlocks, and the block-size guidance: 8KB to 1MB minimum
block size, larger blocks for sequential access, smaller for random access)
is unchanged.]

[40/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c9d35424/devapidocs/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.html b/devapidocs/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.html
index 34dc232..6566882 100644
--- a/devapidocs/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.html
+++ b/devapidocs/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.html
[Generated-javadoc diff, HTML markup stripped by the archive: only source-line
anchors changed for HFileWriterImpl members (checkKey, checkValue, getPath,
toString, compressionByName, createOutputStream, finishInit,
checkBlockBoundary, finishBlock, getMidpoint, getMinimumMidpointArray);
signatures and descriptions are unchanged.]

[38/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c9d35424/devapidocs/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.ImplData.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.ImplData.html b/devapidocs/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.ImplData.html
index 874be52..7e3084a 100644
--- a/devapidocs/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.ImplData.html
+++ b/devapidocs/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.ImplData.html
@@ -393,94 +393,94 @@ extends org.jamon.AbstractTemplateProxy.ImplData
[Generated-javadoc diff, HTML markup stripped by the archive: the
Jamon-generated ImplData fields (m_master, m_format, m_catalogJanitorEnabled,
m_serverManager, m_filter, m_deadServers, m_frags, m_assignmentManager,
m_servers, and their __IsNotDefault flags) and the matching getters and
setters are re-emitted in a different order; no fields or methods were added
or removed.]

[26/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c9d35424/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFile.FileInfo.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFile.FileInfo.html b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFile.FileInfo.html
index b6c2fe3..1765903 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFile.FileInfo.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFile.FileInfo.html
@@ -60,892 +60,917 @@
[Generated src-html diff, markup stripped by the archive: identical to the
HFile.Reader.html hunk above, since both pages render the same HFile.java
source; the only change is the added MetricsIO/MetricsIOWrapperImpl imports
and the resulting line-number shift.]

[37/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c9d35424/devapidocs/overview-tree.html
--
diff --git a/devapidocs/overview-tree.html b/devapidocs/overview-tree.html
index 075c735..5f9567f 100644
--- a/devapidocs/overview-tree.html
+++ b/devapidocs/overview-tree.html
@@ -804,6 +804,7 @@
 org.apache.hadoop.hbase.regionserver.wal.MetricsEditsReplaySourceImpl (implements org.apache.hadoop.hbase.regionserver.wal.MetricsEditsReplaySource)
 org.apache.hadoop.hbase.regionserver.MetricsHeapMemoryManagerSourceImpl (implements org.apache.hadoop.hbase.regionserver.MetricsHeapMemoryManagerSource)
+org.apache.hadoop.hbase.io.MetricsIOSourceImpl (implements org.apache.hadoop.hbase.io.MetricsIOSource)
 org.apache.hadoop.hbase.master.MetricsMasterFilesystemSourceImpl (implements org.apache.hadoop.hbase.master.MetricsMasterFileSystemSource)
 org.apache.hadoop.hbase.master.MetricsMasterProcSourceImpl (implements org.apache.hadoop.hbase.master.MetricsMasterProcSource)
 org.apache.hadoop.hbase.master.MetricsMasterQuotaSourceImpl (implements org.apache.hadoop.hbase.master.MetricsMasterQuotaSource)
@@ -2296,6 +2297,8 @@
 org.apache.hadoop.hbase.ipc.MetricsHBaseServerWrapperImpl (implements org.apache.hadoop.hbase.ipc.MetricsHBaseServerWrapper)
 org.apache.hadoop.hbase.regionserver.MetricsHeapMemoryManager
 org.apache.hadoop.hbase.metrics.MetricsInfoImpl (implements org.apache.hadoop.metrics2.MetricsInfo)
+org.apache.hadoop.hbase.io.MetricsIO
+org.apache.hadoop.hbase.io.MetricsIOWrapperImpl (implements org.apache.hadoop.hbase.io.MetricsIOWrapper)
 org.apache.hadoop.hbase.master.MetricsMaster
 org.apache.hadoop.hbase.master.MetricsMasterFileSystem
 org.apache.hadoop.hbase.master.MetricsMasterProcSourceFactoryImpl (implements org.apache.hadoop.hbase.master.MetricsMasterProcSourceFactory)
@@ -4403,6 +4406,7 @@
 org.apache.hadoop.hbase.regionserver.wal.MetricsEditsReplaySource
 org.apache.hadoop.hbase.ipc.MetricsHBaseServerSource
 org.apache.hadoop.hbase.regionserver.MetricsHeapMemoryManagerSource
+org.apache.hadoop.hbase.io.MetricsIOSource
 org.apache.hadoop.hbase.master.MetricsMasterFileSystemSource
 org.apache.hadoop.hbase.master.MetricsMasterProcSource
 org.apache.hadoop.hbase.master.MetricsMasterQuotaSource
@@ -4740,6 +4744,7 @@
 org.apache.hadoop.hbase.client.MetricsConnection.NewMetric<T>
 org.apache.hadoop.metrics2.MetricsExecutor
 org.apache.hadoop.hbase.ipc.MetricsHBaseServerWrapper
+org.apache.hadoop.hbase.io.MetricsIOWrapper
 org.apache.hadoop.hbase.master.MetricsMasterProcSourceFactory
 org.apache.hadoop.hbase.master.MetricsMasterQuotaSourceFactory
 org.apache.hadoop.hbase.master.MetricsMasterSourceFactory

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c9d35424/devapidocs/src-html/org/apache/hadoop/hbase/Version.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/Version.html b/devapidocs/src-html/org/apache/hadoop/hbase/Version.html
index e696df2..1fb8178 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/Version.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/Version.html
@@ -16,11 +16,11 @@
 @InterfaceAudience.Private
 public class Version {
   public static final String version = "2.0.0-SNAPSHOT";
-  public static final String revision = "549171465db83600dbdf6d0b5af01c3ad30dc550";
+  public static final String revision = "118429cbac0d71d57d574db825b2f077146a961e";
   public static final String user = "jenkins";
-  public static final String date = "Fri Jun  2 14:39:24 UTC 2017";
+  public static final String date = "Sat Jun  3 14:38:58 UTC 2017";
   public static final String url = "git://asf920.gq1.ygridcore.net/home/jenkins/jenkins-slave/workspace/hbase_generate_website/hbase";
-  public static final String srcChecksum = "41076c851d8ead0bdd4c42c7c733c1c2";
+  public static final String srcChecksum = "8840b4fbe4479c4e1120cb4b37742b4e";
 }



[31/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c9d35424/devapidocs/src-html/org/apache/hadoop/hbase/client/MetaCache.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/MetaCache.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/MetaCache.html
index 69d5596..22ae2e0 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/MetaCache.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/MetaCache.html
@@ -321,94 +321,128 @@
[Generated src-html diff, markup stripped by the archive: MetaCache gains a
new overload, inserted before the existing clearCache(TableName, byte[] row,
ServerName) and clearCache(HRegionInfo) methods, which shift down
accordingly. The recoverable added method:]
+  /**
+   * Delete a cached location with specific replicaId.
+   * @param tableName tableName
+   * @param row row key
+   * @param replicaId region replica id
+   */
+  public void clearCache(final TableName tableName, final byte [] row, int replicaId) {
+    ConcurrentMap<byte[], RegionLocations> tableLocations = getTableLocations(tableName);
+
+    RegionLocations regionLocations = getCachedLocation(tableName, row);
+    if (regionLocations != null) {
+      HRegionLocation toBeRemoved = regionLocations.getRegionLocation(replicaId);
+      if (toBeRemoved != null) {
+        RegionLocations updatedLocations = regionLocations.remove(replicaId);
+        byte[] startKey = regionLocations.getRegionLocation().getRegionInfo().getStartKey();
+        boolean removed;
+        if (updatedLocations.isEmpty()) {
+          removed = tableLocations.remove(startKey, regionLocations);
+        } else {
+          removed = tableLocations.replace(startKey, regionLocations, updatedLocations);
+        }
+
+        if (removed) {
+          if (metrics != null) {
+            metrics.incrMetaCacheNumClearRegion();
+          }
+          if (LOG.isTraceEnabled()) {
+            LOG.trace("Removed " + toBeRemoved + " from cache");
+          }
+        }
+      }
+    }
+  }

[51/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-03 Thread git-site-role
Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.


Project: http://git-wip-us.apache.org/repos/asf/hbase-site/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase-site/commit/c9d35424
Tree: http://git-wip-us.apache.org/repos/asf/hbase-site/tree/c9d35424
Diff: http://git-wip-us.apache.org/repos/asf/hbase-site/diff/c9d35424

Branch: refs/heads/asf-site
Commit: c9d354248e8772da6b522d5c0482b752136dda8c
Parents: 3b85ae9
Author: jenkins 
Authored: Sat Jun 3 14:59:12 2017 +
Committer: jenkins 
Committed: Sat Jun 3 14:59:12 2017 +

--
 acid-semantics.html                             |    4 +-
 apache_hbase_reference_guide.pdf                |    4 +-
 apache_hbase_reference_guide.pdfmarks           |    4 +-
 bulk-loads.html                                 |    4 +-
 checkstyle-aggregate.html                       | 9866 +-
 checkstyle.rss                                  |   76 +-
 coc.html                                        |    4 +-
 cygwin.html                                     |    4 +-
 dependencies.html                               |    4 +-
 dependency-convergence.html                     |    4 +-
 dependency-info.html                            |    4 +-
 dependency-management.html                      |    4 +-
 devapidocs/allclasses-frame.html                |    5 +
 devapidocs/allclasses-noframe.html              |    5 +
 devapidocs/constant-values.html                 |  102 +-
 devapidocs/index-all.html                       |  126 +-
 .../hadoop/hbase/backup/package-tree.html       |    2 +-
 .../hadoop/hbase/class-use/TableName.html       |  486 +-
 .../hbase/classification/package-tree.html      |    4 +-
 ...ectionImplementation.MasterServiceState.html |   18 +-
 ...onImplementation.MasterServiceStubMaker.html |   10 +-
 ...ntation.ServerErrorTracker.ServerErrors.html |   10 +-
 ...ectionImplementation.ServerErrorTracker.html |   20 +-
 .../hbase/client/ConnectionImplementation.html  |   88 +-
 .../apache/hadoop/hbase/client/MetaCache.html   |   42 +-
 .../RpcRetryingCallerWithReadReplicas.html      |    6 +-
 ...ScannerCallableWithReplicas.RetryingRPC.html |   24 +-
 .../client/ScannerCallableWithReplicas.html     |   72 +-
 .../hadoop/hbase/client/package-tree.html       |   28 +-
 .../hadoop/hbase/filter/package-tree.html       |    8 +-
 .../hadoop/hbase/io/LimitInputStream.html       |    4 +-
 .../org/apache/hadoop/hbase/io/MetricsIO.html   |  387 +
 .../apache/hadoop/hbase/io/MetricsIOSource.html |  531 +
 .../hadoop/hbase/io/MetricsIOSourceImpl.html    |  489 +
 .../hadoop/hbase/io/MetricsIOWrapper.html       |  226 +
 .../hadoop/hbase/io/MetricsIOWrapperImpl.html   |  281 +
 .../org/apache/hadoop/hbase/io/Reference.html   |    4 +-
 .../hadoop/hbase/io/class-use/MetricsIO.html    |  170 +
 .../hbase/io/class-use/MetricsIOSource.html     |  231 +
 .../hbase/io/class-use/MetricsIOSourceImpl.html |  125 +
 .../hbase/io/class-use/MetricsIOWrapper.html    |  248 +
 .../io/class-use/MetricsIOWrapperImpl.html      |  125 +
 .../io/hfile/HFile.CachingBlockReader.html      |    6 +-
 .../hadoop/hbase/io/hfile/HFile.FileInfo.html   |   74 +-
 .../hadoop/hbase/io/hfile/HFile.Reader.html     |   62 +-
 .../hadoop/hbase/io/hfile/HFile.Writer.html     |   18 +-
 .../hbase/io/hfile/HFile.WriterFactory.html     |   36 +-
 .../org/apache/hadoop/hbase/io/hfile/HFile.html |  155 +-
 .../io/hfile/HFileBlock.BlockIterator.html      |    6 +-
 .../io/hfile/HFileBlock.BlockWritable.html      |    6 +-
 .../hbase/io/hfile/HFileBlock.FSReader.html     |   26 +-
 .../hbase/io/hfile/HFileBlock.FSReaderImpl.html |   76 +-
 .../io/hfile/HFileBlock.PrefetchedHeader.html   |   12 +-
 .../hbase/io/hfile/HFileBlock.Writer.State.html |   12 +-
 .../hbase/io/hfile/HFileBlock.Writer.html       |   80 +-
 .../hadoop/hbase/io/hfile/HFileBlock.html       |   88 +-
 .../hadoop/hbase/io/hfile/HFileWriterImpl.html  |   56 +-
 .../hbase/io/hfile/class-use/HFileBlock.html    |   15 +-
 .../hadoop/hbase/io/hfile/package-tree.html     |    8 +-
 .../apache/hadoop/hbase/io/package-frame.html   |    5 +
 .../apache/hadoop/hbase/io/package-summary.html |   26 +-
 .../apache/hadoop/hbase/io/package-tree.html    |   13 +
 .../org/apache/hadoop/hbase/io/package-use.html |   15 +
 .../apache/hadoop/hbase/ipc/package-tree.html   |    2 +-
 .../hadoop/hbase/mapreduce/package-tree.html    |    4 +-
 .../hadoop/hbase/master/package-tree.html       |    6 +-
 .../hbase/master/procedure/package-tree.html    |    2 +-
 .../apache/hadoop/hbase/metrics/BaseSource.html |    4 +-
 .../hadoop/hbase/metrics/BaseSourceImpl.html    |    2 +-
 .../hbase/metrics/class-use/BaseSource.html     |   53 +-
 .../hbase/metrics/class-use/BaseSourceImpl.html |   40 +-
 .../hadoop/hbase/metrics/package-use.html       |   50 +-
 .../org/apache/hadoop/hbase/package-tree.html   |   12 +-
 

[44/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c9d35424/devapidocs/org/apache/hadoop/hbase/io/class-use/MetricsIO.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/io/class-use/MetricsIO.html 
b/devapidocs/org/apache/hadoop/hbase/io/class-use/MetricsIO.html
new file mode 100644
index 000..bf9e35e
--- /dev/null
+++ b/devapidocs/org/apache/hadoop/hbase/io/class-use/MetricsIO.html
@@ -0,0 +1,170 @@
The new page is the standard javadoc apparatus (navigation header and footer, frames links, "JavaScript is disabled on your browser." notice) around a single table titled "Uses of Class org.apache.hadoop.hbase.io.MetricsIO (Apache HBase 2.0.0-SNAPSHOT API)". One package uses MetricsIO: org.apache.hadoop.hbase.io.hfile ("Provides implementations of HFile and HFile BlockCache."). Within it one field is declared as MetricsIO: (package private) static MetricsIO HFile.metrics, "Static instance for the metrics so that HFileReaders access the same instance". The page closes with the usual ASF copyright notice.
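The HFile.metrics field documented here is a process-wide static, so every reader feeds one sink. A hedged sketch of that sharing pattern (IoMetrics and its members are stand-ins for illustration, not the real MetricsIO class):

import java.util.concurrent.atomic.LongAdder;

final class IoMetrics {
  // Single shared instance; readers never construct their own.
  static final IoMetrics INSTANCE = new IoMetrics();

  private final LongAdder checksumFailures = new LongAdder();

  private IoMetrics() {}

  void incrChecksumFailures() { checksumFailures.increment(); }

  long getChecksumFailures() { return checksumFailures.sum(); }
}

class HFileReaderLike {
  void onChecksumFailure() {
    // Contention-friendly counter shared across all reader instances.
    IoMetrics.INSTANCE.incrChecksumFailures();
  }
}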

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c9d35424/devapidocs/org/apache/hadoop/hbase/io/class-use/MetricsIOSource.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/io/class-use/MetricsIOSource.html 
b/devapidocs/org/apache/hadoop/hbase/io/class-use/MetricsIOSource.html
new file mode 100644
index 000..fdb6ad9
--- /dev/null
+++ b/devapidocs/org/apache/hadoop/hbase/io/class-use/MetricsIOSource.html
@@ -0,0 +1,231 @@
Another standard class-use page, "Uses of Interface org.apache.hadoop.hbase.io.MetricsIOSource (Apache HBase 2.0.0-SNAPSHOT API)". Two packages use the interface: org.apache.hadoop.hbase.io and org.apache.hadoop.hbase.regionserver. In org.apache.hadoop.hbase.io it is implemented by class MetricsIOSourceImpl, held in the private field MetricsIO.source, returned by MetricsIO.getMetricsSource(), and taken by the constructor MetricsIO(MetricsIOSource source, MetricsIOWrapper wrapper). The org.apache.hadoop.hbase.regionserver section lists methods returning MetricsIOSource; the listing breaks off at the start of that table.

[48/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c9d35424/devapidocs/org/apache/hadoop/hbase/class-use/TableName.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/class-use/TableName.html 
b/devapidocs/org/apache/hadoop/hbase/class-use/TableName.html
index 7809f07..339fd21 100644
--- a/devapidocs/org/apache/hadoop/hbase/class-use/TableName.html
+++ b/devapidocs/org/apache/hadoop/hbase/class-use/TableName.html
@@ -2434,65 +2434,73 @@ service.
The only substantive addition in this hunk is a new row in the TableName class-use table:

    void MetaCache.clearCache(TableName tableName, byte[] row, int replicaId)
        Delete a cached location with specific replicaId.

inserted just before the existing clearCache(TableName tableName, byte[] row, ServerName serverName) row ("Delete a cached location for a table, row and server"). Everything else here and in the two follow-on hunks (@@ -2500,13 +2508,13 @@ and @@ -2514,60 +2522,60 @@) only shifts the alternating row-color anchors of the existing entries, namely the ConnectionImplementation and ClusterConnection clearRegionCache rows, the Admin / HBaseAdmin / AsyncHBaseAdmin / AsyncAdmin cloneSnapshot and cloneSnapshotAsync overloads, and the compact entries, down by one position.
 

[50/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c9d35424/checkstyle-aggregate.html
--
diff --git a/checkstyle-aggregate.html b/checkstyle-aggregate.html
index 8b95c2a..43e2651 100644
--- a/checkstyle-aggregate.html
+++ b/checkstyle-aggregate.html
Three hunks. @@ -7,7 +7,7 @@ only refreshes the generated-date comment in the page head. @@ -286,10 +286,10 @@ updates the summary table from 2218 files with 14484 errors to 2223 files with 14486 errors (Info and Warnings stay 0). @@ -1994,5176 +1994,5181 @@ edits the per-file table: a new row for org/apache/hadoop/hbase/io/MetricsIO.java (0 info, 0 warnings, 1 error) and org/apache/hadoop/hbase/io/hfile/HFileBlock.java moving from 46 to 47 errors. The hundreds of surrounding rows (Reference.java, TagCompressionContext.java, the io/asyncfs, io/compress, io/crypto, io/encoding and io/hfile files, and so on) keep their counts and change only in alternating row-color markup.
 

[34/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c9d35424/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.ServerErrorTracker.ServerErrors.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.ServerErrorTracker.ServerErrors.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.ServerErrorTracker.ServerErrors.html
index da54864..ec74d53 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.ServerErrorTracker.ServerErrors.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.ServerErrorTracker.ServerErrors.html
@@ -834,1240 +834,1241 @@
      } else {
        // If we are not supposed to be using the cache, delete any existing cached location
        // so it won't interfere.
        metaCache.clearCache(tableName, row);
      }

      // Query the meta region
      long pauseBase = this.pause;
      try {
        Result regionInfoRow = null;
        s.resetMvccReadPoint();
        try (ReversedClientScanner rcs =
            new ReversedClientScanner(conf, s, TableName.META_TABLE_NAME, this, rpcCallerFactory,
                rpcControllerFactory, getMetaLookupPool(), metaReplicaCallTimeoutScanInMicroSecond)) {
          regionInfoRow = rcs.next();
        }

        if (regionInfoRow == null) {
          throw new TableNotFoundException(tableName);
        }
        // convert the row result into the HRegionLocation we need!
        RegionLocations locations = MetaTableAccessor.getRegionLocations(regionInfoRow);
        if (locations == null || locations.getRegionLocation(replicaId) == null) {
          throw new IOException("HRegionInfo was null in " +
            tableName + ", row=" + regionInfoRow);
        }
        HRegionInfo regionInfo = locations.getRegionLocation(replicaId).getRegionInfo();
        if (regionInfo == null) {
          throw new IOException("HRegionInfo was null or empty in " +
            TableName.META_TABLE_NAME + ", row=" + regionInfoRow);
        }

        // possible we got a region of a different table...
        if (!regionInfo.getTable().equals(tableName)) {
          throw new TableNotFoundException(
            "Table '" + tableName + "' was not found, got: " +
            regionInfo.getTable() + ".");
        }
        if (regionInfo.isSplit()) {
          throw new RegionOfflineException("the only available region for" +
            " the required row is a split parent," +
            " the daughters should be online soon: " +
            regionInfo.getRegionNameAsString());
        }
        if (regionInfo.isOffline()) {
          throw new RegionOfflineException("the region is offline, could" +
            " be caused by a disable table call: " +
            regionInfo.getRegionNameAsString());
        }

        ServerName serverName = locations.getRegionLocation(replicaId).getServerName();
        if (serverName == null) {
          throw new NoServerForRegionException("No server address listed " +
            "in " + TableName.META_TABLE_NAME + " for region " +
            regionInfo.getRegionNameAsString() + " containing row " +
            Bytes.toStringBinary(row));
        }

        if (isDeadServer(serverName)) {
          throw new RegionServerStoppedException("hbase:meta says the region " +
              regionInfo.getRegionNameAsString() + " is managed by the server " + serverName +
              ", but it is dead.");
        }
        // Instantiate the location
        cacheLocation(tableName, locations);
        return locations;
      } catch (TableNotFoundException e) {
        // if we got this error, probably means the table just plain doesn't
        // exist. rethrow the error immediately. this should always be coming
        // from the HTable constructor.
        throw e;
      } catch (IOException e) {
        ExceptionUtil.rethrowIfInterrupt(e);
        if (e instanceof RemoteException) {
          e = ((RemoteException) e).unwrapRemoteException();
        }
        if (e instanceof CallQueueTooBigException) {
          // Give a special check on CallQueueTooBigException, see #HBASE-17114
          pauseBase = this.pauseForCQTBE;
        }
        if (tries < maxAttempts - 1) {
          if (LOG.isDebugEnabled()) {
            LOG.debug("locateRegionInMeta parentTable=" +
                TableName.META_TABLE_NAME + ", metaLocation=" +
                ", attempt=" + tries + " of " +
                maxAttempts + " 
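This catch block feeds a surrounding retry loop whose pause base depends on the failure type (note the CallQueueTooBigException special case from HBASE-17114). A minimal sketch of that shape, with an illustrative interface and backoff curve rather than HBase's exact pause arithmetic:

import java.util.concurrent.ThreadLocalRandom;

public class RetryingLookup {
  interface Lookup<T> { T attempt() throws Exception; }

  static <T> T run(Lookup<T> lookup, int maxAttempts,
                   long pauseMs, long pauseForOverloadMs) throws Exception {
    for (int tries = 0; ; tries++) {
      try {
        return lookup.attempt();
      } catch (Exception e) {
        if (tries >= maxAttempts - 1) {
          throw e; // retries exhausted; surface the last failure
        }
        // Overloaded servers get a longer pause base, mirroring the
        // pauseForCQTBE special case above.
        long base = isOverload(e) ? pauseForOverloadMs : pauseMs;
        long backoff = base * (1L << Math.min(tries, 6)); // capped exponential
        Thread.sleep(backoff + ThreadLocalRandom.current().nextLong(Math.max(1, base)));
      }
    }
  }

  private static boolean isOverload(Exception e) {
    String m = e.getMessage();
    return m != null && m.contains("CallQueueTooBig");
  }
}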

[20/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c9d35424/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.BlockWritable.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.BlockWritable.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.BlockWritable.html
index 6414009..2ad3a12 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.BlockWritable.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.BlockWritable.html
@@ -25,42 +25,42 @@
The first hunk only reorders HFileBlock.java's imports: com.google.common.annotations.VisibleForTesting and com.google.common.base.Preconditions move from the top of the import block to the bottom, after org.apache.hadoop.io.IOUtils. The java.*, org.apache.commons.logging.* and org.apache.hadoop.* imports are identical on both sides, and the trailing context is the unchanged class javadoc:

/**
 * Reads {@link HFile} version 2 blocks to HFiles and via {@link Cacheable} Interface to caches.
@@ -443,1645 +443,1656 @@
    return nextBlockOnDiskSize;
  }

  public BlockType getBlockType() {
    return blockType;
  }

  /** @return get data block encoding id that was used to encode this block */
  public short getDataBlockEncodingId() {
    if (blockType != BlockType.ENCODED_DATA) {
      throw new IllegalArgumentException("Querying encoder ID of a block " +
          "of type other than " + BlockType.ENCODED_DATA + ": " + blockType);
    }
    return 
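getDataBlockEncodingId() is a fail-fast guarded accessor: asking for an encoder id on anything but an ENCODED_DATA block is a caller bug, so it throws instead of returning a bogus id. A tiny self-contained sketch of the same precondition style (BlockKind and EncodedBlock are stand-ins, not HBase types):

public class EncodedBlock {
  enum BlockKind { DATA, ENCODED_DATA, META }

  private final BlockKind kind;
  private final short encodingId;

  public EncodedBlock(BlockKind kind, short encodingId) {
    this.kind = kind;
    this.encodingId = encodingId;
  }

  /** Fails fast with a descriptive message instead of returning a meaningless id. */
  public short encodingId() {
    if (kind != BlockKind.ENCODED_DATA) {
      throw new IllegalArgumentException(
          "Querying encoder ID of a block of type other than "
              + BlockKind.ENCODED_DATA + ": " + kind);
    }
    return encodingId;
  }
}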

[29/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c9d35424/devapidocs/src-html/org/apache/hadoop/hbase/client/ScannerCallableWithReplicas.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/ScannerCallableWithReplicas.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/ScannerCallableWithReplicas.html
index 33d2175..f9ef80a 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/ScannerCallableWithReplicas.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/ScannerCallableWithReplicas.html
@@ -43,394 +43,410 @@
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.RegionLocations;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.client.ScannerCallable.MoreResults;
import org.apache.hadoop.hbase.util.Pair;

/**
 * This class has the logic for handling scanners for regions with and without replicas.
 * 1. A scan is attempted on the default (primary) region
 * 2. The scanner sends all the RPCs to the default region until it is done, or, there
 * is a timeout on the default (a timeout of zero is disallowed).
 * 3. If there is a timeout in (2) above, scanner(s) is opened on the non-default replica(s)
 * 4. The results from the first successful scanner are taken, and it is stored which server
 * returned the results.
 * 5. The next RPCs are done on the above stored server until it is done or there is a timeout,
 * in which case, the other replicas are queried (as in (3) above).
 *
 */
@InterfaceAudience.Private
class ScannerCallableWithReplicas implements RetryingCallable<Result[]> {
  private static final Log LOG = LogFactory.getLog(ScannerCallableWithReplicas.class);
  volatile ScannerCallable currentScannerCallable;
  AtomicBoolean replicaSwitched = new AtomicBoolean(false);
  final ClusterConnection cConnection;
  protected final ExecutorService pool;
  protected final int timeBeforeReplicas;
  private final Scan scan;
  private final int retries;
  private Result lastResult;
  private final RpcRetryingCaller<Result[]> caller;
  private final TableName tableName;
  private Configuration conf;
  private int scannerTimeout;
  private Set<ScannerCallable> outstandingCallables = new HashSet<>();
  private boolean someRPCcancelled = false; //required for testing purposes only

  public ScannerCallableWithReplicas(TableName tableName, ClusterConnection cConnection,
      ScannerCallable baseCallable, ExecutorService pool, int timeBeforeReplicas, Scan scan,
      int retries, int scannerTimeout, int caching, Configuration conf,
      RpcRetryingCaller<Result[]> caller) {
    this.currentScannerCallable = baseCallable;
    this.cConnection = cConnection;
    this.pool = pool;
    if (timeBeforeReplicas < 0) {
      throw new IllegalArgumentException("Invalid value of operation timeout on the primary");
    }
    this.timeBeforeReplicas = timeBeforeReplicas;
    this.scan = scan;
    this.retries = retries;
    this.tableName = tableName;
    this.conf = conf;
    this.scannerTimeout = scannerTimeout;
    this.caller = caller;
  }

  public void setClose() {
    currentScannerCallable.setClose();
  }

  public void setRenew(boolean val) {
    currentScannerCallable.setRenew(val);
  }

  public void setCaching(int caching) {
    currentScannerCallable.setCaching(caching);
  }

  public int getCaching() {
    return currentScannerCallable.getCaching();
  }

  public HRegionInfo getHRegionInfo() {
    return currentScannerCallable.getHRegionInfo();
  }

  public MoreResults moreResultsInRegion() {
    return currentScannerCallable.moreResultsInRegion();
  }

  public MoreResults moreResultsForScan() {
    return currentScannerCallable.moreResultsForScan();
  }

  @Override
  public Result [] call(int timeout) throws IOException {
    // If the active replica callable was closed somewhere, invoke the RPC to
    // really close it. In the case of regular scanners, this applies. We make couple
    // of RPCs to a RegionServer, and when that region is exhausted, we set
    // the closed flag. Then an RPC is required to actually close the scanner.
    if (currentScannerCallable != null && currentScannerCallable.closed) {
      // For closing we target that exact scanner (and not do replica fallback like in
      // the case of normal reads)
      if 
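The five-step javadoc above maps naturally onto a CompletionService: give the primary a head start of timeBeforeReplicas, then fan out to the replicas and take whichever finishes first. A hedged, generic sketch of that control flow (not the actual implementation, which additionally remembers which replica answered):

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.CompletionService;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorCompletionService;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;

public class ReplicaFallback {
  static <R> R scanWithFallback(Callable<R> primary, List<Callable<R>> replicas,
      ExecutorService pool, long primaryHeadStartMs)
      throws InterruptedException, ExecutionException {
    CompletionService<R> cs = new ExecutorCompletionService<>(pool);
    List<Future<R>> inFlight = new ArrayList<>();
    inFlight.add(cs.submit(primary));
    try {
      // Steps 1-2: the primary runs alone for the configured head start.
      Future<R> done = cs.poll(primaryHeadStartMs, TimeUnit.MILLISECONDS);
      if (done != null) {
        return done.get();
      }
      // Step 3: primary timed out, open scanners on the replicas too.
      for (Callable<R> replica : replicas) {
        inFlight.add(cs.submit(replica));
      }
      // Step 4: first successful scanner wins.
      return cs.take().get();
    } finally {
      for (Future<R> f : inFlight) {
        f.cancel(true); // cancel the losers (no-op on the winner)
      }
    }
  }
}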

[49/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c9d35424/checkstyle.rss
--
diff --git a/checkstyle.rss b/checkstyle.rss
index 07230c9..200d9bd 100644
--- a/checkstyle.rss
+++ b/checkstyle.rss
The channel metadata hunk moves the totals from "File: 2218, Errors: 14484" to "File: 2223, Errors: 14486" (Warnings and Infos stay 0). The remaining hunks insert five new per-file items, one for each new metrics class: org/apache/hadoop/hbase/io/MetricsIOWrapperImpl.java (0 errors), MetricsIOWrapper.java (0), MetricsIOSourceImpl.java (0), MetricsIO.java (1 error) and MetricsIOSource.java (0), and bump the existing org/apache/hadoop/hbase/io/hfile/HFileBlock.java item from 46 to 47 errors. Neighbouring items (IPCUtil.java, BaseRowProcessorEndpoint.java, StructIterator.java, CellSink.java, RegionState.java) appear only as unchanged context.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c9d35424/coc.html
--
diff --git a/coc.html b/coc.html
index 08b31cc..e65751d 100644
--- a/coc.html
+++ b/coc.html
Both hunks are routine regeneration: the first refreshes the generated-date comment in the page head, the second bumps the footer from "Last Published: 2017-06-02" to "Last Published: 2017-06-03". The Code of Conduct text (including the contact address, elided by the archive as priv...@hbase.apache.org) and the ASF copyright footer are unchanged.


[28/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c9d35424/devapidocs/src-html/org/apache/hadoop/hbase/io/MetricsIOSource.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/MetricsIOSource.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/MetricsIOSource.html
new file mode 100644
index 000..b074547
--- /dev/null
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/io/MetricsIOSource.html
@@ -0,0 +1,152 @@
The page wraps the source listing in the standard javadoc source frame; after the ASF license header (lines 001-017) the file reads:

package org.apache.hadoop.hbase.io;

import org.apache.hadoop.hbase.metrics.BaseSource;

public interface MetricsIOSource extends BaseSource {

  /**
   * The name of the metrics
   */
  String METRICS_NAME = "IO";

  /**
   * The name of the metrics context that metrics will be under.
   */
  String METRICS_CONTEXT = "regionserver";

  /**
   * Description
   */
  String METRICS_DESCRIPTION = "Metrics about FileSystem IO";

  /**
   * The name of the metrics context that metrics will be under in jmx
   */
  String METRICS_JMX_CONTEXT = "RegionServer,sub=" + METRICS_NAME;

  String FS_READ_TIME_HISTO_KEY = "fsReadTime";
  String FS_PREAD_TIME_HISTO_KEY = "fsPReadTime";
  String FS_WRITE_HISTO_KEY = "fsWriteTime";

  String CHECKSUM_FAILURES_KEY = "fsChecksumFailureCount";

  String FS_READ_TIME_HISTO_DESC
    = "Latency of HFile's sequential reads on this region server in milliseconds";
  String FS_PREAD_TIME_HISTO_DESC
    = "Latency of HFile's positional reads on this region server in milliseconds";
  String FS_WRITE_TIME_HISTO_DESC
    = "Latency of HFile's writes on this region server in milliseconds";

  String CHECKSUM_FAILURES_DESC = "Number of checksum failures for the HBase HFile checksums at the"
      + " HBase level (separate from HDFS checksums)";

  /**
   * Update the fs sequential read time histogram
   * @param t time it took, in milliseconds
   */
  void updateFsReadTime(long t);

  /**
   * Update the fs positional read time histogram
   * @param t time it took, in milliseconds
   */
  void updateFsPReadTime(long t);

  /**
   * Update the fs write time histogram
   * @param t time it took, in milliseconds
   */
  void updateFsWriteTime(long t);
}
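The interface above is only the sink; callers time the filesystem operation themselves and hand over the interval in milliseconds. A hedged sketch of a plausible call site (MetricsIOSourceLike restates two of the methods above for self-containment, and doRead is a placeholder, not HBase API):

public class TimedRead {
  interface MetricsIOSourceLike {
    void updateFsReadTime(long t);   // sequential read latency, milliseconds
    void updateFsPReadTime(long t);  // positional read latency, milliseconds
  }

  static byte[] readBlock(boolean positional, MetricsIOSourceLike metrics) {
    long startNs = System.nanoTime();
    byte[] data = doRead(positional);                    // the actual filesystem read
    long elapsedMs = (System.nanoTime() - startNs) / 1_000_000L;
    if (positional) {
      metrics.updateFsPReadTime(elapsedMs);              // feeds the fsPReadTime histogram
    } else {
      metrics.updateFsReadTime(elapsedMs);               // feeds the fsReadTime histogram
    }
    return data;
  }

  private static byte[] doRead(boolean positional) {
    return new byte[0];                                  // placeholder I/O
  }
}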

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c9d35424/devapidocs/src-html/org/apache/hadoop/hbase/io/MetricsIOSourceImpl.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/MetricsIOSourceImpl.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/MetricsIOSourceImpl.html
new file mode 100644
index 000..11c20e6
--- /dev/null
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/io/MetricsIOSourceImpl.html
@@ -0,0 +1,156 @@
The listing opens with the same javadoc source frame and the standard ASF license header, and the message breaks off inside the license before the class body.

[42/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c9d35424/devapidocs/org/apache/hadoop/hbase/io/hfile/HFileBlock.BlockIterator.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/io/hfile/HFileBlock.BlockIterator.html 
b/devapidocs/org/apache/hadoop/hbase/io/hfile/HFileBlock.BlockIterator.html
index 93168db..f476790 100644
--- a/devapidocs/org/apache/hadoop/hbase/io/hfile/HFileBlock.BlockIterator.html
+++ b/devapidocs/org/apache/hadoop/hbase/io/hfile/HFileBlock.BlockIterator.html
Regeneration noise only: the interface line (static interface HFileBlock.BlockIterator, "An interface allowing to iterate HFileBlocks.") and the docs for nextBlock() ("Get the next block, or null if there are no more blocks to iterate.") and nextBlockWithBlockType(BlockType blockType) ("Similar to nextBlock() but checks block type, throws an exception if incorrect, and returns the HFile block") pick up new internal anchors; no signatures change.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c9d35424/devapidocs/org/apache/hadoop/hbase/io/hfile/HFileBlock.BlockWritable.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/io/hfile/HFileBlock.BlockWritable.html 
b/devapidocs/org/apache/hadoop/hbase/io/hfile/HFileBlock.BlockWritable.html
index a49df2b..39846ad 100644
--- a/devapidocs/org/apache/hadoop/hbase/io/hfile/HFileBlock.BlockWritable.html
+++ b/devapidocs/org/apache/hadoop/hbase/io/hfile/HFileBlock.BlockWritable.html
Likewise anchor churn only, around static interface HFileBlock.BlockWritable ("Something that can be written into a block.") and its two members: getBlockType() ("The type of block this data should use.") and writeToBlock(DataOutput out) ("Writes the block to the provided stream. Must not write any magic records.").

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c9d35424/devapidocs/org/apache/hadoop/hbase/io/hfile/HFileBlock.FSReader.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/io/hfile/HFileBlock.FSReader.html 
b/devapidocs/org/apache/hadoop/hbase/io/hfile/HFileBlock.FSReader.html
index 7db9b41..d2adff0 100644
--- a/devapidocs/org/apache/hadoop/hbase/io/hfile/HFileBlock.FSReader.html
+++ b/devapidocs/org/apache/hadoop/hbase/io/hfile/HFileBlock.FSReader.html
The one real API change in this message: in static interface HFileBlock.FSReader ("A full-fledged reader with iteration ability."), readBlockData(long offset, long onDiskSize, boolean pread) gains a fourth parameter and becomes readBlockData(long offset, long onDiskSize, boolean pread, boolean updateMetrics), in both the method summary and the method detail ("Reads the block at the given offset in the file with the given on-disk size and uncompressed size."). The listing breaks off in the unchanged blockRange(long startOffset, ...) entry.
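A hedged sketch of what the new updateMetrics flag plausibly enables: callers that read blocks for internal bookkeeping can opt out of the latency histograms so they do not skew user-facing numbers. The reader and metrics types below are illustrative stand-ins, not the real HFileBlock API:

public class MeteredBlockReader {
  interface ReadHisto { void update(long millis); }

  private final ReadHisto readTime;

  public MeteredBlockReader(ReadHisto readTime) {
    this.readTime = readTime;
  }

  public byte[] readBlockData(long offset, long onDiskSize, boolean pread, boolean updateMetrics) {
    long startNs = System.nanoTime();
    byte[] block = readFromFs(offset, onDiskSize, pread);
    if (updateMetrics) {
      // Internal reads pass false so they do not distort the read-time histogram.
      readTime.update((System.nanoTime() - startNs) / 1_000_000L);
    }
    return block;
  }

  private byte[] readFromFs(long offset, long onDiskSize, boolean pread) {
    return new byte[(int) Math.max(0, onDiskSize)];      // placeholder I/O
  }
}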

[27/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c9d35424/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFile.CachingBlockReader.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFile.CachingBlockReader.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFile.CachingBlockReader.html
index b6c2fe3..1765903 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFile.CachingBlockReader.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFile.CachingBlockReader.html
@@ -60,892 +60,917 @@
The hunk inserts two imports into HFile.java, org.apache.hadoop.hbase.io.MetricsIO and org.apache.hadoop.hbase.io.MetricsIOWrapperImpl, after org.apache.hadoop.hbase.io.FSDataInputStreamWrapper; the rest of the import block (io.compress.Compression through org.apache.hadoop.io.Writable, plus the guava VisibleForTesting and Preconditions imports) merely shifts. The class javadoc that follows is unchanged; cleaned of markup it reads:

File format for hbase. A file of sorted key/value pairs. Both keys and values are byte arrays.

The memory footprint of a HFile includes the following (below is taken from the TFile documentation, https://issues.apache.org/jira/browse/HADOOP-3315, but applies also to HFile):

 * Some constant overhead of reading or writing a compressed block.
   * Each compressed block requires one compression/decompression codec for I/O.
   * Temporary space to buffer the key.
   * Temporary space to buffer the value.
 * HFile index, which is proportional to the total number of Data Blocks. The total amount of memory needed to hold the index can be estimated as (56+AvgKeySize)*NumBlocks.

Suggestions on performance optimization.

 * Minimum block size. We recommend a setting of minimum block size between 8KB to 1MB for general usage. Larger block size is preferred if files are primarily for sequential access. However, it would lead to inefficient random access (because there are more data to decompress). Smaller blocks are good for random access, but require more memory to hold the block index, and may be slower to create (because we must flush the compressor stream at the conclusion of each data block, which leads to an FS I/O flush). Further, due to the internal caching in Compression codec, the smallest possible block size would be around 20KB-30KB.
 * The current implementation does not offer true 
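The (56+AvgKeySize)*NumBlocks index estimate above is easy to make concrete. A small worked example, assuming a 10 GB file, 64 KB blocks, and 50-byte average keys (all three inputs are assumptions, not values from the source):

public class HFileIndexEstimate {
  public static void main(String[] args) {
    long fileBytes = 10L * 1024 * 1024 * 1024;  // 10 GB of data (assumed)
    long blockBytes = 64L * 1024;               // 64 KB blocks (assumed)
    long avgKeySize = 50;                       // average key length in bytes (assumed)

    long numBlocks = fileBytes / blockBytes;            // 163,840 blocks
    long indexBytes = (56 + avgKeySize) * numBlocks;    // formula from the javadoc above
    System.out.printf("blocks=%d index=%.1f MB%n",
        numBlocks, indexBytes / (1024.0 * 1024.0));     // ~16.6 MB of index memory
  }
}

So even a 10 GB file at this block size keeps its index well under 20 MB, which is why smaller blocks (more blocks, bigger index) trade random-access speed against memory exactly as the javadoc warns.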

[30/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c9d35424/devapidocs/src-html/org/apache/hadoop/hbase/client/ScannerCallableWithReplicas.RetryingRPC.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/ScannerCallableWithReplicas.RetryingRPC.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/ScannerCallableWithReplicas.RetryingRPC.html
index 33d2175..f9ef80a 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/ScannerCallableWithReplicas.RetryingRPC.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/ScannerCallableWithReplicas.RetryingRPC.html
@@ -43,394 +43,410 @@
The hunk body is a verbatim repeat of the ScannerCallableWithReplicas source listing already shown in the [29/51] message above (imports, the five-step replica-fallback javadoc, fields, constructor, delegating accessors, and the opening of call(int timeout)), cut off at the same comment.

[45/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c9d35424/devapidocs/org/apache/hadoop/hbase/io/MetricsIOSource.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/io/MetricsIOSource.html 
b/devapidocs/org/apache/hadoop/hbase/io/MetricsIOSource.html
new file mode 100644
index 000..8c2ab37
--- /dev/null
+++ b/devapidocs/org/apache/hadoop/hbase/io/MetricsIOSource.html
@@ -0,0 +1,531 @@
+http://www.w3.org/TR/html4/loose.dtd;>
+
+
+
+
+
+MetricsIOSource (Apache HBase 2.0.0-SNAPSHOT API)
+
+
+
+
+
+var methods = {"i0":6,"i1":6,"i2":6};
+var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],4:["t3","Abstract Methods"]};
+var altColor = "altColor";
+var rowColor = "rowColor";
+var tableTab = "tableTab";
+var activeTableTab = "activeTableTab";
+
+
+JavaScript is disabled on your browser.
+
+
+
+
+
+Skip navigation links
+
+
+
+
+Overview
+Package
+Class
+Use
+Tree
+Deprecated
+Index
+Help
+
+
+
+
+PrevClass
+NextClass
+
+
+Frames
+NoFrames
+
+
+AllClasses
+
+
+
+
+
+
+
+Summary:
+Nested|
+Field|
+Constr|
+Method
+
+
+Detail:
+Field|
+Constr|
+Method
+
+
+
+
+
+
+
+
+org.apache.hadoop.hbase.io
+Interface 
MetricsIOSource
+
+
+
+
+
+
+All Superinterfaces:
+BaseSource
+
+
+All Known Implementing Classes:
+MetricsIOSourceImpl
+
+
+
+public interface MetricsIOSource
+extends BaseSource
+
+
+
+
+
+
+
+
+
+
+
+Field Summary
+
+Fields
+
+Modifier and Type
+Field and Description
+
+
+static http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
+CHECKSUM_FAILURES_DESC
+
+
+static http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
+CHECKSUM_FAILURES_KEY
+
+
+static http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
+FS_PREAD_TIME_HISTO_DESC
+
+
+static http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
+FS_PREAD_TIME_HISTO_KEY
+
+
+static http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
+FS_READ_TIME_HISTO_DESC
+
+
+static http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
+FS_READ_TIME_HISTO_KEY
+
+
+static http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
+FS_WRITE_HISTO_KEY
+
+
+static http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
+FS_WRITE_TIME_HISTO_DESC
+
+
+static http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
+METRICS_CONTEXT
+The name of the metrics context that metrics will be 
under.
+
+
+
+static http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
+METRICS_DESCRIPTION
+Description
+
+
+
+static http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
+METRICS_JMX_CONTEXT
+The name of the metrics context that metrics will be under 
in jmx
+
+
+
+static http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
+METRICS_NAME
+The name of the metrics
+
+
+
+
+
+
+
+Fields inherited from interfaceorg.apache.hadoop.hbase.metrics.BaseSource
+HBASE_METRICS_SYSTEM_NAME
+
+Method Summary
+
+All Methods  Instance Methods  Abstract Methods
+
+Modifier and Type    Method and Description
+void                 updateFsPReadTime(long t)
+                     Update the fs positional read time histogram.
+void                 updateFsReadTime(long t)
+                     Update the fs sequential read time histogram.
+void                 updateFsWriteTime(long t)
+                     Update the fs write time histogram.
+
+Methods inherited from interface org.apache.hadoop.hbase.metrics.BaseSource:
+    decGauge, getMetricRegistryInfo, getMetricsContext, getMetricsDescription,
+    getMetricsJmxContext, getMetricsName, incCounters, incGauge, init,
+    removeMetric, setGauge, updateHistogram
+
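The three update methods above are the whole abstract surface of the interface: each takes the elapsed time of a single filesystem operation. As a caller-side illustration only (how the MetricsIOSource instance is obtained is out of scope here, and the helper class below is hypothetical), timing one positional read and feeding the pread histogram might look like:

    import java.io.IOException;
    import java.util.concurrent.TimeUnit;

    import org.apache.hadoop.fs.FSDataInputStream;
    import org.apache.hadoop.hbase.io.MetricsIOSource;

    public class PreadTimer {
      // Hypothetical helper: measures one positional read and reports it.
      public static void preadAndRecord(MetricsIOSource metrics, FSDataInputStream in,
          long pos, byte[] buf) throws IOException {
        long startNs = System.nanoTime();
        in.readFully(pos, buf, 0, buf.length); // positional read; stream offset unchanged
        long elapsed = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startNs);
        metrics.updateFsPReadTime(elapsed);    // feeds the FS_PREAD_TIME_HISTO_KEY histogram
      }
    }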
+Field Detail
+
+METRICS_NAME
+static final String METRICS_NAME

[43/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c9d35424/devapidocs/org/apache/hadoop/hbase/io/hfile/HFile.Reader.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/io/hfile/HFile.Reader.html b/devapidocs/org/apache/hadoop/hbase/io/hfile/HFile.Reader.html
index 78efb66..d761220 100644
--- a/devapidocs/org/apache/hadoop/hbase/io/hfile/HFile.Reader.html
+++ b/devapidocs/org/apache/hadoop/hbase/io/hfile/HFile.Reader.html
@@ -113,7 +113,7 @@ var activeTableTab = "activeTableTab";
-public static interface HFile.Reader
+public static interface HFile.Reader
 extends Closeable, HFile.CachingBlockReader
 An interface used by clients to open and iterate an HFile.
@@ -305,7 +305,7 @@
 getName
-String getName()
+String getName()
 Returns this reader's "name". Usually the last component of the path.
 Needs to be constant as the file is being moved to support caching on write.
@@ -317,7 +317,7 @@
 getComparator
-CellComparator getComparator()
+CellComparator getComparator()
@@ -326,7 +326,7 @@
 getScanner
-HFileScanner getScanner(boolean cacheBlocks,
+HFileScanner getScanner(boolean cacheBlocks,
                         boolean pread,
                         boolean isCompaction)
@@ -337,7 +337,7 @@
 getMetaBlock
-HFileBlock getMetaBlock(String metaBlockName,
+HFileBlock getMetaBlock(String metaBlockName,
                         boolean cacheBlock)
                  throws IOException
@@ -352,7 +352,7 @@
 loadFileInfo
-Map<byte[],byte[]> loadFileInfo()
+Map<byte[],byte[]> loadFileInfo()
                  throws IOException
 Throws:
@@ -366,7 +366,7 @@
 getLastKey
-Cell getLastKey()
+Cell getLastKey()
@@ -375,7 +375,7 @@
 midkey
-Cell midkey()
+Cell midkey()
      throws IOException
 Throws:
@@ -389,7 +389,7 @@
 length
-long length()
+long length()
@@ -398,7 +398,7 @@
 getEntries
-long getEntries()
+long getEntries()
@@ -407,7 +407,7 @@
 getFirstKey
-Cell getFirstKey()
+Cell getFirstKey()
@@ -416,7 +416,7 @@
 indexSize
-long indexSize()
+long indexSize()
@@ -425,7 +425,7 @@
 getFirstRowKey
-byte[] getFirstRowKey()
+byte[] getFirstRowKey()
@@ -434,7 +434,7 @@
 getLastRowKey
-byte[] getLastRowKey()
+byte[] getLastRowKey()
@@ -443,7 +443,7 @@
 getTrailer
-FixedFileTrailer getTrailer()
+FixedFileTrailer getTrailer()
@@ -452,7 +452,7 @@
 getDataBlockIndexReader
-HFileBlockIndex.BlockIndexReader getDataBlockIndexReader()
+HFileBlockIndex.BlockIndexReader getDataBlockIndexReader()
@@ -461,7 +461,7 @@
 getScanner

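For orientation, here is a minimal read loop against the interface above. Treat it as a sketch, not the canonical API: it assumes a 2.0-era HFile.createReader(fs, path, cacheConf, conf) overload and HFileScanner.getCell(), and the path is assumed to point at an existing HFile.

    import java.util.Map;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.io.hfile.CacheConfig;
    import org.apache.hadoop.hbase.io.hfile.HFile;
    import org.apache.hadoop.hbase.io.hfile.HFileScanner;

    public class HFileDump {
      public static void dump(Configuration conf, FileSystem fs, Path path) throws Exception {
        HFile.Reader reader = HFile.createReader(fs, path, new CacheConfig(conf), conf);
        try {
          Map<byte[], byte[]> fileInfo = reader.loadFileInfo();          // must load before scanning
          HFileScanner scanner = reader.getScanner(false, false, false); // no cache, no pread, not compaction
          if (scanner.seekTo()) {                                        // position at first cell
            do {
              Cell cell = scanner.getCell();
              System.out.println(cell);
            } while (scanner.next());
          }
        } finally {
          reader.close();
        }
      }
    }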
[39/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c9d35424/devapidocs/org/apache/hadoop/hbase/metrics/class-use/BaseSourceImpl.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/metrics/class-use/BaseSourceImpl.html b/devapidocs/org/apache/hadoop/hbase/metrics/class-use/BaseSourceImpl.html
index 39b5020..2e8565c 100644
--- a/devapidocs/org/apache/hadoop/hbase/metrics/class-use/BaseSourceImpl.html
+++ b/devapidocs/org/apache/hadoop/hbase/metrics/class-use/BaseSourceImpl.html
@@ -83,51 +83,55 @@
+org.apache.hadoop.hbase.io
 org.apache.hadoop.hbase.ipc
   Tools to help define network clients and servers.
 org.apache.hadoop.hbase.master
 org.apache.hadoop.hbase.master.balancer
 org.apache.hadoop.hbase.metrics
   Metrics API for HBase.
 org.apache.hadoop.hbase.regionserver
 org.apache.hadoop.hbase.regionserver.wal
 org.apache.hadoop.hbase.replication.regionserver
 org.apache.hadoop.hbase.rest
   HBase REST
 org.apache.hadoop.hbase.thrift
   Provides an HBase Thrift (http://incubator.apache.org/thrift/) service.
 org.apache.hadoop.hbase.zookeeper
@@ -136,6 +140,24 @@
+Uses of BaseSourceImpl in org.apache.hadoop.hbase.io
+
+Subclasses of BaseSourceImpl in org.apache.hadoop.hbase.io:
+
+Modifier and Type    Class and Description
+class                MetricsIOSourceImpl
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c9d35424/devapidocs/org/apache/hadoop/hbase/metrics/package-use.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/metrics/package-use.html b/devapidocs/org/apache/hadoop/hbase/metrics/package-use.html
index eca4b48..b21ae93 100644
--- a/devapidocs/org/apache/hadoop/hbase/metrics/package-use.html
+++ b/devapidocs/org/apache/hadoop/hbase/metrics/package-use.html
@@ -93,65 +93,69 @@
+org.apache.hadoop.hbase.io
 org.apache.hadoop.hbase.ipc
   Tools to help define network clients and servers.
 org.apache.hadoop.hbase.master
 org.apache.hadoop.hbase.master.balancer
 org.apache.hadoop.hbase.metrics
   Metrics API for HBase.
 org.apache.hadoop.hbase.metrics.impl
   Implementation of the HBase Metrics framework.
 org.apache.hadoop.hbase.regionserver
 org.apache.hadoop.hbase.regionserver.wal
 org.apache.hadoop.hbase.replication.regionserver
 org.apache.hadoop.hbase.rest
   HBase REST
 org.apache.hadoop.hbase.thrift
   Provides an HBase Thrift (http://incubator.apache.org/thrift/) service.
 org.apache.hadoop.hbase.util
 org.apache.hadoop.hbase.zookeeper
 org.apache.hadoop.metrics2.lib
@@ -202,6 +206,28 @@
+Classes in org.apache.hadoop.hbase.metrics used by org.apache.hadoop.hbase.io:
+
+Class and Description
+BaseSource
+  BaseSource for dynamic metrics to announce to Metrics2.
+BaseSourceImpl
+  Hadoop 2 implementation of BaseSource (using metrics2 framework).
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c9d35424/devapidocs/org/apache/hadoop/hbase/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/package-tree.html b/devapidocs/org/apache/hadoop/hbase/package-tree.html
index 1987c2e..93ef0c8 100644
--- a/devapidocs/org/apache/hadoop/hbase/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/package-tree.html
@@ -422,16 +422,16 @@
 java.lang.Enum<E> (implements java.lang.Comparable<T>, java.io.Serializable)
-org.apache.hadoop.hbase.MemoryCompactionPolicy
-org.apache.hadoop.hbase.HConstants.Modify
-org.apache.hadoop.hbase.CompatibilitySingletonFactory.SingletonStorage
 org.apache.hadoop.hbase.KeepDeletedCells
+org.apache.hadoop.hbase.MetaTableAccessor.QueryType
 org.apache.hadoop.hbase.HealthChecker.HealthCheckerExitStatus
-org.apache.hadoop.hbase.Coprocessor.State
-org.apache.hadoop.hbase.KeyValue.Type
 org.apache.hadoop.hbase.HConstants.OperationStatusCode
+org.apache.hadoop.hbase.Coprocessor.State
+org.apache.hadoop.hbase.CompatibilitySingletonFactory.SingletonStorage
+org.apache.hadoop.hbase.HConstants.Modify
 org.apache.hadoop.hbase.ProcedureState
-org.apache.hadoop.hbase.MetaTableAccessor.QueryType
+org.apache.hadoop.hbase.KeyValue.Type
+org.apache.hadoop.hbase.MemoryCompactionPolicy

[32/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c9d35424/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.html
index da54864..ec74d53 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.html
@@ -834,1240 +834,1241 @@
 826      } else {
 827        // If we are not supposed to be using the cache, delete any existing cached location
 828        // so it won't interfere.
-829        metaCache.clearCache(tableName, row);
-830      }
-831
-832      // Query the meta region
-833      long pauseBase = this.pause;
-834      try {
-835        Result regionInfoRow = null;
-836        s.resetMvccReadPoint();
-837        try (ReversedClientScanner rcs =
-838            new ReversedClientScanner(conf, s, TableName.META_TABLE_NAME, this, rpcCallerFactory,
-839                rpcControllerFactory, getMetaLookupPool(), metaReplicaCallTimeoutScanInMicroSecond)) {
-840          regionInfoRow = rcs.next();
-841        }
-842
-843        if (regionInfoRow == null) {
-844          throw new TableNotFoundException(tableName);
-845        }
-846        // convert the row result into the HRegionLocation we need!
-847        RegionLocations locations = MetaTableAccessor.getRegionLocations(regionInfoRow);
-848        if (locations == null || locations.getRegionLocation(replicaId) == null) {
-849          throw new IOException("HRegionInfo was null in " +
-850            tableName + ", row=" + regionInfoRow);
-851        }
-852        HRegionInfo regionInfo = locations.getRegionLocation(replicaId).getRegionInfo();
-853        if (regionInfo == null) {
-854          throw new IOException("HRegionInfo was null or empty in " +
-855            TableName.META_TABLE_NAME + ", row=" + regionInfoRow);
-856        }
-857
-858        // possible we got a region of a different table...
-859        if (!regionInfo.getTable().equals(tableName)) {
-860          throw new TableNotFoundException(
-861            "Table '" + tableName + "' was not found, got: " +
-862            regionInfo.getTable() + ".");
-863        }
-864        if (regionInfo.isSplit()) {
-865          throw new RegionOfflineException("the only available region for" +
-866            " the required row is a split parent," +
-867            " the daughters should be online soon: " +
-868            regionInfo.getRegionNameAsString());
-869        }
-870        if (regionInfo.isOffline()) {
-871          throw new RegionOfflineException("the region is offline, could" +
-872            " be caused by a disable table call: " +
-873            regionInfo.getRegionNameAsString());
-874        }
-875
-876        ServerName serverName = locations.getRegionLocation(replicaId).getServerName();
-877        if (serverName == null) {
-878          throw new NoServerForRegionException("No server address listed " +
-879            "in " + TableName.META_TABLE_NAME + " for region " +
-880            regionInfo.getRegionNameAsString() + " containing row " +
-881            Bytes.toStringBinary(row));
-882        }
-883
-884        if (isDeadServer(serverName)){
-885          throw new RegionServerStoppedException("hbase:meta says the region "+
-886            regionInfo.getRegionNameAsString()+" is managed by the server " + serverName +
-887            ", but it is dead.");
-888        }
-889        // Instantiate the location
-890        cacheLocation(tableName, locations);
-891        return locations;
-892      } catch (TableNotFoundException e) {
-893        // if we got this error, probably means the table just plain doesn't
-894        // exist. rethrow the error immediately. this should always be coming
-895        // from the HTable constructor.
-896        throw e;
-897      } catch (IOException e) {
-898        ExceptionUtil.rethrowIfInterrupt(e);
-899        if (e instanceof RemoteException) {
-900          e = ((RemoteException)e).unwrapRemoteException();
-901        }
-902        if (e instanceof CallQueueTooBigException) {
-903          // Give a special check on CallQueueTooBigException, see #HBASE-17114
-904          pauseBase = this.pauseForCQTBE;
-905        }
-906        if (tries < maxAttempts - 1) {
-907          if (LOG.isDebugEnabled()) {
-908            LOG.debug("locateRegionInMeta parentTable=" +
-909              TableName.META_TABLE_NAME + ", metaLocation=" +
-910              ", attempt=" + tries + " of " +
-911              maxAttempts + " failed; retrying after sleep of " +
-912              ConnectionUtils.getPauseTime(pauseBase, tries) + " because: " + e.getMessage());
-913          }
-914

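The sleep length in that retry path comes from ConnectionUtils.getPauseTime. A sketch of the backoff it computes follows; the multiplier table mirrors HConstants.RETRY_BACKOFF from memory and should be treated as an assumption, as should the jitter factor.

    import java.util.concurrent.ThreadLocalRandom;

    public class BackoffSketch {
      // Assumed copy of HConstants.RETRY_BACKOFF; treat values as illustrative.
      static final int[] RETRY_BACKOFF = {1, 2, 3, 5, 10, 20, 40, 100, 100, 100, 100, 200, 200};

      // Roughly what ConnectionUtils.getPauseTime(pause, tries) does: scale the base
      // pause by a capped multiplier and add ~1% random jitter so retrying clients
      // do not wake up in lockstep.
      static long getPauseTime(long pause, int tries) {
        int index = Math.min(tries, RETRY_BACKOFF.length - 1);
        long normal = pause * RETRY_BACKOFF[index];
        long jitter = (long) (normal * ThreadLocalRandom.current().nextFloat() * 0.01f);
        return normal + jitter;
      }
    }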
[36/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c9d35424/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.MasterServiceState.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.MasterServiceState.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.MasterServiceState.html
index da54864..ec74d53 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.MasterServiceState.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.MasterServiceState.html
@@ -834,1240 +834,1241 @@

[33/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c9d35424/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.ServerErrorTracker.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.ServerErrorTracker.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.ServerErrorTracker.html
index da54864..ec74d53 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.ServerErrorTracker.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.ServerErrorTracker.html
@@ -834,1240 +834,1241 @@

[18/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c9d35424/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.FSReaderImpl.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.FSReaderImpl.html b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.FSReaderImpl.html
index 6414009..2ad3a12 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.FSReaderImpl.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.FSReaderImpl.html
@@ -25,42 +25,42 @@
 017 */
 018package org.apache.hadoop.hbase.io.hfile;
 019
-020import com.google.common.annotations.VisibleForTesting;
-021import com.google.common.base.Preconditions;
-022
-023import java.io.DataInputStream;
-024import java.io.DataOutput;
-025import java.io.DataOutputStream;
-026import java.io.IOException;
-027import java.io.InputStream;
-028import java.nio.ByteBuffer;
-029import java.util.concurrent.atomic.AtomicReference;
-030
-031import org.apache.commons.logging.Log;
-032import org.apache.commons.logging.LogFactory;
-033import org.apache.hadoop.fs.FSDataInputStream;
-034import org.apache.hadoop.fs.FSDataOutputStream;
-035import org.apache.hadoop.fs.Path;
-036import org.apache.hadoop.hbase.Cell;
-037import org.apache.hadoop.hbase.HConstants;
-038import org.apache.hadoop.hbase.classification.InterfaceAudience;
-039import org.apache.hadoop.hbase.fs.HFileSystem;
-040import org.apache.hadoop.hbase.io.ByteArrayOutputStream;
-041import org.apache.hadoop.hbase.io.ByteBuffInputStream;
-042import org.apache.hadoop.hbase.io.ByteBufferWriterDataOutputStream;
-043import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper;
-044import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
-045import org.apache.hadoop.hbase.io.encoding.HFileBlockDecodingContext;
-046import org.apache.hadoop.hbase.io.encoding.HFileBlockDefaultDecodingContext;
-047import org.apache.hadoop.hbase.io.encoding.HFileBlockDefaultEncodingContext;
-048import org.apache.hadoop.hbase.io.encoding.HFileBlockEncodingContext;
-049import org.apache.hadoop.hbase.nio.ByteBuff;
-050import org.apache.hadoop.hbase.nio.MultiByteBuff;
-051import org.apache.hadoop.hbase.nio.SingleByteBuff;
-052import org.apache.hadoop.hbase.util.Bytes;
-053import org.apache.hadoop.hbase.util.ChecksumType;
-054import org.apache.hadoop.hbase.util.ClassSize;
-055import org.apache.hadoop.io.IOUtils;
+020import java.io.DataInputStream;
+021import java.io.DataOutput;
+022import java.io.DataOutputStream;
+023import java.io.IOException;
+024import java.io.InputStream;
+025import java.nio.ByteBuffer;
+026import java.util.concurrent.atomic.AtomicReference;
+027
+028import org.apache.commons.logging.Log;
+029import org.apache.commons.logging.LogFactory;
+030import org.apache.hadoop.fs.FSDataInputStream;
+031import org.apache.hadoop.fs.FSDataOutputStream;
+032import org.apache.hadoop.fs.Path;
+033import org.apache.hadoop.hbase.Cell;
+034import org.apache.hadoop.hbase.HConstants;
+035import org.apache.hadoop.hbase.classification.InterfaceAudience;
+036import org.apache.hadoop.hbase.fs.HFileSystem;
+037import org.apache.hadoop.hbase.io.ByteArrayOutputStream;
+038import org.apache.hadoop.hbase.io.ByteBuffInputStream;
+039import org.apache.hadoop.hbase.io.ByteBufferWriterDataOutputStream;
+040import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper;
+041import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
+042import org.apache.hadoop.hbase.io.encoding.HFileBlockDecodingContext;
+043import org.apache.hadoop.hbase.io.encoding.HFileBlockDefaultDecodingContext;
+044import org.apache.hadoop.hbase.io.encoding.HFileBlockDefaultEncodingContext;
+045import org.apache.hadoop.hbase.io.encoding.HFileBlockEncodingContext;
+046import org.apache.hadoop.hbase.nio.ByteBuff;
+047import org.apache.hadoop.hbase.nio.MultiByteBuff;
+048import org.apache.hadoop.hbase.nio.SingleByteBuff;
+049import org.apache.hadoop.hbase.util.Bytes;
+050import org.apache.hadoop.hbase.util.ChecksumType;
+051import org.apache.hadoop.hbase.util.ClassSize;
+052import org.apache.hadoop.io.IOUtils;
+053
+054import com.google.common.annotations.VisibleForTesting;
+055import com.google.common.base.Preconditions;
 056
 057/**
 058 * Reads {@link HFile} version 2 blocks to HFiles and via {@link Cacheable} Interface to caches.
@@ -443,1645 +443,1656 @@
 435    return nextBlockOnDiskSize;
 436  }
 437
-438  public BlockType getBlockType() {
-439    return blockType;
-440  }
-441
-442  /** @return get data block encoding id that was used to encode this block */
-443  public short getDataBlockEncodingId() {
-444    if (blockType != BlockType.ENCODED_DATA) {
-445      throw new IllegalArgumentException("Querying encoder ID of a block " +
-446          "of type other than " + BlockType.ENCODED_DATA + ": " + blockType);
-447    }
-448    return

[16/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c9d35424/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.PrefetchedHeader.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.PrefetchedHeader.html b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.PrefetchedHeader.html
index 6414009..2ad3a12 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.PrefetchedHeader.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.PrefetchedHeader.html
@@ -25,42 +25,42 @@
@@ -443,1645 +443,1656 @@

[02/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c9d35424/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestReplicaWithCluster.RegionServerHostingPrimayMetaRegionSlowOrStopCopro.html
--
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestReplicaWithCluster.RegionServerHostingPrimayMetaRegionSlowOrStopCopro.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestReplicaWithCluster.RegionServerHostingPrimayMetaRegionSlowOrStopCopro.html
new file mode 100644
index 000..fd80980
--- /dev/null
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestReplicaWithCluster.RegionServerHostingPrimayMetaRegionSlowOrStopCopro.html
@@ -0,0 +1,853 @@
+001/**
+002 *
+003 * Licensed to the Apache Software Foundation (ASF) under one
+004 * or more contributor license agreements.  See the NOTICE file
+005 * distributed with this work for additional information
+006 * regarding copyright ownership.  The ASF licenses this file
+007 * to you under the Apache License, Version 2.0 (the
+008 * "License"); you may not use this file except in compliance
+009 * with the License.  You may obtain a copy of the License at
+010 *
+011 *     http://www.apache.org/licenses/LICENSE-2.0
+012 *
+013 * Unless required by applicable law or agreed to in writing, software
+014 * distributed under the License is distributed on an "AS IS" BASIS,
+015 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+016 * See the License for the specific language governing permissions and
+017 * limitations under the License.
+018 */
+019
+020
+021package org.apache.hadoop.hbase.client;
+022
+023import java.io.IOException;
+024import java.util.ArrayList;
+025import java.util.Arrays;
+026import java.util.List;
+027import java.util.concurrent.CountDownLatch;
+028import java.util.concurrent.TimeUnit;
+029import java.util.concurrent.atomic.AtomicLong;
+030import java.util.concurrent.atomic.AtomicReference;
+031
+032import org.apache.commons.logging.Log;
+033import org.apache.commons.logging.LogFactory;
+034import org.apache.hadoop.conf.Configuration;
+035import org.apache.hadoop.fs.Path;
+036import org.apache.hadoop.hbase.Cell;
+037import org.apache.hadoop.hbase.HBaseConfiguration;
+038import org.apache.hadoop.hbase.HBaseTestingUtility;
+039import org.apache.hadoop.hbase.HColumnDescriptor;
+040import org.apache.hadoop.hbase.HConstants;
+041import org.apache.hadoop.hbase.HTableDescriptor;
+042import org.apache.hadoop.hbase.RegionLocations;
+043import org.apache.hadoop.hbase.ServerName;
+044import org.apache.hadoop.hbase.TableName;
+045import org.apache.hadoop.hbase.Waiter;
+046
+047import org.apache.hadoop.hbase.client.replication.ReplicationAdmin;
+048import org.apache.hadoop.hbase.coprocessor.ObserverContext;
+049import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
+050import org.apache.hadoop.hbase.coprocessor.RegionObserver;
+051import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
+052import org.apache.hadoop.hbase.regionserver.RegionScanner;
+053import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException;
+054import org.apache.hadoop.hbase.regionserver.StorefileRefresherChore;
+055import org.apache.hadoop.hbase.regionserver.TestHRegionServerBulkLoad;
+056import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
+057import org.apache.hadoop.hbase.testclassification.ClientTests;
+058import org.apache.hadoop.hbase.testclassification.MediumTests;
+059import org.apache.hadoop.hbase.util.Bytes;
+060import org.apache.hadoop.hbase.util.Pair;
+061import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster;
+062import org.junit.AfterClass;
+063import org.junit.Assert;
+064import org.junit.BeforeClass;
+065import org.junit.Test;
+066import org.junit.experimental.categories.Category;
+067
+068@Category({MediumTests.class, ClientTests.class})
+069public class TestReplicaWithCluster {
+070  private static final Log LOG = LogFactory.getLog(TestReplicaWithCluster.class);
+071
+072  private static final int NB_SERVERS = 3;
+073  private static final byte[] row = TestReplicaWithCluster.class.getName().getBytes();
+074  private static final HBaseTestingUtility HTU = new HBaseTestingUtility();
+075
+076  // second minicluster used in testing of replication
+077  private static HBaseTestingUtility HTU2;
+078  private static final byte[] f = HConstants.CATALOG_FAMILY;
+079
+080  private final static int REFRESH_PERIOD = 1000;
+081  private final static int META_SCAN_TIMEOUT_IN_MILLISEC = 200;
+082
+083  /**
+084   * This copro is used to synchronize the tests.
+085   */
+086  public static class SlowMeCopro implements RegionObserver {
+087    static final AtomicLong sleepTime = new AtomicLong(0);
+088    static final AtomicReference<CountDownLatch> cdl = new

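The latch-based SlowMeCopro above is how these tests stall the primary replica so that a timeline-consistent read falls back to a secondary. A condensed, hedged sketch of the pattern the test methods follow (the table and row handles are assumed to exist; Consistency.TIMELINE and Result.isStale() are the client-side hooks):

    // Arm the latch so the next primary-replica read blocks, force a timeline
    // read, then release the latch again.
    SlowMeCopro.cdl.set(new CountDownLatch(1));
    try {
      Get get = new Get(row);
      get.setConsistency(Consistency.TIMELINE); // permit a possibly-stale replica read
      Result result = table.get(get);
      Assert.assertTrue(result.isStale());      // served by a secondary replica
    } finally {
      SlowMeCopro.cdl.get().countDown();        // unblock the primary
      SlowMeCopro.sleepTime.set(0);
    }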
[35/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c9d35424/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.MasterServiceStubMaker.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.MasterServiceStubMaker.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.MasterServiceStubMaker.html
index da54864..ec74d53 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.MasterServiceStubMaker.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.MasterServiceStubMaker.html
@@ -834,1240 +834,1241 @@

[24/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c9d35424/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFile.Writer.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFile.Writer.html b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFile.Writer.html
index b6c2fe3..1765903 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFile.Writer.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFile.Writer.html
@@ -60,892 +60,917 @@
 052import org.apache.hadoop.hbase.classification.InterfaceAudience;
 053import org.apache.hadoop.hbase.fs.HFileSystem;
 054import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper;
-055import org.apache.hadoop.hbase.io.compress.Compression;
-056import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
-057import org.apache.hadoop.hbase.protobuf.ProtobufMagic;
-058import org.apache.hadoop.hbase.regionserver.CellSink;
-059import org.apache.hadoop.hbase.regionserver.ShipperListener;
-060import org.apache.hadoop.hbase.shaded.com.google.protobuf.UnsafeByteOperations;
-061import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-062import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
-063import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.BytesBytesPair;
-064import org.apache.hadoop.hbase.shaded.protobuf.generated.HFileProtos;
-065import org.apache.hadoop.hbase.util.BloomFilterWriter;
-066import org.apache.hadoop.hbase.util.Bytes;
-067import org.apache.hadoop.hbase.util.FSUtils;
-068import org.apache.hadoop.io.Writable;
-069
-070import com.google.common.annotations.VisibleForTesting;
-071import com.google.common.base.Preconditions;
-072
-073/**
-074 * File format for hbase.
-075 * A file of sorted key/value pairs. Both keys and values are byte arrays.
-076 * <p>
-077 * The memory footprint of a HFile includes the following (below is taken from the
-078 * <a
-079 * href=https://issues.apache.org/jira/browse/HADOOP-3315>TFile</a> documentation
-080 * but applies also to HFile):
-081 * <ul>
-082 * <li>Some constant overhead of reading or writing a compressed block.
+055import org.apache.hadoop.hbase.io.MetricsIO;
+056import org.apache.hadoop.hbase.io.MetricsIOWrapperImpl;
+057import org.apache.hadoop.hbase.io.compress.Compression;
+058import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
+059import org.apache.hadoop.hbase.protobuf.ProtobufMagic;
+060import org.apache.hadoop.hbase.regionserver.CellSink;
+061import org.apache.hadoop.hbase.regionserver.ShipperListener;
+062import org.apache.hadoop.hbase.shaded.com.google.protobuf.UnsafeByteOperations;
+063import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
+064import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
+065import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.BytesBytesPair;
+066import org.apache.hadoop.hbase.shaded.protobuf.generated.HFileProtos;
+067import org.apache.hadoop.hbase.util.BloomFilterWriter;
+068import org.apache.hadoop.hbase.util.Bytes;
+069import org.apache.hadoop.hbase.util.FSUtils;
+070import org.apache.hadoop.io.Writable;
+071
+072import com.google.common.annotations.VisibleForTesting;
+073import com.google.common.base.Preconditions;
+074
+075/**
+076 * File format for hbase.
+077 * A file of sorted key/value pairs. Both keys and values are byte arrays.
+078 * <p>
+079 * The memory footprint of a HFile includes the following (below is taken from the
+080 * <a
+081 * href=https://issues.apache.org/jira/browse/HADOOP-3315>TFile</a> documentation
+082 * but applies also to HFile):
 083 * <ul>
-084 * <li>Each compressed block requires one compression/decompression codec for
-085 * I/O.
-086 * <li>Temporary space to buffer the key.
-087 * <li>Temporary space to buffer the value.
-088 * </ul>
-089 * <li>HFile index, which is proportional to the total number of Data Blocks.
-090 * The total amount of memory needed to hold the index can be estimated as
-091 * (56+AvgKeySize)*NumBlocks.
-092 * </ul>
-093 * Suggestions on performance optimization.
-094 * <ul>
-095 * <li>Minimum block size. We recommend a setting of minimum block size between
-096 * 8KB to 1MB for general usage. Larger block size is preferred if files are
-097 * primarily for sequential access. However, it would lead to inefficient random
-098 * access (because there are more data to decompress). Smaller blocks are good
-099 * for random access, but require more memory to hold the block index, and may
-100 * be slower to create (because we must flush the compressor stream at the
-101 * conclusion of each data block, which leads to an FS I/O flush). Further, due
-102 * to the internal caching in Compression codec, the smallest possible block
-103 * size would be around 20KB-30KB.
-104 * <li>The current implementation does not offer true multi-threading for
-105 * reading. The implementation uses

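The block-size advice in that class comment is applied through the writer factory. A hedged sketch using the usual HFileContextBuilder path follows; the 64 KB figure is just a mid-range choice from the 8 KB-1 MB recommendation, and the path argument is illustrative.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.io.hfile.CacheConfig;
    import org.apache.hadoop.hbase.io.hfile.HFile;
    import org.apache.hadoop.hbase.io.hfile.HFileContext;
    import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;

    public class HFileWriteExample {
      // Smaller blocks favor random reads at the cost of a bigger block index;
      // larger blocks favor sequential scans, per the doc comment above.
      public static HFile.Writer openWriter(Configuration conf, FileSystem fs, Path path)
          throws Exception {
        HFileContext context = new HFileContextBuilder()
            .withBlockSize(64 * 1024)
            .build();
        return HFile.getWriterFactory(conf, new CacheConfig(conf))
            .withPath(fs, path)
            .withFileContext(context)
            .create();
      }
    }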
[21/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c9d35424/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.BlockIterator.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.BlockIterator.html b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.BlockIterator.html
index 6414009..2ad3a12 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.BlockIterator.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.BlockIterator.html
@@ -25,42 +25,42 @@
@@ -443,1645 +443,1656 @@

[03/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c9d35424/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestReplicaWithCluster.RegionServerHostingPrimayMetaRegionSlowCopro.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestReplicaWithCluster.RegionServerHostingPrimayMetaRegionSlowCopro.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestReplicaWithCluster.RegionServerHostingPrimayMetaRegionSlowCopro.html
deleted file mode 100644
index ebf0532..000
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestReplicaWithCluster.RegionServerHostingPrimayMetaRegionSlowCopro.html
+++ /dev/null
@@ -1,719 +0,0 @@
-http://www.w3.org/TR/html4/loose.dtd;>
-
-
-Source code
-
-
-
-
-001/**
-002 *
-003 * Licensed to the Apache Software 
Foundation (ASF) under one
-004 * or more contributor license 
agreements.  See the NOTICE file
-005 * distributed with this work for 
additional information
-006 * regarding copyright ownership.  The 
ASF licenses this file
-007 * to you under the Apache License, 
Version 2.0 (the
-008 * "License"); you may not use this file 
except in compliance
-009 * with the License.  You may obtain a 
copy of the License at
-010 *
-011 * 
http://www.apache.org/licenses/LICENSE-2.0
-012 *
-013 * Unless required by applicable law or 
agreed to in writing, software
-014 * distributed under the License is 
distributed on an "AS IS" BASIS,
-015 * WITHOUT WARRANTIES OR CONDITIONS OF 
ANY KIND, either express or implied.
-016 * See the License for the specific 
language governing permissions and
-017 * limitations under the License.
-018 */
-019
-020
-021package org.apache.hadoop.hbase.client;
-022
-023import java.io.IOException;
-024import java.util.ArrayList;
-025import java.util.Arrays;
-026import java.util.List;
-027import 
java.util.concurrent.CountDownLatch;
-028import java.util.concurrent.TimeUnit;
-029import 
java.util.concurrent.atomic.AtomicLong;
-030import 
java.util.concurrent.atomic.AtomicReference;
-031
-032import org.apache.commons.logging.Log;
-033import 
org.apache.commons.logging.LogFactory;
-034import 
org.apache.hadoop.conf.Configuration;
-035import org.apache.hadoop.fs.Path;
-036import org.apache.hadoop.hbase.Cell;
-037import 
org.apache.hadoop.hbase.HBaseConfiguration;
-038import 
org.apache.hadoop.hbase.HBaseTestingUtility;
-039import 
org.apache.hadoop.hbase.HColumnDescriptor;
-040import 
org.apache.hadoop.hbase.HConstants;
-041import 
org.apache.hadoop.hbase.HTableDescriptor;
-042import 
org.apache.hadoop.hbase.RegionLocations;
-043import org.apache.hadoop.hbase.Waiter;
-044
-045import 
org.apache.hadoop.hbase.client.replication.ReplicationAdmin;
-046import 
org.apache.hadoop.hbase.coprocessor.ObserverContext;
-047import 
org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
-048import 
org.apache.hadoop.hbase.coprocessor.RegionObserver;
-049import 
org.apache.hadoop.hbase.ipc.RpcControllerFactory;
-050import 
org.apache.hadoop.hbase.regionserver.RegionScanner;
-051import 
org.apache.hadoop.hbase.regionserver.RegionServerStoppedException;
-052import 
org.apache.hadoop.hbase.regionserver.StorefileRefresherChore;
-053import 
org.apache.hadoop.hbase.regionserver.TestHRegionServerBulkLoad;
-054import 
org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-055import 
org.apache.hadoop.hbase.testclassification.ClientTests;
-056import 
org.apache.hadoop.hbase.testclassification.MediumTests;
-057import 
org.apache.hadoop.hbase.util.Bytes;
-058import 
org.apache.hadoop.hbase.util.Pair;
-059import 
org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster;
-060import org.junit.AfterClass;
-061import org.junit.Assert;
-062import org.junit.BeforeClass;
-063import org.junit.Test;
-064import 
org.junit.experimental.categories.Category;
-065
-066@Category({MediumTests.class, 
ClientTests.class})
-067public class TestReplicaWithCluster {
-068  private static final Log LOG = 
LogFactory.getLog(TestReplicaWithCluster.class);
-069
-070  private static final int NB_SERVERS = 
3;
-071  private static final byte[] row = 
TestReplicaWithCluster.class.getName().getBytes();
-072  private static final 
HBaseTestingUtility HTU = new HBaseTestingUtility();
-073
-074  // second minicluster used in testing 
of replication
-075  private static HBaseTestingUtility 
HTU2;
-076  private static final byte[] f = 
HConstants.CATALOG_FAMILY;
-077
-078  private final static int REFRESH_PERIOD 
= 1000;
-079  private final static int 
META_SCAN_TIMEOUT_IN_MILLISEC = 200;
-080
-081  /**
-082   * This copro is used to synchronize 
the tests.
-083   */
-084  public static class SlowMeCopro 
implements RegionObserver {
-085static final AtomicLong sleepTime = 
new AtomicLong(0);
-086static final 
AtomicReference<CountDownLatch> cdl = new AtomicReference<>(new 
CountDownLatch(0));
-087
-088public SlowMeCopro() {
-089}
-090
-091@Override
-092public void 
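
The file removed above gates region reads on a CountDownLatch held in an AtomicReference (the cdl field visible in SlowMeCopro), so a test can pause a server-side scan and release it on demand. A minimal, self-contained sketch of that gating pattern; the class and method names here are illustrative, not taken from the deleted test:

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicReference;

public class LatchGateSketch {
  // An open latch (count 0) lets readers pass immediately; swapping in a
  // closed latch (count 1) blocks them until the test counts it down.
  static final AtomicReference<CountDownLatch> cdl =
      new AtomicReference<>(new CountDownLatch(0));

  static void preScanHook() throws InterruptedException {
    cdl.get().await(); // a coprocessor would call this before scanning
  }

  public static void main(String[] args) throws Exception {
    cdl.set(new CountDownLatch(1));      // close the gate
    Thread scanner = new Thread(() -> {
      try {
        preScanHook();
        System.out.println("scan proceeded");
      } catch (InterruptedException ignored) {
      }
    });
    scanner.start();
    cdl.get().countDown();               // release the blocked scanner
    scanner.join();
  }
}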

[23/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c9d35424/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFile.WriterFactory.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFile.WriterFactory.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFile.WriterFactory.html
index b6c2fe3..1765903 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFile.WriterFactory.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFile.WriterFactory.html
@@ -60,892 +60,917 @@
 052import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
 053import 
org.apache.hadoop.hbase.fs.HFileSystem;
 054import 
org.apache.hadoop.hbase.io.FSDataInputStreamWrapper;
-055import 
org.apache.hadoop.hbase.io.compress.Compression;
-056import 
org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
-057import 
org.apache.hadoop.hbase.protobuf.ProtobufMagic;
-058import 
org.apache.hadoop.hbase.regionserver.CellSink;
-059import 
org.apache.hadoop.hbase.regionserver.ShipperListener;
-060import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.UnsafeByteOperations;
-061import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-062import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
-063import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.BytesBytesPair;
-064import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HFileProtos;
-065import 
org.apache.hadoop.hbase.util.BloomFilterWriter;
-066import 
org.apache.hadoop.hbase.util.Bytes;
-067import 
org.apache.hadoop.hbase.util.FSUtils;
-068import org.apache.hadoop.io.Writable;
-069
-070import 
com.google.common.annotations.VisibleForTesting;
-071import 
com.google.common.base.Preconditions;
-072
-073/**
-074 * File format for hbase.
-075 * A file of sorted key/value pairs. Both 
keys and values are byte arrays.
-076 * <p>
-077 * The memory footprint of a HFile 
includes the following (below is taken from the
-078 * <a
-079 * 
href=https://issues.apache.org/jira/browse/HADOOP-3315>TFile</a> 
documentation
-080 * but applies also to HFile):
-081 * <ul>
-082 * <li>Some constant overhead of 
reading or writing a compressed block.
+055import 
org.apache.hadoop.hbase.io.MetricsIO;
+056import 
org.apache.hadoop.hbase.io.MetricsIOWrapperImpl;
+057import 
org.apache.hadoop.hbase.io.compress.Compression;
+058import 
org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
+059import 
org.apache.hadoop.hbase.protobuf.ProtobufMagic;
+060import 
org.apache.hadoop.hbase.regionserver.CellSink;
+061import 
org.apache.hadoop.hbase.regionserver.ShipperListener;
+062import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.UnsafeByteOperations;
+063import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
+064import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
+065import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.BytesBytesPair;
+066import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HFileProtos;
+067import 
org.apache.hadoop.hbase.util.BloomFilterWriter;
+068import 
org.apache.hadoop.hbase.util.Bytes;
+069import 
org.apache.hadoop.hbase.util.FSUtils;
+070import org.apache.hadoop.io.Writable;
+071
+072import 
com.google.common.annotations.VisibleForTesting;
+073import 
com.google.common.base.Preconditions;
+074
+075/**
+076 * File format for hbase.
+077 * A file of sorted key/value pairs. Both 
keys and values are byte arrays.
+078 * <p>
+079 * The memory footprint of a HFile 
includes the following (below is taken from the
+080 * <a
+081 * 
href=https://issues.apache.org/jira/browse/HADOOP-3315>TFile</a> 
documentation
+082 * but applies also to HFile):
 083 * <ul>
-084 * <li>Each compressed block 
requires one compression/decompression codec for
-085 * I/O.
-086 * <li>Temporary space to buffer 
the key.
-087 * <li>Temporary space to buffer 
the value.
-088 * </ul>
-089 * <li>HFile index, which is 
proportional to the total number of Data Blocks.
-090 * The total amount of memory needed to 
hold the index can be estimated as
-091 * (56+AvgKeySize)*NumBlocks.
-092 * </ul>
-093 * Suggestions on performance 
optimization.
-094 * <ul>
-095 * <li>Minimum block size. We 
recommend a setting of minimum block size between
-096 * 8KB to 1MB for general usage. Larger 
block size is preferred if files are
-097 * primarily for sequential access. 
However, it would lead to inefficient random
-098 * access (because there are more data to 
decompress). Smaller blocks are good
-099 * for random access, but require more 
memory to hold the block index, and may
-100 * be slower to create (because we must 
flush the compressor stream at the
-101 * conclusion of each data block, which 
leads to an FS I/O flush). Further, due
-102 * to the internal caching in Compression 
codec, the smallest possible block
-103 * size would be around 20KB-30KB.
-104 * <li>The current implementation 
does not offer true multi-threading for
-105 * 
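
The HFile javadoc in this hunk recommends block sizes between 8KB and 1MB: smaller blocks help random access at the cost of a larger block index, larger blocks help sequential scans. A short sketch of applying that advice when defining a column family; the table and family names are placeholders, and 64KB is simply a value inside the recommended range (it is also the HBase default):

import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.util.Bytes;

public class BlockSizeSketch {
  public static void main(String[] args) {
    HTableDescriptor table = new HTableDescriptor(TableName.valueOf("demo"));
    HColumnDescriptor family = new HColumnDescriptor(Bytes.toBytes("f"));
    // Small blocks favor random reads; large blocks favor sequential scans.
    family.setBlocksize(64 * 1024);
    table.addFamily(family);
    System.out.println(table);
  }
}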

[13/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c9d35424/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.html
index 6414009..2ad3a12 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.html
@@ -25,42 +25,42 @@
 017 */
 018package 
org.apache.hadoop.hbase.io.hfile;
 019
-020import 
com.google.common.annotations.VisibleForTesting;
-021import 
com.google.common.base.Preconditions;
-022
-023import java.io.DataInputStream;
-024import java.io.DataOutput;
-025import java.io.DataOutputStream;
-026import java.io.IOException;
-027import java.io.InputStream;
-028import java.nio.ByteBuffer;
-029import 
java.util.concurrent.atomic.AtomicReference;
-030
-031import org.apache.commons.logging.Log;
-032import 
org.apache.commons.logging.LogFactory;
-033import 
org.apache.hadoop.fs.FSDataInputStream;
-034import 
org.apache.hadoop.fs.FSDataOutputStream;
-035import org.apache.hadoop.fs.Path;
-036import org.apache.hadoop.hbase.Cell;
-037import 
org.apache.hadoop.hbase.HConstants;
-038import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
-039import 
org.apache.hadoop.hbase.fs.HFileSystem;
-040import 
org.apache.hadoop.hbase.io.ByteArrayOutputStream;
-041import 
org.apache.hadoop.hbase.io.ByteBuffInputStream;
-042import 
org.apache.hadoop.hbase.io.ByteBufferWriterDataOutputStream;
-043import 
org.apache.hadoop.hbase.io.FSDataInputStreamWrapper;
-044import 
org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
-045import 
org.apache.hadoop.hbase.io.encoding.HFileBlockDecodingContext;
-046import 
org.apache.hadoop.hbase.io.encoding.HFileBlockDefaultDecodingContext;
-047import 
org.apache.hadoop.hbase.io.encoding.HFileBlockDefaultEncodingContext;
-048import 
org.apache.hadoop.hbase.io.encoding.HFileBlockEncodingContext;
-049import 
org.apache.hadoop.hbase.nio.ByteBuff;
-050import 
org.apache.hadoop.hbase.nio.MultiByteBuff;
-051import 
org.apache.hadoop.hbase.nio.SingleByteBuff;
-052import 
org.apache.hadoop.hbase.util.Bytes;
-053import 
org.apache.hadoop.hbase.util.ChecksumType;
-054import 
org.apache.hadoop.hbase.util.ClassSize;
-055import org.apache.hadoop.io.IOUtils;
+020import java.io.DataInputStream;
+021import java.io.DataOutput;
+022import java.io.DataOutputStream;
+023import java.io.IOException;
+024import java.io.InputStream;
+025import java.nio.ByteBuffer;
+026import 
java.util.concurrent.atomic.AtomicReference;
+027
+028import org.apache.commons.logging.Log;
+029import 
org.apache.commons.logging.LogFactory;
+030import 
org.apache.hadoop.fs.FSDataInputStream;
+031import 
org.apache.hadoop.fs.FSDataOutputStream;
+032import org.apache.hadoop.fs.Path;
+033import org.apache.hadoop.hbase.Cell;
+034import 
org.apache.hadoop.hbase.HConstants;
+035import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
+036import 
org.apache.hadoop.hbase.fs.HFileSystem;
+037import 
org.apache.hadoop.hbase.io.ByteArrayOutputStream;
+038import 
org.apache.hadoop.hbase.io.ByteBuffInputStream;
+039import 
org.apache.hadoop.hbase.io.ByteBufferWriterDataOutputStream;
+040import 
org.apache.hadoop.hbase.io.FSDataInputStreamWrapper;
+041import 
org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
+042import 
org.apache.hadoop.hbase.io.encoding.HFileBlockDecodingContext;
+043import 
org.apache.hadoop.hbase.io.encoding.HFileBlockDefaultDecodingContext;
+044import 
org.apache.hadoop.hbase.io.encoding.HFileBlockDefaultEncodingContext;
+045import 
org.apache.hadoop.hbase.io.encoding.HFileBlockEncodingContext;
+046import 
org.apache.hadoop.hbase.nio.ByteBuff;
+047import 
org.apache.hadoop.hbase.nio.MultiByteBuff;
+048import 
org.apache.hadoop.hbase.nio.SingleByteBuff;
+049import 
org.apache.hadoop.hbase.util.Bytes;
+050import 
org.apache.hadoop.hbase.util.ChecksumType;
+051import 
org.apache.hadoop.hbase.util.ClassSize;
+052import org.apache.hadoop.io.IOUtils;
+053
+054import 
com.google.common.annotations.VisibleForTesting;
+055import 
com.google.common.base.Preconditions;
 056
 057/**
 058 * Reads {@link HFile} version 2 blocks 
to HFiles and via {@link Cacheable} Interface to caches.
@@ -443,1645 +443,1656 @@
 435return nextBlockOnDiskSize;
 436  }
 437
-438  public BlockType getBlockType() {
-439return blockType;
-440  }
-441
-442  /** @return get data block encoding id 
that was used to encode this block */
-443  public short getDataBlockEncodingId() 
{
-444if (blockType != 
BlockType.ENCODED_DATA) {
-445  throw new 
IllegalArgumentException("Querying encoder ID of a block " +
-446  "of type other than " + 
BlockType.ENCODED_DATA + ": " + blockType);
-447}
-448return buf.getShort(headerSize());
-449  }
-450
-451  /**
-452   * @return the on-disk size of 
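
The accessors shown in this hunk only make sense together: getDataBlockEncodingId() throws IllegalArgumentException unless getBlockType() is ENCODED_DATA. A hedged sketch of a guarded caller, assuming the DataBlockEncoding.getEncodingById(short) lookup provided by the encoding classes:

import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.io.hfile.BlockType;
import org.apache.hadoop.hbase.io.hfile.HFileBlock;

public class EncodingIdSketch {
  // Check the block type first; per the source above, asking a
  // non-encoded block for its encoder ID throws.
  static DataBlockEncoding encodingOf(HFileBlock block) {
    if (block.getBlockType() != BlockType.ENCODED_DATA) {
      return DataBlockEncoding.NONE;
    }
    return DataBlockEncoding.getEncodingById(block.getDataBlockEncodingId());
  }
}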

[15/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c9d35424/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.Writer.State.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.Writer.State.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.Writer.State.html
index 6414009..2ad3a12 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.Writer.State.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.Writer.State.html
@@ -25,42 +25,42 @@
 017 */
 018package 
org.apache.hadoop.hbase.io.hfile;
 019
-020import 
com.google.common.annotations.VisibleForTesting;
-021import 
com.google.common.base.Preconditions;
-022
-023import java.io.DataInputStream;
-024import java.io.DataOutput;
-025import java.io.DataOutputStream;
-026import java.io.IOException;
-027import java.io.InputStream;
-028import java.nio.ByteBuffer;
-029import 
java.util.concurrent.atomic.AtomicReference;
-030
-031import org.apache.commons.logging.Log;
-032import 
org.apache.commons.logging.LogFactory;
-033import 
org.apache.hadoop.fs.FSDataInputStream;
-034import 
org.apache.hadoop.fs.FSDataOutputStream;
-035import org.apache.hadoop.fs.Path;
-036import org.apache.hadoop.hbase.Cell;
-037import 
org.apache.hadoop.hbase.HConstants;
-038import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
-039import 
org.apache.hadoop.hbase.fs.HFileSystem;
-040import 
org.apache.hadoop.hbase.io.ByteArrayOutputStream;
-041import 
org.apache.hadoop.hbase.io.ByteBuffInputStream;
-042import 
org.apache.hadoop.hbase.io.ByteBufferWriterDataOutputStream;
-043import 
org.apache.hadoop.hbase.io.FSDataInputStreamWrapper;
-044import 
org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
-045import 
org.apache.hadoop.hbase.io.encoding.HFileBlockDecodingContext;
-046import 
org.apache.hadoop.hbase.io.encoding.HFileBlockDefaultDecodingContext;
-047import 
org.apache.hadoop.hbase.io.encoding.HFileBlockDefaultEncodingContext;
-048import 
org.apache.hadoop.hbase.io.encoding.HFileBlockEncodingContext;
-049import 
org.apache.hadoop.hbase.nio.ByteBuff;
-050import 
org.apache.hadoop.hbase.nio.MultiByteBuff;
-051import 
org.apache.hadoop.hbase.nio.SingleByteBuff;
-052import 
org.apache.hadoop.hbase.util.Bytes;
-053import 
org.apache.hadoop.hbase.util.ChecksumType;
-054import 
org.apache.hadoop.hbase.util.ClassSize;
-055import org.apache.hadoop.io.IOUtils;
+020import java.io.DataInputStream;
+021import java.io.DataOutput;
+022import java.io.DataOutputStream;
+023import java.io.IOException;
+024import java.io.InputStream;
+025import java.nio.ByteBuffer;
+026import 
java.util.concurrent.atomic.AtomicReference;
+027
+028import org.apache.commons.logging.Log;
+029import 
org.apache.commons.logging.LogFactory;
+030import 
org.apache.hadoop.fs.FSDataInputStream;
+031import 
org.apache.hadoop.fs.FSDataOutputStream;
+032import org.apache.hadoop.fs.Path;
+033import org.apache.hadoop.hbase.Cell;
+034import 
org.apache.hadoop.hbase.HConstants;
+035import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
+036import 
org.apache.hadoop.hbase.fs.HFileSystem;
+037import 
org.apache.hadoop.hbase.io.ByteArrayOutputStream;
+038import 
org.apache.hadoop.hbase.io.ByteBuffInputStream;
+039import 
org.apache.hadoop.hbase.io.ByteBufferWriterDataOutputStream;
+040import 
org.apache.hadoop.hbase.io.FSDataInputStreamWrapper;
+041import 
org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
+042import 
org.apache.hadoop.hbase.io.encoding.HFileBlockDecodingContext;
+043import 
org.apache.hadoop.hbase.io.encoding.HFileBlockDefaultDecodingContext;
+044import 
org.apache.hadoop.hbase.io.encoding.HFileBlockDefaultEncodingContext;
+045import 
org.apache.hadoop.hbase.io.encoding.HFileBlockEncodingContext;
+046import 
org.apache.hadoop.hbase.nio.ByteBuff;
+047import 
org.apache.hadoop.hbase.nio.MultiByteBuff;
+048import 
org.apache.hadoop.hbase.nio.SingleByteBuff;
+049import 
org.apache.hadoop.hbase.util.Bytes;
+050import 
org.apache.hadoop.hbase.util.ChecksumType;
+051import 
org.apache.hadoop.hbase.util.ClassSize;
+052import org.apache.hadoop.io.IOUtils;
+053
+054import 
com.google.common.annotations.VisibleForTesting;
+055import 
com.google.common.base.Preconditions;
 056
 057/**
 058 * Reads {@link HFile} version 2 blocks 
to HFiles and via {@link Cacheable} Interface to caches.
@@ -443,1645 +443,1656 @@
 435return nextBlockOnDiskSize;
 436  }
 437
-438  public BlockType getBlockType() {
-439return blockType;
-440  }
-441
-442  /** @return get data block encoding id 
that was used to encode this block */
-443  public short getDataBlockEncodingId() 
{
-444if (blockType != 
BlockType.ENCODED_DATA) {
-445  throw new 
IllegalArgumentException("Querying encoder ID of a block " +
-446  "of type other than " + 
BlockType.ENCODED_DATA + ": " + blockType);
-447}
-448return 

[17/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c9d35424/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.Header.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.Header.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.Header.html
index 6414009..2ad3a12 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.Header.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.Header.html
@@ -25,42 +25,42 @@
 017 */
 018package 
org.apache.hadoop.hbase.io.hfile;
 019
-020import 
com.google.common.annotations.VisibleForTesting;
-021import 
com.google.common.base.Preconditions;
-022
-023import java.io.DataInputStream;
-024import java.io.DataOutput;
-025import java.io.DataOutputStream;
-026import java.io.IOException;
-027import java.io.InputStream;
-028import java.nio.ByteBuffer;
-029import 
java.util.concurrent.atomic.AtomicReference;
-030
-031import org.apache.commons.logging.Log;
-032import 
org.apache.commons.logging.LogFactory;
-033import 
org.apache.hadoop.fs.FSDataInputStream;
-034import 
org.apache.hadoop.fs.FSDataOutputStream;
-035import org.apache.hadoop.fs.Path;
-036import org.apache.hadoop.hbase.Cell;
-037import 
org.apache.hadoop.hbase.HConstants;
-038import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
-039import 
org.apache.hadoop.hbase.fs.HFileSystem;
-040import 
org.apache.hadoop.hbase.io.ByteArrayOutputStream;
-041import 
org.apache.hadoop.hbase.io.ByteBuffInputStream;
-042import 
org.apache.hadoop.hbase.io.ByteBufferWriterDataOutputStream;
-043import 
org.apache.hadoop.hbase.io.FSDataInputStreamWrapper;
-044import 
org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
-045import 
org.apache.hadoop.hbase.io.encoding.HFileBlockDecodingContext;
-046import 
org.apache.hadoop.hbase.io.encoding.HFileBlockDefaultDecodingContext;
-047import 
org.apache.hadoop.hbase.io.encoding.HFileBlockDefaultEncodingContext;
-048import 
org.apache.hadoop.hbase.io.encoding.HFileBlockEncodingContext;
-049import 
org.apache.hadoop.hbase.nio.ByteBuff;
-050import 
org.apache.hadoop.hbase.nio.MultiByteBuff;
-051import 
org.apache.hadoop.hbase.nio.SingleByteBuff;
-052import 
org.apache.hadoop.hbase.util.Bytes;
-053import 
org.apache.hadoop.hbase.util.ChecksumType;
-054import 
org.apache.hadoop.hbase.util.ClassSize;
-055import org.apache.hadoop.io.IOUtils;
+020import java.io.DataInputStream;
+021import java.io.DataOutput;
+022import java.io.DataOutputStream;
+023import java.io.IOException;
+024import java.io.InputStream;
+025import java.nio.ByteBuffer;
+026import 
java.util.concurrent.atomic.AtomicReference;
+027
+028import org.apache.commons.logging.Log;
+029import 
org.apache.commons.logging.LogFactory;
+030import 
org.apache.hadoop.fs.FSDataInputStream;
+031import 
org.apache.hadoop.fs.FSDataOutputStream;
+032import org.apache.hadoop.fs.Path;
+033import org.apache.hadoop.hbase.Cell;
+034import 
org.apache.hadoop.hbase.HConstants;
+035import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
+036import 
org.apache.hadoop.hbase.fs.HFileSystem;
+037import 
org.apache.hadoop.hbase.io.ByteArrayOutputStream;
+038import 
org.apache.hadoop.hbase.io.ByteBuffInputStream;
+039import 
org.apache.hadoop.hbase.io.ByteBufferWriterDataOutputStream;
+040import 
org.apache.hadoop.hbase.io.FSDataInputStreamWrapper;
+041import 
org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
+042import 
org.apache.hadoop.hbase.io.encoding.HFileBlockDecodingContext;
+043import 
org.apache.hadoop.hbase.io.encoding.HFileBlockDefaultDecodingContext;
+044import 
org.apache.hadoop.hbase.io.encoding.HFileBlockDefaultEncodingContext;
+045import 
org.apache.hadoop.hbase.io.encoding.HFileBlockEncodingContext;
+046import 
org.apache.hadoop.hbase.nio.ByteBuff;
+047import 
org.apache.hadoop.hbase.nio.MultiByteBuff;
+048import 
org.apache.hadoop.hbase.nio.SingleByteBuff;
+049import 
org.apache.hadoop.hbase.util.Bytes;
+050import 
org.apache.hadoop.hbase.util.ChecksumType;
+051import 
org.apache.hadoop.hbase.util.ClassSize;
+052import org.apache.hadoop.io.IOUtils;
+053
+054import 
com.google.common.annotations.VisibleForTesting;
+055import 
com.google.common.base.Preconditions;
 056
 057/**
 058 * Reads {@link HFile} version 2 blocks 
to HFiles and via {@link Cacheable} Interface to caches.
@@ -443,1645 +443,1656 @@
 435return nextBlockOnDiskSize;
 436  }
 437
-438  public BlockType getBlockType() {
-439return blockType;
-440  }
-441
-442  /** @return get data block encoding id 
that was used to encode this block */
-443  public short getDataBlockEncodingId() 
{
-444if (blockType != 
BlockType.ENCODED_DATA) {
-445  throw new 
IllegalArgumentException("Querying encoder ID of a block " +
-446  "of type other than " + 
BlockType.ENCODED_DATA + ": " + blockType);
-447}
-448return buf.getShort(headerSize());
-449  }
-450
-451  /**

[19/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c9d35424/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.FSReader.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.FSReader.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.FSReader.html
index 6414009..2ad3a12 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.FSReader.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.FSReader.html
@@ -25,42 +25,42 @@
 017 */
 018package 
org.apache.hadoop.hbase.io.hfile;
 019
-020import 
com.google.common.annotations.VisibleForTesting;
-021import 
com.google.common.base.Preconditions;
-022
-023import java.io.DataInputStream;
-024import java.io.DataOutput;
-025import java.io.DataOutputStream;
-026import java.io.IOException;
-027import java.io.InputStream;
-028import java.nio.ByteBuffer;
-029import 
java.util.concurrent.atomic.AtomicReference;
-030
-031import org.apache.commons.logging.Log;
-032import 
org.apache.commons.logging.LogFactory;
-033import 
org.apache.hadoop.fs.FSDataInputStream;
-034import 
org.apache.hadoop.fs.FSDataOutputStream;
-035import org.apache.hadoop.fs.Path;
-036import org.apache.hadoop.hbase.Cell;
-037import 
org.apache.hadoop.hbase.HConstants;
-038import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
-039import 
org.apache.hadoop.hbase.fs.HFileSystem;
-040import 
org.apache.hadoop.hbase.io.ByteArrayOutputStream;
-041import 
org.apache.hadoop.hbase.io.ByteBuffInputStream;
-042import 
org.apache.hadoop.hbase.io.ByteBufferWriterDataOutputStream;
-043import 
org.apache.hadoop.hbase.io.FSDataInputStreamWrapper;
-044import 
org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
-045import 
org.apache.hadoop.hbase.io.encoding.HFileBlockDecodingContext;
-046import 
org.apache.hadoop.hbase.io.encoding.HFileBlockDefaultDecodingContext;
-047import 
org.apache.hadoop.hbase.io.encoding.HFileBlockDefaultEncodingContext;
-048import 
org.apache.hadoop.hbase.io.encoding.HFileBlockEncodingContext;
-049import 
org.apache.hadoop.hbase.nio.ByteBuff;
-050import 
org.apache.hadoop.hbase.nio.MultiByteBuff;
-051import 
org.apache.hadoop.hbase.nio.SingleByteBuff;
-052import 
org.apache.hadoop.hbase.util.Bytes;
-053import 
org.apache.hadoop.hbase.util.ChecksumType;
-054import 
org.apache.hadoop.hbase.util.ClassSize;
-055import org.apache.hadoop.io.IOUtils;
+020import java.io.DataInputStream;
+021import java.io.DataOutput;
+022import java.io.DataOutputStream;
+023import java.io.IOException;
+024import java.io.InputStream;
+025import java.nio.ByteBuffer;
+026import 
java.util.concurrent.atomic.AtomicReference;
+027
+028import org.apache.commons.logging.Log;
+029import 
org.apache.commons.logging.LogFactory;
+030import 
org.apache.hadoop.fs.FSDataInputStream;
+031import 
org.apache.hadoop.fs.FSDataOutputStream;
+032import org.apache.hadoop.fs.Path;
+033import org.apache.hadoop.hbase.Cell;
+034import 
org.apache.hadoop.hbase.HConstants;
+035import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
+036import 
org.apache.hadoop.hbase.fs.HFileSystem;
+037import 
org.apache.hadoop.hbase.io.ByteArrayOutputStream;
+038import 
org.apache.hadoop.hbase.io.ByteBuffInputStream;
+039import 
org.apache.hadoop.hbase.io.ByteBufferWriterDataOutputStream;
+040import 
org.apache.hadoop.hbase.io.FSDataInputStreamWrapper;
+041import 
org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
+042import 
org.apache.hadoop.hbase.io.encoding.HFileBlockDecodingContext;
+043import 
org.apache.hadoop.hbase.io.encoding.HFileBlockDefaultDecodingContext;
+044import 
org.apache.hadoop.hbase.io.encoding.HFileBlockDefaultEncodingContext;
+045import 
org.apache.hadoop.hbase.io.encoding.HFileBlockEncodingContext;
+046import 
org.apache.hadoop.hbase.nio.ByteBuff;
+047import 
org.apache.hadoop.hbase.nio.MultiByteBuff;
+048import 
org.apache.hadoop.hbase.nio.SingleByteBuff;
+049import 
org.apache.hadoop.hbase.util.Bytes;
+050import 
org.apache.hadoop.hbase.util.ChecksumType;
+051import 
org.apache.hadoop.hbase.util.ClassSize;
+052import org.apache.hadoop.io.IOUtils;
+053
+054import 
com.google.common.annotations.VisibleForTesting;
+055import 
com.google.common.base.Preconditions;
 056
 057/**
 058 * Reads {@link HFile} version 2 blocks 
to HFiles and via {@link Cacheable} Interface to caches.
@@ -443,1645 +443,1656 @@
 435return nextBlockOnDiskSize;
 436  }
 437
-438  public BlockType getBlockType() {
-439return blockType;
-440  }
-441
-442  /** @return get data block encoding id 
that was used to encode this block */
-443  public short getDataBlockEncodingId() 
{
-444if (blockType != 
BlockType.ENCODED_DATA) {
-445  throw new 
IllegalArgumentException("Querying encoder ID of a block " +
-446  "of type other than " + 
BlockType.ENCODED_DATA + ": " + blockType);
-447}
-448return buf.getShort(headerSize());
-449  }
-450

[22/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c9d35424/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFile.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFile.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFile.html
index b6c2fe3..1765903 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFile.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFile.html
@@ -60,892 +60,917 @@
 052import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
 053import 
org.apache.hadoop.hbase.fs.HFileSystem;
 054import 
org.apache.hadoop.hbase.io.FSDataInputStreamWrapper;
-055import 
org.apache.hadoop.hbase.io.compress.Compression;
-056import 
org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
-057import 
org.apache.hadoop.hbase.protobuf.ProtobufMagic;
-058import 
org.apache.hadoop.hbase.regionserver.CellSink;
-059import 
org.apache.hadoop.hbase.regionserver.ShipperListener;
-060import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.UnsafeByteOperations;
-061import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-062import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
-063import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.BytesBytesPair;
-064import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HFileProtos;
-065import 
org.apache.hadoop.hbase.util.BloomFilterWriter;
-066import 
org.apache.hadoop.hbase.util.Bytes;
-067import 
org.apache.hadoop.hbase.util.FSUtils;
-068import org.apache.hadoop.io.Writable;
-069
-070import 
com.google.common.annotations.VisibleForTesting;
-071import 
com.google.common.base.Preconditions;
-072
-073/**
-074 * File format for hbase.
-075 * A file of sorted key/value pairs. Both 
keys and values are byte arrays.
-076 * <p>
-077 * The memory footprint of a HFile 
includes the following (below is taken from the
-078 * <a
-079 * 
href=https://issues.apache.org/jira/browse/HADOOP-3315>TFile</a> 
documentation
-080 * but applies also to HFile):
-081 * <ul>
-082 * <li>Some constant overhead of 
reading or writing a compressed block.
+055import 
org.apache.hadoop.hbase.io.MetricsIO;
+056import 
org.apache.hadoop.hbase.io.MetricsIOWrapperImpl;
+057import 
org.apache.hadoop.hbase.io.compress.Compression;
+058import 
org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
+059import 
org.apache.hadoop.hbase.protobuf.ProtobufMagic;
+060import 
org.apache.hadoop.hbase.regionserver.CellSink;
+061import 
org.apache.hadoop.hbase.regionserver.ShipperListener;
+062import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.UnsafeByteOperations;
+063import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
+064import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
+065import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.BytesBytesPair;
+066import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HFileProtos;
+067import 
org.apache.hadoop.hbase.util.BloomFilterWriter;
+068import 
org.apache.hadoop.hbase.util.Bytes;
+069import 
org.apache.hadoop.hbase.util.FSUtils;
+070import org.apache.hadoop.io.Writable;
+071
+072import 
com.google.common.annotations.VisibleForTesting;
+073import 
com.google.common.base.Preconditions;
+074
+075/**
+076 * File format for hbase.
+077 * A file of sorted key/value pairs. Both 
keys and values are byte arrays.
+078 * <p>
+079 * The memory footprint of a HFile 
includes the following (below is taken from the
+080 * <a
+081 * 
href=https://issues.apache.org/jira/browse/HADOOP-3315>TFile</a> 
documentation
+082 * but applies also to HFile):
 083 * <ul>
-084 * <li>Each compressed block 
requires one compression/decompression codec for
-085 * I/O.
-086 * <li>Temporary space to buffer 
the key.
-087 * <li>Temporary space to buffer 
the value.
-088 * </ul>
-089 * <li>HFile index, which is 
proportional to the total number of Data Blocks.
-090 * The total amount of memory needed to 
hold the index can be estimated as
-091 * (56+AvgKeySize)*NumBlocks.
-092 * </ul>
-093 * Suggestions on performance 
optimization.
-094 * <ul>
-095 * <li>Minimum block size. We 
recommend a setting of minimum block size between
-096 * 8KB to 1MB for general usage. Larger 
block size is preferred if files are
-097 * primarily for sequential access. 
However, it would lead to inefficient random
-098 * access (because there are more data to 
decompress). Smaller blocks are good
-099 * for random access, but require more 
memory to hold the block index, and may
-100 * be slower to create (because we must 
flush the compressor stream at the
-101 * conclusion of each data block, which 
leads to an FS I/O flush). Further, due
-102 * to the internal caching in Compression 
codec, the smallest possible block
-103 * size would be around 20KB-30KB.
-104 * <li>The current implementation 
does not offer true multi-threading for
-105 * reading. The implementation uses 
FSDataInputStream seek()+read(), which is
-106 

[09/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c9d35424/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.Intf.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.Intf.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.Intf.html
index c476d69..33ab3af 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.Intf.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.Intf.html
@@ -69,14 +69,14 @@
 061  requiredArguments = {
 062@org.jamon.annotations.Argument(name 
= "master", type = "HMaster")},
 063  optionalArguments = {
-064@org.jamon.annotations.Argument(name 
= "serverManager", type = "ServerManager"),
-065@org.jamon.annotations.Argument(name 
= "servers", type = "ListServerName"),
-066@org.jamon.annotations.Argument(name 
= "deadServers", type = "SetServerName"),
-067@org.jamon.annotations.Argument(name 
= "format", type = "String"),
-068@org.jamon.annotations.Argument(name 
= "assignmentManager", type = "AssignmentManager"),
+064@org.jamon.annotations.Argument(name 
= "format", type = "String"),
+065@org.jamon.annotations.Argument(name 
= "catalogJanitorEnabled", type = "boolean"),
+066@org.jamon.annotations.Argument(name 
= "serverManager", type = "ServerManager"),
+067@org.jamon.annotations.Argument(name 
= "filter", type = "String"),
+068@org.jamon.annotations.Argument(name 
= "deadServers", type = "SetServerName"),
 069@org.jamon.annotations.Argument(name 
= "frags", type = "MapString,Integer"),
-070@org.jamon.annotations.Argument(name 
= "filter", type = "String"),
-071@org.jamon.annotations.Argument(name 
= "catalogJanitorEnabled", type = "boolean"),
+070@org.jamon.annotations.Argument(name 
= "assignmentManager", type = "AssignmentManager"),
+071@org.jamon.annotations.Argument(name 
= "servers", type = "ListServerName"),
 072@org.jamon.annotations.Argument(name 
= "metaLocation", type = "ServerName")})
 073public class MasterStatusTmpl
 074  extends 
org.jamon.AbstractTemplateProxy
@@ -118,91 +118,91 @@
 110  return m_master;
 111}
 112private HMaster m_master;
-113// 28, 1
-114public void 
setServerManager(ServerManager serverManager)
+113// 27, 1
+114public void setFormat(String 
format)
 115{
-116  // 28, 1
-117  m_serverManager = serverManager;
-118  m_serverManager__IsNotDefault = 
true;
+116  // 27, 1
+117  m_format = format;
+118  m_format__IsNotDefault = true;
 119}
-120public ServerManager 
getServerManager()
+120public String getFormat()
 121{
-122  return m_serverManager;
+122  return m_format;
 123}
-124private ServerManager 
m_serverManager;
-125public boolean 
getServerManager__IsNotDefault()
+124private String m_format;
+125public boolean 
getFormat__IsNotDefault()
 126{
-127  return 
m_serverManager__IsNotDefault;
+127  return m_format__IsNotDefault;
 128}
-129private boolean 
m_serverManager__IsNotDefault;
-130// 23, 1
-131public void 
setServers(ListServerName servers)
+129private boolean 
m_format__IsNotDefault;
+130// 25, 1
+131public void 
setCatalogJanitorEnabled(boolean catalogJanitorEnabled)
 132{
-133  // 23, 1
-134  m_servers = servers;
-135  m_servers__IsNotDefault = true;
+133  // 25, 1
+134  m_catalogJanitorEnabled = 
catalogJanitorEnabled;
+135  
m_catalogJanitorEnabled__IsNotDefault = true;
 136}
-137public ListServerName 
getServers()
+137public boolean 
getCatalogJanitorEnabled()
 138{
-139  return m_servers;
+139  return m_catalogJanitorEnabled;
 140}
-141private ListServerName 
m_servers;
-142public boolean 
getServers__IsNotDefault()
+141private boolean 
m_catalogJanitorEnabled;
+142public boolean 
getCatalogJanitorEnabled__IsNotDefault()
 143{
-144  return m_servers__IsNotDefault;
+144  return 
m_catalogJanitorEnabled__IsNotDefault;
 145}
-146private boolean 
m_servers__IsNotDefault;
-147// 24, 1
-148public void 
setDeadServers(SetServerName deadServers)
+146private boolean 
m_catalogJanitorEnabled__IsNotDefault;
+147// 28, 1
+148public void 
setServerManager(ServerManager serverManager)
 149{
-150  // 24, 1
-151  m_deadServers = deadServers;
-152  m_deadServers__IsNotDefault = 
true;
+150  // 28, 1
+151  m_serverManager = serverManager;
+152  m_serverManager__IsNotDefault = 
true;
 153}
-154public SetServerName 
getDeadServers()
+154public ServerManager 
getServerManager()
 155{
-156  return m_deadServers;
+156  return m_serverManager;
 157}
-158private SetServerName 
m_deadServers;
-159public boolean 
getDeadServers__IsNotDefault()
+158private 

[14/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c9d35424/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.Writer.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.Writer.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.Writer.html
index 6414009..2ad3a12 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.Writer.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.Writer.html
@@ -25,42 +25,42 @@
 017 */
 018package 
org.apache.hadoop.hbase.io.hfile;
 019
-020import 
com.google.common.annotations.VisibleForTesting;
-021import 
com.google.common.base.Preconditions;
-022
-023import java.io.DataInputStream;
-024import java.io.DataOutput;
-025import java.io.DataOutputStream;
-026import java.io.IOException;
-027import java.io.InputStream;
-028import java.nio.ByteBuffer;
-029import 
java.util.concurrent.atomic.AtomicReference;
-030
-031import org.apache.commons.logging.Log;
-032import 
org.apache.commons.logging.LogFactory;
-033import 
org.apache.hadoop.fs.FSDataInputStream;
-034import 
org.apache.hadoop.fs.FSDataOutputStream;
-035import org.apache.hadoop.fs.Path;
-036import org.apache.hadoop.hbase.Cell;
-037import 
org.apache.hadoop.hbase.HConstants;
-038import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
-039import 
org.apache.hadoop.hbase.fs.HFileSystem;
-040import 
org.apache.hadoop.hbase.io.ByteArrayOutputStream;
-041import 
org.apache.hadoop.hbase.io.ByteBuffInputStream;
-042import 
org.apache.hadoop.hbase.io.ByteBufferWriterDataOutputStream;
-043import 
org.apache.hadoop.hbase.io.FSDataInputStreamWrapper;
-044import 
org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
-045import 
org.apache.hadoop.hbase.io.encoding.HFileBlockDecodingContext;
-046import 
org.apache.hadoop.hbase.io.encoding.HFileBlockDefaultDecodingContext;
-047import 
org.apache.hadoop.hbase.io.encoding.HFileBlockDefaultEncodingContext;
-048import 
org.apache.hadoop.hbase.io.encoding.HFileBlockEncodingContext;
-049import 
org.apache.hadoop.hbase.nio.ByteBuff;
-050import 
org.apache.hadoop.hbase.nio.MultiByteBuff;
-051import 
org.apache.hadoop.hbase.nio.SingleByteBuff;
-052import 
org.apache.hadoop.hbase.util.Bytes;
-053import 
org.apache.hadoop.hbase.util.ChecksumType;
-054import 
org.apache.hadoop.hbase.util.ClassSize;
-055import org.apache.hadoop.io.IOUtils;
+020import java.io.DataInputStream;
+021import java.io.DataOutput;
+022import java.io.DataOutputStream;
+023import java.io.IOException;
+024import java.io.InputStream;
+025import java.nio.ByteBuffer;
+026import 
java.util.concurrent.atomic.AtomicReference;
+027
+028import org.apache.commons.logging.Log;
+029import 
org.apache.commons.logging.LogFactory;
+030import 
org.apache.hadoop.fs.FSDataInputStream;
+031import 
org.apache.hadoop.fs.FSDataOutputStream;
+032import org.apache.hadoop.fs.Path;
+033import org.apache.hadoop.hbase.Cell;
+034import 
org.apache.hadoop.hbase.HConstants;
+035import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
+036import 
org.apache.hadoop.hbase.fs.HFileSystem;
+037import 
org.apache.hadoop.hbase.io.ByteArrayOutputStream;
+038import 
org.apache.hadoop.hbase.io.ByteBuffInputStream;
+039import 
org.apache.hadoop.hbase.io.ByteBufferWriterDataOutputStream;
+040import 
org.apache.hadoop.hbase.io.FSDataInputStreamWrapper;
+041import 
org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
+042import 
org.apache.hadoop.hbase.io.encoding.HFileBlockDecodingContext;
+043import 
org.apache.hadoop.hbase.io.encoding.HFileBlockDefaultDecodingContext;
+044import 
org.apache.hadoop.hbase.io.encoding.HFileBlockDefaultEncodingContext;
+045import 
org.apache.hadoop.hbase.io.encoding.HFileBlockEncodingContext;
+046import 
org.apache.hadoop.hbase.nio.ByteBuff;
+047import 
org.apache.hadoop.hbase.nio.MultiByteBuff;
+048import 
org.apache.hadoop.hbase.nio.SingleByteBuff;
+049import 
org.apache.hadoop.hbase.util.Bytes;
+050import 
org.apache.hadoop.hbase.util.ChecksumType;
+051import 
org.apache.hadoop.hbase.util.ClassSize;
+052import org.apache.hadoop.io.IOUtils;
+053
+054import 
com.google.common.annotations.VisibleForTesting;
+055import 
com.google.common.base.Preconditions;
 056
 057/**
 058 * Reads {@link HFile} version 2 blocks 
to HFiles and via {@link Cacheable} Interface to caches.
@@ -443,1645 +443,1656 @@
 435return nextBlockOnDiskSize;
 436  }
 437
-438  public BlockType getBlockType() {
-439return blockType;
-440  }
-441
-442  /** @return get data block encoding id 
that was used to encode this block */
-443  public short getDataBlockEncodingId() 
{
-444if (blockType != 
BlockType.ENCODED_DATA) {
-445  throw new 
IllegalArgumentException("Querying encoder ID of a block " +
-446  "of type other than " + 
BlockType.ENCODED_DATA + ": " + blockType);
-447}
-448return buf.getShort(headerSize());
-449  }
-450
-451  /**

[07/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c9d35424/hbase-archetypes/hbase-archetype-builder/project-info.html
--
diff --git a/hbase-archetypes/hbase-archetype-builder/project-info.html 
b/hbase-archetypes/hbase-archetype-builder/project-info.html
index c3c3a47..2a64232 100644
--- a/hbase-archetypes/hbase-archetype-builder/project-info.html
+++ b/hbase-archetypes/hbase-archetype-builder/project-info.html
@@ -1,5 +1,5 @@
 <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
-
+
 <html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">
   
 
@@ -10,7 +10,7 @@
   @import url("./css/site.css");
 
 
-
+
 
 
 
@@ -27,7 +27,7 @@
 
 
 
-Last Published: 2017-06-02
+Last Published: 2017-06-03
   | Version: 
2.0.0-SNAPSHOT
   
 Apache HBase - Archetype builder

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c9d35424/hbase-archetypes/hbase-archetype-builder/project-summary.html
--
diff --git a/hbase-archetypes/hbase-archetype-builder/project-summary.html 
b/hbase-archetypes/hbase-archetype-builder/project-summary.html
index e9e7468..5a4c43e 100644
--- a/hbase-archetypes/hbase-archetype-builder/project-summary.html
+++ b/hbase-archetypes/hbase-archetype-builder/project-summary.html
@@ -1,5 +1,5 @@
 <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
-
+
 <html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">
   
 
@@ -10,7 +10,7 @@
   @import url("./css/site.css");
 
 
-
+
 
 
 
@@ -27,7 +27,7 @@
 
 
 
-Last Published: 2017-06-02
+Last Published: 2017-06-03
   | Version: 
2.0.0-SNAPSHOT
   
 Apache HBase - Archetype builder

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c9d35424/hbase-archetypes/hbase-archetype-builder/source-repository.html
--
diff --git a/hbase-archetypes/hbase-archetype-builder/source-repository.html 
b/hbase-archetypes/hbase-archetype-builder/source-repository.html
index 3a336d2..8d6a6a2 100644
--- a/hbase-archetypes/hbase-archetype-builder/source-repository.html
+++ b/hbase-archetypes/hbase-archetype-builder/source-repository.html
@@ -1,5 +1,5 @@
 <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
-
+
 <html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">
   
 
@@ -10,7 +10,7 @@
   @import url("./css/site.css");
 
 
-
+
 
 
 
@@ -27,7 +27,7 @@
 
 
 
-Last Published: 2017-06-02
+Last Published: 2017-06-03
   | Version: 
2.0.0-SNAPSHOT
   
 Apache HBase - Archetype builder

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c9d35424/hbase-archetypes/hbase-archetype-builder/team-list.html
--
diff --git a/hbase-archetypes/hbase-archetype-builder/team-list.html 
b/hbase-archetypes/hbase-archetype-builder/team-list.html
index 70380bd..9dd6d4a 100644
--- a/hbase-archetypes/hbase-archetype-builder/team-list.html
+++ b/hbase-archetypes/hbase-archetype-builder/team-list.html
@@ -1,5 +1,5 @@
 <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
-
+
 <html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">
   
 
@@ -10,7 +10,7 @@
   @import url("./css/site.css");
 
 
-
+
 
 
 
@@ -27,7 +27,7 @@
 
 
 
-Last Published: 2017-06-02
+Last Published: 2017-06-03
   | Version: 
2.0.0-SNAPSHOT
   
 Apache HBase - Archetype builder

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c9d35424/hbase-archetypes/hbase-client-project/checkstyle.html
--
diff --git a/hbase-archetypes/hbase-client-project/checkstyle.html 
b/hbase-archetypes/hbase-client-project/checkstyle.html
index 54d65ef..c885c9b 100644
--- a/hbase-archetypes/hbase-client-project/checkstyle.html
+++ b/hbase-archetypes/hbase-client-project/checkstyle.html
@@ -1,5 +1,5 @@
 <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
-
+
 <html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">
   
 
@@ -10,7 +10,7 @@
   @import url("./css/site.css");
 
 
-
+
 
 
 
@@ -27,7 +27,7 @@
 
 
 
-Last Published: 2017-06-02
+Last Published: 2017-06-03
   | Version: 

[04/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c9d35424/testdevapidocs/org/apache/hadoop/hbase/client/package-frame.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/client/package-frame.html 
b/testdevapidocs/org/apache/hadoop/hbase/client/package-frame.html
index e8a119b..119cc62 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/client/package-frame.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/client/package-frame.html
@@ -180,7 +180,7 @@
 TestReplicasClient.SlowMeCopro
 TestReplicationShell
 TestReplicaWithCluster
-TestReplicaWithCluster.RegionServerHostingPrimayMetaRegionSlowCopro
+TestReplicaWithCluster.RegionServerHostingPrimayMetaRegionSlowOrStopCopro
 TestReplicaWithCluster.RegionServerStoppedCopro
 TestReplicaWithCluster.SlowMeCopro
 TestRestoreSnapshotFromClient

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c9d35424/testdevapidocs/org/apache/hadoop/hbase/client/package-summary.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/client/package-summary.html 
b/testdevapidocs/org/apache/hadoop/hbase/client/package-summary.html
index b02afaa..7979e2e 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/client/package-summary.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/client/package-summary.html
@@ -872,7 +872,7 @@
 
 
 
-TestReplicaWithCluster.RegionServerHostingPrimayMetaRegionSlowCopro
+TestReplicaWithCluster.RegionServerHostingPrimayMetaRegionSlowOrStopCopro
 
 This copro is used to slow down the primary meta region 
scan a bit
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c9d35424/testdevapidocs/org/apache/hadoop/hbase/client/package-tree.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/client/package-tree.html 
b/testdevapidocs/org/apache/hadoop/hbase/client/package-tree.html
index b2f30f5..52ed8e3 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/client/package-tree.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/client/package-tree.html
@@ -386,7 +386,7 @@
 org.apache.hadoop.hbase.client.TestReplicasClient
 org.apache.hadoop.hbase.client.TestReplicasClient.SlowMeCopro (implements 
org.apache.hadoop.hbase.coprocessor.RegionObserver)
 org.apache.hadoop.hbase.client.TestReplicaWithCluster
-org.apache.hadoop.hbase.client.TestReplicaWithCluster.RegionServerHostingPrimayMetaRegionSlowCopro
 (implements org.apache.hadoop.hbase.coprocessor.RegionObserver)
+org.apache.hadoop.hbase.client.TestReplicaWithCluster.RegionServerHostingPrimayMetaRegionSlowOrStopCopro
 (implements org.apache.hadoop.hbase.coprocessor.RegionObserver)
 org.apache.hadoop.hbase.client.TestReplicaWithCluster.RegionServerStoppedCopro 
(implements org.apache.hadoop.hbase.coprocessor.RegionObserver)
 org.apache.hadoop.hbase.client.TestReplicaWithCluster.SlowMeCopro (implements 
org.apache.hadoop.hbase.coprocessor.RegionObserver)
 org.apache.hadoop.hbase.client.TestRestoreSnapshotFromClient

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c9d35424/testdevapidocs/org/apache/hadoop/hbase/io/TestImmutableBytesWritable.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/io/TestImmutableBytesWritable.html 
b/testdevapidocs/org/apache/hadoop/hbase/io/TestImmutableBytesWritable.html
index 3bedf11..963c4d9 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/io/TestImmutableBytesWritable.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/io/TestImmutableBytesWritable.html
@@ -50,7 +50,7 @@ var activeTableTab = "activeTableTab";
 
 
 PrevClass
-NextClass
+NextClass
 
 
 Frames
@@ -358,7 +358,7 @@ extends junit.framework.TestCase
 
 
 PrevClass
-NextClass
+NextClass
 
 
 Frames

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c9d35424/testdevapidocs/org/apache/hadoop/hbase/io/TestMetricsIO.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/io/TestMetricsIO.html 
b/testdevapidocs/org/apache/hadoop/hbase/io/TestMetricsIO.html
new file mode 100644
index 000..0f66059
--- /dev/null
+++ b/testdevapidocs/org/apache/hadoop/hbase/io/TestMetricsIO.html
@@ -0,0 +1,308 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+
+
+
+
+
+TestMetricsIO (Apache HBase 2.0.0-SNAPSHOT Test API)
+
+
+
+
+
+var methods = {"i0":10};
+var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],8:["t4","Concrete Methods"]};
+var altColor = "altColor";
+var rowColor = "rowColor";
+var tableTab = "tableTab";
+var activeTableTab = "activeTableTab";
+
+
+JavaScript is disabled on your browser.
+
+
+
+
+
+Skip navigation 

[11/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c9d35424/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.html
index 5d648ff..fc59087 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.html
@@ -227,622 +227,626 @@
 219  throws IOException {
 220
trailer.setFileInfoOffset(outputStream.getPos());
 221finishFileInfo();
-222fileInfo.write(out);
-223  }
-224
-225  /**
-226   * Checks that the given Cell's key 
does not violate the key order.
-227   *
-228   * @param cell Cell whose key to 
check.
-229   * @return true if the key is 
duplicate
-230   * @throws IOException if the key or 
the key order is wrong
-231   */
-232  protected boolean checkKey(final Cell 
cell) throws IOException {
-233boolean isDuplicateKey = false;
-234
-235if (cell == null) {
-236  throw new IOException("Key cannot 
be null or empty");
-237}
-238if (lastCell != null) {
-239  int keyComp = 
comparator.compareKeyIgnoresMvcc(lastCell, cell);
-240
-241  if (keyComp > 0) {
-242throw new IOException("Added a 
key not lexically larger than"
-243+ " previous. Current cell = 
" + cell + ", lastCell = " + lastCell);
-244  } else if (keyComp == 0) {
-245isDuplicateKey = true;
-246  }
-247}
-248return isDuplicateKey;
-249  }
-250
-251  /** Checks the given value for 
validity. */
-252  protected void checkValue(final byte[] 
value, final int offset,
-253  final int length) throws 
IOException {
-254if (value == null) {
-255  throw new IOException("Value cannot 
be null");
-256}
-257  }
-258
-259  /**
-260   * @return Path or null if we were 
passed a stream rather than a Path.
-261   */
-262  @Override
-263  public Path getPath() {
-264return path;
-265  }
-266
-267  @Override
-268  public String toString() {
-269return "writer=" + (path != null ? 
path.toString() : null) + ", name="
-270+ name + ", compression=" + 
hFileContext.getCompression().getName();
-271  }
-272
-273  public static Compression.Algorithm 
compressionByName(String algoName) {
-274if (algoName == null)
-275  return 
HFile.DEFAULT_COMPRESSION_ALGORITHM;
-276return 
Compression.getCompressionAlgorithmByName(algoName);
-277  }
-278
-279  /** A helper method to create HFile 
output streams in constructors */
-280  protected static FSDataOutputStream 
createOutputStream(Configuration conf,
-281  FileSystem fs, Path path, 
InetSocketAddress[] favoredNodes) throws IOException {
-282FsPermission perms = 
FSUtils.getFilePermissions(fs, conf,
-283
HConstants.DATA_FILE_UMASK_KEY);
-284return FSUtils.create(conf, fs, path, 
perms, favoredNodes);
-285  }
-286
-287  /** Additional initialization steps 
*/
-288  protected void finishInit(final 
Configuration conf) {
-289if (blockWriter != null) {
-290  throw new 
IllegalStateException("finishInit called twice");
-291}
-292
-293blockWriter = new 
HFileBlock.Writer(blockEncoder, hFileContext);
+222long startTime = 
System.currentTimeMillis();
+223fileInfo.write(out);
+224
HFile.updateWriteLatency(System.currentTimeMillis() - startTime);
+225  }
+226
+227  /**
+228   * Checks that the given Cell's key 
does not violate the key order.
+229   *
+230   * @param cell Cell whose key to 
check.
+231   * @return true if the key is 
duplicate
+232   * @throws IOException if the key or 
the key order is wrong
+233   */
+234  protected boolean checkKey(final Cell 
cell) throws IOException {
+235boolean isDuplicateKey = false;
+236
+237if (cell == null) {
+238  throw new IOException("Key cannot 
be null or empty");
+239}
+240if (lastCell != null) {
+241  int keyComp = 
comparator.compareKeyIgnoresMvcc(lastCell, cell);
+242
+243  if (keyComp > 0) {
+244throw new IOException("Added a 
key not lexically larger than"
+245+ " previous. Current cell = 
" + cell + ", lastCell = " + lastCell);
+246  } else if (keyComp == 0) {
+247isDuplicateKey = true;
+248  }
+249}
+250return isDuplicateKey;
+251  }
+252
+253  /** Checks the given value for 
validity. */
+254  protected void checkValue(final byte[] 
value, final int offset,
+255  final int length) throws 
IOException {
+256if (value == null) {
+257  throw new IOException("Value cannot 
be null");
+258}
+259  }
+260
+261  /**
+262   * @return Path or null if we were 
passed a stream rather than a Path.
+263   */
+264  @Override
+265  public Path getPath() {
+266return path;
+267  }
+268
+269  @Override
+270  public String toString() {
+271return "writer=" + (path != null ? 
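
The substantive change in this hunk is the timing wrapper: fileInfo.write(out) is now bracketed by System.currentTimeMillis() calls and the elapsed time is reported to HFile.updateWriteLatency(...). A self-contained sketch of the same measure-and-record pattern; the AtomicLong sink here is a stand-in for the real MetricsIO plumbing, not the actual implementation:

import java.util.concurrent.atomic.AtomicLong;

public class WriteLatencySketch {
  static final AtomicLong totalWriteMillis = new AtomicLong();

  // Stand-in for HFile.updateWriteLatency(long).
  static void updateWriteLatency(long millis) {
    totalWriteMillis.addAndGet(millis);
  }

  public static void main(String[] args) {
    long startTime = System.currentTimeMillis(); // same pattern as the diff
    // ... the timed write goes here (fileInfo.write(out) in HFileWriterImpl) ...
    updateWriteLatency(System.currentTimeMillis() - startTime);
    System.out.println("recorded " + totalWriteMillis.get() + " ms");
  }
}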

[12/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c9d35424/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.BlockIndexNotLoadedException.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.BlockIndexNotLoadedException.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.BlockIndexNotLoadedException.html
index 7e5781e..5011f32 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.BlockIndexNotLoadedException.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.BlockIndexNotLoadedException.html
@@ -1403,7 +1403,7 @@
 1395// Cache Miss, please load.
 1396  }
 1397
-1398  HFileBlock metaBlock = 
fsBlockReader.readBlockData(metaBlockOffset, blockSize, true).
+1398  HFileBlock metaBlock = 
fsBlockReader.readBlockData(metaBlockOffset, blockSize, true, false).
 1399  unpack(hfileContext, 
fsBlockReader);
 1400
 1401  // Cache the block
@@ -1491,7 +1491,7 @@
 1483}
 1484// Load block from filesystem.
 1485HFileBlock hfileBlock =
-1486
fsBlockReader.readBlockData(dataBlockOffset, onDiskBlockSize, pread);
+1486
fsBlockReader.readBlockData(dataBlockOffset, onDiskBlockSize, pread, 
!isCompaction);
 1487validateBlockType(hfileBlock, 
expectedBlockType);
 1488HFileBlock unpacked = 
hfileBlock.unpack(hfileContext, fsBlockReader);
 1489BlockType.BlockCategory category 
= hfileBlock.getBlockType().getCategory();

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c9d35424/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.EncodedScanner.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.EncodedScanner.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.EncodedScanner.html
index 7e5781e..5011f32 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.EncodedScanner.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.EncodedScanner.html
@@ -1403,7 +1403,7 @@
 1395    // Cache Miss, please load.
 1396  }
 1397
-1398  HFileBlock metaBlock = fsBlockReader.readBlockData(metaBlockOffset, blockSize, true).
+1398  HFileBlock metaBlock = fsBlockReader.readBlockData(metaBlockOffset, blockSize, true, false).
 1399      unpack(hfileContext, fsBlockReader);
 1400
 1401  // Cache the block
@@ -1491,7 +1491,7 @@
 1483    }
 1484    // Load block from filesystem.
 1485    HFileBlock hfileBlock =
-1486        fsBlockReader.readBlockData(dataBlockOffset, onDiskBlockSize, pread);
+1486        fsBlockReader.readBlockData(dataBlockOffset, onDiskBlockSize, pread, !isCompaction);
 1487    validateBlockType(hfileBlock, expectedBlockType);
 1488    HFileBlock unpacked = hfileBlock.unpack(hfileContext, fsBlockReader);
 1489    BlockType.BlockCategory category = hfileBlock.getBlockType().getCategory();

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c9d35424/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.HFileScannerImpl.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.HFileScannerImpl.html b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.HFileScannerImpl.html
index 7e5781e..5011f32 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.HFileScannerImpl.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.HFileScannerImpl.html
@@ -1403,7 +1403,7 @@
 1395    // Cache Miss, please load.
 1396  }
 1397
-1398  HFileBlock metaBlock = fsBlockReader.readBlockData(metaBlockOffset, blockSize, true).
+1398  HFileBlock metaBlock = fsBlockReader.readBlockData(metaBlockOffset, blockSize, true, false).
 1399      unpack(hfileContext, fsBlockReader);
 1400
 1401  // Cache the block
@@ -1491,7 +1491,7 @@
 1483    }
 1484    // Load block from filesystem.
 1485    HFileBlock hfileBlock =
-1486        fsBlockReader.readBlockData(dataBlockOffset, onDiskBlockSize, pread);
+1486        fsBlockReader.readBlockData(dataBlockOffset, onDiskBlockSize, pread, !isCompaction);
 1487    validateBlockType(hfileBlock, expectedBlockType);
 1488    HFileBlock unpacked = hfileBlock.unpack(hfileContext, fsBlockReader);
 1489    BlockType.BlockCategory category = hfileBlock.getBlockType().getCategory();

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c9d35424/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.NotSeekedException.html

[06/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c9d35424/hbase-spark/dependencies.html
--
diff --git a/hbase-spark/dependencies.html b/hbase-spark/dependencies.html
index 4b5f368..ce1c24c 100644
--- a/hbase-spark/dependencies.html
+++ b/hbase-spark/dependencies.html
@@ -1,5 +1,5 @@
 http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd;>
-
+
 http://www.w3.org/1999/xhtml; xml:lang="en" lang="en">
   
 
@@ -10,7 +10,7 @@
   @import url("./css/site.css");
 
 
-
+
 
 
 
@@ -27,7 +27,7 @@
 
 
 
-Last Published: 2017-06-02
+Last Published: 2017-06-03
  | Version: 2.0.0-SNAPSHOT
   
 Apache HBase - Spark
@@ -4775,7 +4775,7 @@ The following provides more details on the included cryptographic software:
 -
 
 hbase-server-2.0.0-SNAPSHOT-tests.jar
-7.88 MB
+7.89 MB
 -
 -
 -
@@ -5594,7 +5594,7 @@ The following provides more details on the included cryptographic software:
 Sealed
 
 206
-131.89 MB
+131.90 MB
 76,781
 70,912
 2,235
@@ -5612,7 +5612,7 @@ The following provides more details on the included cryptographic software:
 compile: 1
 
 test: 17
-test: 25.98 MB
+test: 25.99 MB
 test: 6,272
 test: 5,648
 test: 152

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c9d35424/hbase-spark/dependency-convergence.html
--
diff --git a/hbase-spark/dependency-convergence.html b/hbase-spark/dependency-convergence.html
index b1a8c92..ec6d3d1 100644
--- a/hbase-spark/dependency-convergence.html
+++ b/hbase-spark/dependency-convergence.html
@@ -1,5 +1,5 @@
 http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd;>
-
+
 http://www.w3.org/1999/xhtml; xml:lang="en" lang="en">
   
 
@@ -10,7 +10,7 @@
   @import url("./css/site.css");
 
 
-
+
 
 
 
@@ -27,7 +27,7 @@
 
 
 
-Last Published: 2017-06-02
+Last Published: 2017-06-03
  | Version: 2.0.0-SNAPSHOT
   
 Apache HBase - Spark

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c9d35424/hbase-spark/dependency-info.html
--
diff --git a/hbase-spark/dependency-info.html b/hbase-spark/dependency-info.html
index 6e30540..76653d5 100644
--- a/hbase-spark/dependency-info.html
+++ b/hbase-spark/dependency-info.html
@@ -1,5 +1,5 @@
 http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd;>
-
+
 http://www.w3.org/1999/xhtml; xml:lang="en" lang="en">
   
 
@@ -10,7 +10,7 @@
   @import url("./css/site.css");
 
 
-
+
 
 
 
@@ -27,7 +27,7 @@
 
 
 
-Last Published: 2017-06-02
+Last Published: 2017-06-03
  | Version: 2.0.0-SNAPSHOT
   
 Apache HBase - Spark

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c9d35424/hbase-spark/dependency-management.html
--
diff --git a/hbase-spark/dependency-management.html b/hbase-spark/dependency-management.html
index 24b0156..c7818e5 100644
--- a/hbase-spark/dependency-management.html
+++ b/hbase-spark/dependency-management.html
@@ -1,5 +1,5 @@
 http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd;>
-
+
 http://www.w3.org/1999/xhtml; xml:lang="en" lang="en">
   
 
@@ -10,7 +10,7 @@
   @import url("./css/site.css");
 
 
-
+
 
 
 
@@ -27,7 +27,7 @@
 
 
 
-Last Published: 2017-06-02
+Last Published: 2017-06-03
  | Version: 2.0.0-SNAPSHOT
   
 Apache HBase - Spark

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c9d35424/hbase-spark/index.html
--
diff --git a/hbase-spark/index.html b/hbase-spark/index.html
index 001f662..497b2dc 100644
--- a/hbase-spark/index.html
+++ b/hbase-spark/index.html
@@ -1,5 +1,5 @@
 http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd;>
-
+
 http://www.w3.org/1999/xhtml; xml:lang="en" lang="en">
   
 
@@ -10,7 +10,7 @@
   @import url("./css/site.css");
 
 
-
+
 
 
 
@@ -27,7 +27,7 @@
 
 
 
-Last Published: 2017-06-02
+Last Published: 2017-06-03
  | Version: 2.0.0-SNAPSHOT
   
 Apache HBase - Spark


[10/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c9d35424/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceFactory.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceFactory.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceFactory.html
index d236b53..145c30b 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceFactory.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceFactory.html
@@ -26,49 +26,59 @@
 018
 019package org.apache.hadoop.hbase.regionserver;
 020
-021/**
-022 * Interface of a factory to create Metrics Sources used inside of regionservers.
-023 */
-024public interface MetricsRegionServerSourceFactory {
-025
-026  /**
-027   * Given a wrapper create a MetricsRegionServerSource.
-028   *
-029   * @param regionServerWrapper The wrapped region server
-030   * @return a Metrics Source.
-031   */
-032  MetricsRegionServerSource createServer(MetricsRegionServerWrapper regionServerWrapper);
-033
-034  /**
-035   * Create a MetricsRegionSource from a MetricsRegionWrapper.
-036   *
-037   * @param wrapper The wrapped region
-038   * @return A metrics region source
-039   */
-040  MetricsRegionSource createRegion(MetricsRegionWrapper wrapper);
-041
-042  /**
-043   * Create a MetricsTableSource from a MetricsTableWrapper.
-044   *
-045   * @param table The table name
-046   * @param wrapper The wrapped table aggregate
-047   * @return A metrics table source
-048   */
-049  MetricsTableSource createTable(String table, MetricsTableWrapperAggregate wrapper);
-050
-051  /**
-052   * Get a MetricsTableAggregateSource
-053   *
-054   * @return A metrics table aggregate source
-055   */
-056  MetricsTableAggregateSource getTableAggregate();
-057
-058  /**
-059   * Get a MetricsHeapMemoryManagerSource
-060   * @return A metrics heap memory manager source
-061   */
-062  MetricsHeapMemoryManagerSource getHeapMemoryManager();
-063}
+021import org.apache.hadoop.hbase.io.MetricsIOSource;
+022import org.apache.hadoop.hbase.io.MetricsIOWrapper;
+023
+024/**
+025 * Interface of a factory to create Metrics Sources used inside of regionservers.
+026 */
+027public interface MetricsRegionServerSourceFactory {
+028
+029  /**
+030   * Given a wrapper create a MetricsRegionServerSource.
+031   *
+032   * @param regionServerWrapper The wrapped region server
+033   * @return a Metrics Source.
+034   */
+035  MetricsRegionServerSource createServer(MetricsRegionServerWrapper regionServerWrapper);
+036
+037  /**
+038   * Create a MetricsRegionSource from a MetricsRegionWrapper.
+039   *
+040   * @param wrapper The wrapped region
+041   * @return A metrics region source
+042   */
+043  MetricsRegionSource createRegion(MetricsRegionWrapper wrapper);
+044
+045  /**
+046   * Create a MetricsTableSource from a MetricsTableWrapper.
+047   *
+048   * @param table The table name
+049   * @param wrapper The wrapped table aggregate
+050   * @return A metrics table source
+051   */
+052  MetricsTableSource createTable(String table, MetricsTableWrapperAggregate wrapper);
+053
+054  /**
+055   * Get a MetricsTableAggregateSource
+056   *
+057   * @return A metrics table aggregate source
+058   */
+059  MetricsTableAggregateSource getTableAggregate();
+060
+061  /**
+062   * Get a MetricsHeapMemoryManagerSource
+063   * @return A metrics heap memory manager source
+064   */
+065  MetricsHeapMemoryManagerSource getHeapMemoryManager();
+066
+067  /**
+068   * Create a MetricsIOSource from a MetricsIOWrapper.
+069   *
+070   * @return A metrics IO source
+071   */
+072  MetricsIOSource createIO(MetricsIOWrapper wrapper);
+073}
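
The only functional change here is the new createIO method, which builds an IO-metrics source from a MetricsIOWrapper. The following toy sketch models the wrapper-plus-source pattern this interface uses; every name in it is a stand-in, not the real hbase-hadoop-compat API:

interface IOWrapper {                 // stand-in for MetricsIOWrapper
  long checksumFailures();
}

interface IOSource {                  // stand-in for MetricsIOSource
  void updateFsReadTime(long nanos);
}

interface SourceFactory {             // stand-in for the factory above
  IOSource createIO(IOWrapper wrapper);
}

final class SimpleSourceFactory implements SourceFactory {
  @Override
  public IOSource createIO(IOWrapper wrapper) {
    // A real source would register gauges and histograms that read from
    // the wrapper; printing keeps this sketch self-contained.
    return nanos -> System.out.println(
        "fsReadNanos=" + nanos + ", checksumFailures=" + wrapper.checksumFailures());
  }
}

In HBase itself the factory instance is normally obtained through the metrics compatibility singleton rather than constructed directly; that wiring is outside this diff.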
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c9d35424/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceFactoryImpl.FactoryStorage.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceFactoryImpl.FactoryStorage.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceFactoryImpl.FactoryStorage.html
index bc1fd0e..a082014 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceFactoryImpl.FactoryStorage.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceFactoryImpl.FactoryStorage.html
@@ -26,64 +26,71 @@
 018package org.apache.hadoop.hbase.regionserver;
 019
 020import org.apache.hadoop.hbase.classification.InterfaceAudience;
-021
-022/**
-023 * Factory to create MetricsRegionServerSource when given a MetricsRegionServerWrapper
-024 */
-025@InterfaceAudience.Private
-026public class MetricsRegionServerSourceFactoryImpl

[08/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c9d35424/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.Intf.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.Intf.html b/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.Intf.html
index e85e810..6817483 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.Intf.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.Intf.html
@@ -33,10 +33,10 @@
 025  requiredArguments = {
 026    @org.jamon.annotations.Argument(name = "regionServer", type = "HRegionServer")},
 027  optionalArguments = {
-028    @org.jamon.annotations.Argument(name = "bcv", type = "String"),
+028    @org.jamon.annotations.Argument(name = "format", type = "String"),
 029    @org.jamon.annotations.Argument(name = "filter", type = "String"),
-030    @org.jamon.annotations.Argument(name = "format", type = "String"),
-031    @org.jamon.annotations.Argument(name = "bcn", type = "String")})
+030    @org.jamon.annotations.Argument(name = "bcn", type = "String"),
+031    @org.jamon.annotations.Argument(name = "bcv", type = "String")})
 032public class RSStatusTmpl
 033  extends org.jamon.AbstractTemplateProxy
 034{
@@ -77,23 +77,23 @@
 069      return m_regionServer;
 070    }
 071    private HRegionServer m_regionServer;
-072    // 24, 1
-073    public void setBcv(String bcv)
+072    // 22, 1
+073    public void setFormat(String format)
 074    {
-075      // 24, 1
-076      m_bcv = bcv;
-077      m_bcv__IsNotDefault = true;
+075      // 22, 1
+076      m_format = format;
+077      m_format__IsNotDefault = true;
 078    }
-079    public String getBcv()
+079    public String getFormat()
 080    {
-081      return m_bcv;
+081      return m_format;
 082    }
-083    private String m_bcv;
-084    public boolean getBcv__IsNotDefault()
+083    private String m_format;
+084    public boolean getFormat__IsNotDefault()
 085    {
-086      return m_bcv__IsNotDefault;
+086      return m_format__IsNotDefault;
 087    }
-088    private boolean m_bcv__IsNotDefault;
+088    private boolean m_format__IsNotDefault;
 089    // 21, 1
 090    public void setFilter(String filter)
 091    {
@@ -111,40 +111,40 @@
 103      return m_filter__IsNotDefault;
 104    }
 105    private boolean m_filter__IsNotDefault;
-106    // 22, 1
-107    public void setFormat(String format)
+106    // 23, 1
+107    public void setBcn(String bcn)
 108    {
-109      // 22, 1
-110      m_format = format;
-111      m_format__IsNotDefault = true;
+109      // 23, 1
+110      m_bcn = bcn;
+111      m_bcn__IsNotDefault = true;
 112    }
-113    public String getFormat()
+113    public String getBcn()
 114    {
-115      return m_format;
+115      return m_bcn;
 116    }
-117    private String m_format;
-118    public boolean getFormat__IsNotDefault()
+117    private String m_bcn;
+118    public boolean getBcn__IsNotDefault()
 119    {
-120      return m_format__IsNotDefault;
+120      return m_bcn__IsNotDefault;
 121    }
-122    private boolean m_format__IsNotDefault;
-123    // 23, 1
-124    public void setBcn(String bcn)
+122    private boolean m_bcn__IsNotDefault;
+123    // 24, 1
+124    public void setBcv(String bcv)
 125    {
-126      // 23, 1
-127      m_bcn = bcn;
-128      m_bcn__IsNotDefault = true;
+126      // 24, 1
+127      m_bcv = bcv;
+128      m_bcv__IsNotDefault = true;
 129    }
-130    public String getBcn()
+130    public String getBcv()
 131    {
-132      return m_bcn;
+132      return m_bcv;
 133    }
-134    private String m_bcn;
-135    public boolean getBcn__IsNotDefault()
+134    private String m_bcv;
+135    public boolean getBcv__IsNotDefault()
 136    {
-137      return m_bcn__IsNotDefault;
+137      return m_bcv__IsNotDefault;
 138    }
-139    private boolean m_bcn__IsNotDefault;
+139    private boolean m_bcv__IsNotDefault;
 140  }
 141  @Override
 142  protected org.jamon.AbstractTemplateProxy.ImplData makeImplData()
@@ -156,10 +156,10 @@
 148    return (ImplData) super.getImplData();
 149  }
 150  
-151  protected String bcv;
-152  public final org.apache.hadoop.hbase.tmpl.regionserver.RSStatusTmpl setBcv(String p_bcv)
+151  protected String format;
+152  public final org.apache.hadoop.hbase.tmpl.regionserver.RSStatusTmpl setFormat(String p_format)
 153  {
-154    (getImplData()).setBcv(p_bcv);
+154    (getImplData()).setFormat(p_format);
 155    return this;
 156  }
 157  
@@ -170,17 +170,17 @@
 162    return this;
 163  }
 164  
-165  protected String format;
-166  public final org.apache.hadoop.hbase.tmpl.regionserver.RSStatusTmpl setFormat(String p_format)
+165  protected String bcn;
+166  public final org.apache.hadoop.hbase.tmpl.regionserver.RSStatusTmpl setBcn(String p_bcn)
 167  {
-168    (getImplData()).setFormat(p_format);
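
The regenerated Jamon proxy only reorders the optional-argument plumbing for format, filter, bcn and bcv; callers are unaffected because the fluent setters (each returns this, as the hunk shows) keep their names. A usage sketch built only from the setter names visible above; the render(...) signature and the argument values are assumptions:

import java.io.IOException;
import java.io.Writer;

final class RSStatusPageExample {
  static void render(org.apache.hadoop.hbase.tmpl.regionserver.RSStatusTmpl tmpl,
      Writer out, org.apache.hadoop.hbase.regionserver.HRegionServer rs)
      throws IOException {
    tmpl.setFormat("html")    // optional argument
        .setFilter("general") // optional argument
        .setBcn("")           // block cache name (assumed meaning)
        .setBcv("")           // block cache view (assumed meaning)
        .render(out, rs);     // regionServer is the one required argument
  }
}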

[01/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-03 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 3b85ae9b2 -> c9d354248


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c9d35424/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestReplicaWithCluster.RegionServerStoppedCopro.html
--
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestReplicaWithCluster.RegionServerStoppedCopro.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestReplicaWithCluster.RegionServerStoppedCopro.html
index ebf0532..fd80980 100644
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestReplicaWithCluster.RegionServerStoppedCopro.html
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestReplicaWithCluster.RegionServerStoppedCopro.html
@@ -48,493 +48,493 @@
 040import org.apache.hadoop.hbase.HConstants;
 041import org.apache.hadoop.hbase.HTableDescriptor;
 042import org.apache.hadoop.hbase.RegionLocations;
-043import org.apache.hadoop.hbase.Waiter;
-044
-045import org.apache.hadoop.hbase.client.replication.ReplicationAdmin;
-046import org.apache.hadoop.hbase.coprocessor.ObserverContext;
-047import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
-048import org.apache.hadoop.hbase.coprocessor.RegionObserver;
-049import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
-050import org.apache.hadoop.hbase.regionserver.RegionScanner;
-051import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException;
-052import org.apache.hadoop.hbase.regionserver.StorefileRefresherChore;
-053import org.apache.hadoop.hbase.regionserver.TestHRegionServerBulkLoad;
-054import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-055import org.apache.hadoop.hbase.testclassification.ClientTests;
-056import org.apache.hadoop.hbase.testclassification.MediumTests;
-057import org.apache.hadoop.hbase.util.Bytes;
-058import org.apache.hadoop.hbase.util.Pair;
-059import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster;
-060import org.junit.AfterClass;
-061import org.junit.Assert;
-062import org.junit.BeforeClass;
-063import org.junit.Test;
-064import org.junit.experimental.categories.Category;
-065
-066@Category({MediumTests.class, ClientTests.class})
-067public class TestReplicaWithCluster {
-068  private static final Log LOG = LogFactory.getLog(TestReplicaWithCluster.class);
-069
-070  private static final int NB_SERVERS = 3;
-071  private static final byte[] row = TestReplicaWithCluster.class.getName().getBytes();
-072  private static final HBaseTestingUtility HTU = new HBaseTestingUtility();
-073
-074  // second minicluster used in testing of replication
-075  private static HBaseTestingUtility HTU2;
-076  private static final byte[] f = HConstants.CATALOG_FAMILY;
-077
-078  private final static int REFRESH_PERIOD = 1000;
-079  private final static int META_SCAN_TIMEOUT_IN_MILLISEC = 200;
-080
-081  /**
-082   * This copro is used to synchronize the tests.
-083   */
-084  public static class SlowMeCopro implements RegionObserver {
-085    static final AtomicLong sleepTime = new AtomicLong(0);
-086    static final AtomicReference<CountDownLatch> cdl = new AtomicReference<>(new CountDownLatch(0));
-087
-088    public SlowMeCopro() {
-089    }
-090
-091    @Override
-092    public void preGetOp(final ObserverContext<RegionCoprocessorEnvironment> e,
-093                         final Get get, final List<Cell> results) throws IOException {
-094
-095      if (e.getEnvironment().getRegion().getRegionInfo().getReplicaId() == 0) {
-096        CountDownLatch latch = cdl.get();
-097        try {
-098          if (sleepTime.get() > 0) {
-099            LOG.info("Sleeping for " + sleepTime.get() + " ms");
-100            Thread.sleep(sleepTime.get());
-101          } else if (latch.getCount() > 0) {
-102            LOG.info("Waiting for the counterCountDownLatch");
-103            latch.await(2, TimeUnit.MINUTES); // To help the tests to finish.
-104            if (latch.getCount() > 0) {
-105              throw new RuntimeException("Can't wait more");
-106            }
-107          }
-108        } catch (InterruptedException e1) {
-109          LOG.error(e1);
-110        }
-111      } else {
-112        LOG.info("We're not the primary replicas.");
-113      }
-114    }
-115  }
-116
-117  /**
-118   * This copro is used to simulate region server down exception for Get and Scan
-119   */
-120  public static class RegionServerStoppedCopro implements RegionObserver {
-121
-122    public RegionServerStoppedCopro() {
-123    }
-124
-125    @Override
-126    public void preGetOp(final ObserverContext<RegionCoprocessorEnvironment> e,
-127        final Get get, final List<Cell> results) throws IOException {
-128
-129      int replicaId = e.getEnvironment().getRegion().getRegionInfo().getReplicaId();
+043import org.apache.hadoop.hbase.ServerName;
+044import org.apache.hadoop.hbase.TableName;
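
SlowMeCopro's synchronization trick deserves a distilled view: the test swaps a fresh CountDownLatch into a shared AtomicReference, the primary replica's read path blocks on it, and the test releases it once the secondary-replica path has been exercised. A dependency-free sketch of the same gate (all names invented):

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;

final class LatchGate {
  // Open by default: a latch already at zero never blocks.
  static final AtomicReference<CountDownLatch> GATE =
      new AtomicReference<>(new CountDownLatch(0));

  // Called on the code path under test (the primary-replica read above).
  static void maybeStall() throws InterruptedException {
    CountDownLatch latch = GATE.get();
    if (latch.getCount() > 0 && !latch.await(2, TimeUnit.MINUTES)) {
      throw new IllegalStateException("Can't wait more"); // fail, don't hang
    }
  }

  public static void main(String[] args) throws Exception {
    GATE.set(new CountDownLatch(1)); // close the gate before the test action
    Thread primary = new Thread(() -> {
      try {
        maybeStall();
        System.out.println("primary read proceeded");
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
      }
    });
    primary.start();
    System.out.println("secondary replica served while primary is stalled");
    GATE.get().countDown(); // release the gate
    primary.join();
  }
}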

[05/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c9d35424/testdevapidocs/org/apache/hadoop/hbase/client/TestReplicaWithCluster.RegionServerHostingPrimayMetaRegionSlowCopro.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/client/TestReplicaWithCluster.RegionServerHostingPrimayMetaRegionSlowCopro.html b/testdevapidocs/org/apache/hadoop/hbase/client/TestReplicaWithCluster.RegionServerHostingPrimayMetaRegionSlowCopro.html
deleted file mode 100644
index 6f2c479..000
--- a/testdevapidocs/org/apache/hadoop/hbase/client/TestReplicaWithCluster.RegionServerHostingPrimayMetaRegionSlowCopro.html
+++ /dev/null
@@ -1,372 +0,0 @@
-http://www.w3.org/TR/html4/loose.dtd;>
-
-
-
-
-
-TestReplicaWithCluster.RegionServerHostingPrimayMetaRegionSlowCopro (Apache HBase 2.0.0-SNAPSHOT Test API)
-
-
-
-
-
-var methods = {"i0":10};
-var tabs = {65535:["t0","All Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]};
-var altColor = "altColor";
-var rowColor = "rowColor";
-var tableTab = "tableTab";
-var activeTableTab = "activeTableTab";
-
-
-JavaScript is disabled on your browser.
-
-
-
-
-
-Skip navigation links
-
-
-
-
-Overview
-Package
-Class
-Use
-Tree
-Deprecated
-Index
-Help
-
-
-
-
-PrevClass
-NextClass
-
-
-Frames
-NoFrames
-
-
-AllClasses
-
-
-
-
-
-
-
-Summary:
-Nested|
-Field|
-Constr|
-Method
-
-
-Detail:
-Field|
-Constr|
-Method
-
-
-
-
-
-
-
-
-org.apache.hadoop.hbase.client
-Class 
TestReplicaWithCluster.RegionServerHostingPrimayMetaRegionSlowCopro
-
-
-
-http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">java.lang.Object
-
-
-org.apache.hadoop.hbase.client.TestReplicaWithCluster.RegionServerHostingPrimayMetaRegionSlowCopro
-
-
-
-
-
-
-
-All Implemented Interfaces:
-org.apache.hadoop.hbase.Coprocessor, 
org.apache.hadoop.hbase.coprocessor.RegionObserver
-
-
-Enclosing class:
-TestReplicaWithCluster
-
-
-
-public static class TestReplicaWithCluster.RegionServerHostingPrimayMetaRegionSlowCopro
-extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true; title="class or interface in java.lang">Object
-implements org.apache.hadoop.hbase.coprocessor.RegionObserver
-This copro is used to slow down the primary meta region scan a bit
-
-
-
-
-
-
-
-
-
-
-
-Nested Class Summary
-
-
-
-
-Nested classes/interfaces inherited from interface org.apache.hadoop.hbase.coprocessor.RegionObserver
-org.apache.hadoop.hbase.coprocessor.RegionObserver.MutationType
-
-
-
-
-
-Nested classes/interfaces inherited from interface org.apache.hadoop.hbase.Coprocessor
-org.apache.hadoop.hbase.Coprocessor.State
-
-
-
-
-
-
-
-
-Field Summary
-
-Fields
-
-Modifier and Type
-Field and Description
-
-
-(package private) static boolean
-slowDownPrimaryMetaScan
-
-
-
-
-
-
-Fields inherited from interface org.apache.hadoop.hbase.Coprocessor
-PRIORITY_HIGHEST, PRIORITY_LOWEST, PRIORITY_SYSTEM, PRIORITY_USER, VERSION
-
-
-
-
-
-
-
-
-Constructor Summary
-
-Constructors
-
-Constructor and Description
-
-
-RegionServerHostingPrimayMetaRegionSlowCopro()
-
-
-
-
-
-
-
-
-
-Method Summary
-
-All MethodsInstance MethodsConcrete Methods
-
-Modifier and Type
-Method and Description
-
-
-org.apache.hadoop.hbase.regionserver.RegionScanner
-preScannerOpen(org.apache.hadoop.hbase.coprocessor.ObserverContext<org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment> e,
-  org.apache.hadoop.hbase.client.Scan scan,
-  org.apache.hadoop.hbase.regionserver.RegionScanner s)
-
-
-
-
-
-
-Methods inherited from classjava.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
-http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#clone--;
 title="class or interface in java.lang">clone, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#equals-java.lang.Object-;
 title="class or interface in java.lang">equals, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#finalize--;
 title="class or interface in java.lang">finalize, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#getClass--;
 title="class or interface in java.lang">getClass, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#hashCode--;
 title="class or interface in java.lang">hashCode,