[08/30] hbase-site git commit: Published site at 931156f66b1decc19d89f8bb3ce9e5f355fb4fb2.

2018-10-22 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6b8b907f/testdevapidocs/org/apache/hadoop/hbase/HBaseTestingUtility.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/HBaseTestingUtility.html b/testdevapidocs/org/apache/hadoop/hbase/HBaseTestingUtility.html
index 85db2da..a8efa41 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/HBaseTestingUtility.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/HBaseTestingUtility.html
@@ -124,7 +124,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Public
-public class HBaseTestingUtility
+public class HBaseTestingUtility
 extends HBaseZKTestingUtility
 Facility for testing HBase. Replacement for old HBaseTestCase and HBaseClusterTestCase functionality.
@@ -140,8 +140,7 @@
 To preserve test data directories, pass the system property
 "hbase.testing.preserve.testdir"
- setting it to true.
- Trigger pre commit.
+ setting it to true.
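For reference, a minimal sketch of a test that uses the property described above to keep its data directory around, assuming the standard HBaseTestingUtility mini-cluster lifecycle:

    // Keep the test data directory after the run for post-mortem inspection.
    System.setProperty("hbase.testing.preserve.testdir", "true");

    HBaseTestingUtility util = new HBaseTestingUtility();
    util.startMiniCluster();           // brings up ZK, DFS and HBase mini clusters
    try {
      // ... exercise the cluster through util.getConnection() ...
    } finally {
      util.shutdownMiniCluster();      // the data dir survives because of the property
    }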
 
 
 
@@ -2019,7 +2018,7 @@
 TEST_DIRECTORY_KEY
 @Deprecated
-private static final String TEST_DIRECTORY_KEY
+private static final String TEST_DIRECTORY_KEY
 Deprecated. Can be used only with mini dfs.
 System property key to get test directory value. Name is as it is because mini dfs has
 hard-codings to put test data here. It should NOT be used directly in HBase, as it's a property
@@ -2036,7 +2035,7 @@


 REGIONS_PER_SERVER_KEY
-public static final String REGIONS_PER_SERVER_KEY
+public static final String REGIONS_PER_SERVER_KEY

 See Also:
 Constant Field Values
@@ -2049,7 +2048,7 @@


 DEFAULT_REGIONS_PER_SERVER
-public static final int DEFAULT_REGIONS_PER_SERVER
+public static final int DEFAULT_REGIONS_PER_SERVER
 The default number of regions per regionserver when creating a pre-split table.
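A hedged sketch of how the pre-split constants above are typically exercised; createPreSplitLoadTestTable is HBaseTestingUtility's own helper, though its exact overloads vary across HBase versions:

    import org.apache.hadoop.hbase.HBaseTestingUtility;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.io.compress.Compression;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.util.Bytes;

    public class PreSplitDemo {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtility util = new HBaseTestingUtility();
        util.startMiniCluster();
        try {
          // Regions per server fall back to DEFAULT_REGIONS_PER_SERVER unless
          // overridden through the property named by REGIONS_PER_SERVER_KEY.
          int regions = HBaseTestingUtility.createPreSplitLoadTestTable(
              util.getConfiguration(), TableName.valueOf("presplit_demo"),
              Bytes.toBytes("cf"), Compression.Algorithm.NONE, DataBlockEncoding.NONE);
          System.out.println("created " + regions + " regions");
        } finally {
          util.shutdownMiniCluster();
        }
      }
    }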
 
@@ -2064,7 +2063,7 @@


 PRESPLIT_TEST_TABLE_KEY
-public static final String PRESPLIT_TEST_TABLE_KEY
+public static final String PRESPLIT_TEST_TABLE_KEY

 See Also:
 Constant Field Values
@@ -2077,7 +2076,7 @@


 PRESPLIT_TEST_TABLE
-public static final boolean PRESPLIT_TEST_TABLE
+public static final boolean PRESPLIT_TEST_TABLE

 See Also:
 Constant Field Values
@@ -2090,7 +2089,7 @@


 dfsCluster
-private org.apache.hadoop.hdfs.MiniDFSCluster dfsCluster
+private org.apache.hadoop.hdfs.MiniDFSCluster dfsCluster
 
 
 
@@ -2099,7 +2098,7 @@


 hbaseCluster
-private volatile HBaseCluster hbaseCluster
+private volatile HBaseCluster hbaseCluster
 
 
 
@@ -2108,7 +2107,7 @@


 mrCluster
-private org.apache.hadoop.mapred.MiniMRCluster mrCluster
+private org.apache.hadoop.mapred.MiniMRCluster mrCluster
 
 
 
@@ -2117,7 +2116,7 @@


 miniClusterRunning
-private volatile boolean miniClusterRunning
+private volatile boolean miniClusterRunning
 If there is a mini cluster running for this testing utility instance.
 
 
@@ -2127,7 +2126,7 @@


 hadoopLogDir
-private String hadoopLogDir
+private String hadoopLogDir
 
 
 
@@ -2136,7 +2135,7 @@


 dataTestDirOnTestFS
-private org.apache.hadoop.fs.Path dataTestDirOnTestFS
+private org.apache.hadoop.fs.Path dataTestDirOnTestFS
 Directory on test filesystem where we put the data for this instance of HBaseTestingUtility
 
@@ -2147,7 +2146,7 @@ 

[08/30] hbase-site git commit: Published site at a8e184dc77470bdf9d62e19c5d36bc1de7cf4c6d.

2018-07-31 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/5fd895c6/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.ServerErrorTracker.ServerErrors.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.ServerErrorTracker.ServerErrors.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.ServerErrorTracker.ServerErrors.html
index 1ad6fc7..99b6bf2 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.ServerErrorTracker.ServerErrors.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.ServerErrorTracker.ServerErrors.html
@@ -54,1964 +54,1980 @@
 046import java.util.concurrent.atomic.AtomicInteger;
 047import java.util.concurrent.locks.ReentrantLock;
 048import org.apache.hadoop.conf.Configuration;
-049import org.apache.hadoop.hbase.CallQueueTooBigException;
-050import org.apache.hadoop.hbase.DoNotRetryIOException;
-051import org.apache.hadoop.hbase.HConstants;
-052import org.apache.hadoop.hbase.HRegionLocation;
-053import org.apache.hadoop.hbase.MasterNotRunningException;
-054import org.apache.hadoop.hbase.MetaTableAccessor;
-055import org.apache.hadoop.hbase.RegionLocations;
-056import org.apache.hadoop.hbase.ServerName;
-057import org.apache.hadoop.hbase.TableName;
-058import org.apache.hadoop.hbase.TableNotEnabledException;
-059import org.apache.hadoop.hbase.TableNotFoundException;
-060import org.apache.hadoop.hbase.ZooKeeperConnectionException;
-061import org.apache.hadoop.hbase.client.Scan.ReadType;
-062import org.apache.hadoop.hbase.client.backoff.ClientBackoffPolicy;
-063import org.apache.hadoop.hbase.client.backoff.ClientBackoffPolicyFactory;
-064import org.apache.hadoop.hbase.exceptions.ClientExceptionsUtil;
-065import org.apache.hadoop.hbase.exceptions.RegionMovedException;
-066import org.apache.hadoop.hbase.ipc.RpcClient;
-067import org.apache.hadoop.hbase.ipc.RpcClientFactory;
-068import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
-069import org.apache.hadoop.hbase.log.HBaseMarkers;
-070import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException;
-071import org.apache.hadoop.hbase.security.User;
-072import org.apache.hadoop.hbase.util.Bytes;
-073import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-074import org.apache.hadoop.hbase.util.ExceptionUtil;
-075import org.apache.hadoop.hbase.util.Pair;
-076import org.apache.hadoop.hbase.util.ReflectionUtils;
-077import org.apache.hadoop.hbase.util.Threads;
-078import org.apache.hadoop.ipc.RemoteException;
-079import org.apache.yetus.audience.InterfaceAudience;
-080import org.apache.zookeeper.KeeperException;
-081import org.slf4j.Logger;
-082import org.slf4j.LoggerFactory;
-083
-084import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-085import org.apache.hbase.thirdparty.com.google.common.base.Throwables;
-086import org.apache.hbase.thirdparty.com.google.protobuf.BlockingRpcChannel;
-087import org.apache.hbase.thirdparty.com.google.protobuf.RpcController;
-088import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException;
-089import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-090import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
-091import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos;
-092import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
-093import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService.BlockingInterface;
-094import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos;
-095import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DecommissionRegionServersRequest;
-096import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DecommissionRegionServersResponse;
-097import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsBalancerEnabledRequest;
-098import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsBalancerEnabledResponse;
-099import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsNormalizerEnabledRequest;
-100import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsNormalizerEnabledResponse;
-101import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDecommissionedRegionServersRequest;
-102import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDecommissionedRegionServersResponse;
-103import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.NormalizeRequest;
-104import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.NormalizeResponse;
-105import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RecommissionRegionServerRequest;
-106import 

[08/30] hbase-site git commit: Published site at 59867eeeebd28fcc49f338ef36769fb6a9bff4dc.

2018-07-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/67e3bccd/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html
index 6cd6a17..85dd23b 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html
@@ -2119,6526 +2119,6532 @@
 2111  }
 2112
 2113  status = TaskMonitor.get().createStatus("Compacting " + store + " in " + this);
-2114  if (this.closed.get()) {
-2115    String msg = "Skipping compaction on " + this + " because closed";
-2116    LOG.debug(msg);
-2117    status.abort(msg);
-2118    return false;
-2119  }
-2120  boolean wasStateSet = false;
-2121  try {
-2122    synchronized (writestate) {
-2123      if (writestate.writesEnabled) {
-2124        wasStateSet = true;
-2125        writestate.compacting.incrementAndGet();
-2126      } else {
-2127        String msg = "NOT compacting region " + this + ". Writes disabled.";
-2128        LOG.info(msg);
-2129        status.abort(msg);
-2130        return false;
-2131      }
-2132    }
-2133    LOG.info("Starting compaction of {} in {}{}", store, this,
-2134        (compaction.getRequest().isOffPeak()?" as an off-peak compaction":""));
-2135    doRegionCompactionPrep();
-2136    try {
-2137      status.setStatus("Compacting store " + store);
-2138      // We no longer need to cancel the request on the way out of this
-2139      // method because Store#compact will clean up unconditionally
-2140      requestNeedsCancellation = false;
-2141      store.compact(compaction, throughputController, user);
-2142    } catch (InterruptedIOException iioe) {
-2143      String msg = "compaction interrupted";
-2144      LOG.info(msg, iioe);
-2145      status.abort(msg);
-2146      return false;
-2147    }
-2148  } finally {
-2149    if (wasStateSet) {
-2150      synchronized (writestate) {
-2151        writestate.compacting.decrementAndGet();
-2152        if (writestate.compacting.get() <= 0) {
-2153          writestate.notifyAll();
-2154        }
-2155      }
-2156    }
-2157  }
-2158  status.markComplete("Compaction complete");
-2159  return true;
-2160} finally {
-2161  if (requestNeedsCancellation) store.cancelRequestedCompaction(compaction);
-2162  if (status != null) status.cleanup();
-2163}
-2164  }
-2165
-2166  /**
-2167   * Flush the cache.
-2168   *
-2169   * <p>When this method is called the cache will be flushed unless:
-2170   * <ol>
-2171   *   <li>the cache is empty</li>
-2172   *   <li>the region is closed.</li>
-2173   *   <li>a flush is already in progress</li>
-2174   *   <li>writes are disabled</li>
-2175   * </ol>
-2176   *
-2177   * <p>This method may block for some time, so it should not be called from a
-2178   * time-sensitive thread.
-2179   * @param force whether we want to force a flush of all stores
-2180   * @return FlushResult indicating whether the flush was successful or not and if
-2181   * the region needs compacting
-2182   *
-2183   * @throws IOException general io exceptions
-2184   * because a snapshot was not properly persisted.
-2185   */
-2186  // TODO HBASE-18905. We might have to expose a requestFlush API for CPs
-2187  public FlushResult flush(boolean force) throws IOException {
-2188    return flushcache(force, false, FlushLifeCycleTracker.DUMMY);
-2189  }
-2190
-2191  public interface FlushResult {
-2192    enum Result {
-2193      FLUSHED_NO_COMPACTION_NEEDED,
-2194      FLUSHED_COMPACTION_NEEDED,
-2195      // Special case where a flush didn't run because there's nothing in the memstores. Used when
-2196      // bulk loading to know when we can still load even if a flush didn't happen.
-2197      CANNOT_FLUSH_MEMSTORE_EMPTY,
-2198      CANNOT_FLUSH
-2199    }
-2200
-2201    /** @return the detailed result code */
-2202    Result getResult();
-2203
-2204    /** @return true if the memstores were flushed, else false */
-2205    boolean isFlushSucceeded();
-2206
-2207    /** @return True if the flush requested a compaction, else false */
-2208    boolean isCompactionNeeded();
-2209  }
+2114  status.enableStatusJournal(false);
+2115  if (this.closed.get()) {
+2116    String msg = "Skipping compaction on " + this + " because closed";
+2117    LOG.debug(msg);
+2118    status.abort(msg);
+2119    return false;
+2120  }
+2121  boolean wasStateSet = false;
+2122  try {
+2123    synchronized (writestate) {
+2124      if 
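For orientation, the FlushResult contract shown above maps onto a simple caller-side pattern; this is a hedged sketch using the names from this listing only (client code normally flushes through Admin, not HRegion):

    HRegion.FlushResult result = region.flush(true);   // force-flush all stores
    if (result.isFlushSucceeded()) {
      if (result.isCompactionNeeded()) {
        // the flush produced enough store files that a compaction was requested
      }
    } else if (result.getResult() == HRegion.FlushResult.Result.CANNOT_FLUSH_MEMSTORE_EMPTY) {
      // nothing was in the memstores; bulk loads may proceed even without a flush
    }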

[08/30] hbase-site git commit: Published site at .

2018-01-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e80e3339/hbase-build-configuration/hbase-spark/plugins.html
--
diff --git a/hbase-build-configuration/hbase-spark/plugins.html b/hbase-build-configuration/hbase-spark/plugins.html
index 9d7a14d..ff0c7eb 100644
--- a/hbase-build-configuration/hbase-spark/plugins.html
+++ b/hbase-build-configuration/hbase-spark/plugins.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase - Spark  Project Plugins
 
@@ -238,7 +238,7 @@
 The Apache Software Foundation (https://www.apache.org/).
 All rights reserved.
 
-  Last Published: 2018-01-14
+  Last Published: 2018-01-15
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e80e3339/hbase-build-configuration/hbase-spark/project-info.html
--
diff --git a/hbase-build-configuration/hbase-spark/project-info.html b/hbase-build-configuration/hbase-spark/project-info.html
index 8ff76b5..1075f6b 100644
--- a/hbase-build-configuration/hbase-spark/project-info.html
+++ b/hbase-build-configuration/hbase-spark/project-info.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase - Spark  Project Information
 
@@ -167,7 +167,7 @@
 The Apache Software Foundation (https://www.apache.org/).
 All rights reserved.
 
-  Last Published: 2018-01-14
+  Last Published: 2018-01-15
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e80e3339/hbase-build-configuration/hbase-spark/project-reports.html
--
diff --git a/hbase-build-configuration/hbase-spark/project-reports.html b/hbase-build-configuration/hbase-spark/project-reports.html
index 4a23769..fd7ba03 100644
--- a/hbase-build-configuration/hbase-spark/project-reports.html
+++ b/hbase-build-configuration/hbase-spark/project-reports.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase - Spark  Generated Reports
 
@@ -131,7 +131,7 @@
 The Apache Software Foundation (https://www.apache.org/).
 All rights reserved.
 
-  Last Published: 2018-01-14
+  Last Published: 2018-01-15
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e80e3339/hbase-build-configuration/hbase-spark/project-summary.html
--
diff --git a/hbase-build-configuration/hbase-spark/project-summary.html b/hbase-build-configuration/hbase-spark/project-summary.html
index f7a222d..034b2d7 100644
--- a/hbase-build-configuration/hbase-spark/project-summary.html
+++ b/hbase-build-configuration/hbase-spark/project-summary.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase - Spark  Project Summary
 
@@ -166,7 +166,7 @@
 The Apache Software Foundation (https://www.apache.org/).
 All rights reserved.
 
-  Last Published: 2018-01-14
+  Last Published: 2018-01-15
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e80e3339/hbase-build-configuration/hbase-spark/source-repository.html
--
diff --git a/hbase-build-configuration/hbase-spark/source-repository.html b/hbase-build-configuration/hbase-spark/source-repository.html
index 3a6f851..d09ad64 100644
--- a/hbase-build-configuration/hbase-spark/source-repository.html
+++ b/hbase-build-configuration/hbase-spark/source-repository.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase - Spark  Source Code Management
 
@@ -134,7 +134,7 @@
 The Apache Software Foundation (https://www.apache.org/).
 All rights reserved.
 
-  Last Published: 2018-01-14
+  Last Published: 2018-01-15
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e80e3339/hbase-build-configuration/hbase-spark/team-list.html
--
diff --git a/hbase-build-configuration/hbase-spark/team-list.html b/hbase-build-configuration/hbase-spark/team-list.html
index c841e35..e49457f 100644
--- a/hbase-build-configuration/hbase-spark/team-list.html
+++ b/hbase-build-configuration/hbase-spark/team-list.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase - Spark  Project Team
 
@@ -553,7 +553,7 @@
 

[08/30] hbase-site git commit: Published site at .

2017-09-23 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dc5c2985/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockContext.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockContext.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockContext.html
index 6848d28..69caaf6 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockContext.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockContext.html
@@ -212,7941 +212,7899 @@
 204  public static final String LOAD_CFS_ON_DEMAND_CONFIG_KEY =
 205      "hbase.hregion.scan.loadColumnFamiliesOnDemand";
 206
-207  public static final String HREGION_UNASSIGN_FOR_FNFE = "hbase.hregion.unassign.for.fnfe";
-208  public static final boolean DEFAULT_HREGION_UNASSIGN_FOR_FNFE = true;
+207  public static final String HBASE_MAX_CELL_SIZE_KEY = "hbase.server.keyvalue.maxsize";
+208  public static final int DEFAULT_MAX_CELL_SIZE = 10485760;
 209
-210  public static final String HBASE_MAX_CELL_SIZE_KEY = "hbase.server.keyvalue.maxsize";
-211  public static final int DEFAULT_MAX_CELL_SIZE = 10485760;
-212
-213  /**
-214   * This is the global default value for durability. All tables/mutations not
-215   * defining a durability or using USE_DEFAULT will default to this value.
-216   */
-217  private static final Durability DEFAULT_DURABILITY = Durability.SYNC_WAL;
-218
-219  final AtomicBoolean closed = new AtomicBoolean(false);
-220
-221  /* Closing can take some time; use the closing flag if there is stuff we don't
-222   * want to do while in closing state; e.g. like offer this region up to the
-223   * master as a region to close if the carrying regionserver is overloaded.
-224   * Once set, it is never cleared.
-225   */
-226  final AtomicBoolean closing = new AtomicBoolean(false);
-227
-228  /**
-229   * The max sequence id of flushed data on this region. There is no edit in memory that is
-230   * less that this sequence id.
-231   */
-232  private volatile long maxFlushedSeqId = HConstants.NO_SEQNUM;
-233
-234  /**
-235   * Record the sequence id of last flush operation. Can be in advance of
-236   * {@link #maxFlushedSeqId} when flushing a single column family. In this case,
-237   * {@link #maxFlushedSeqId} will be older than the oldest edit in memory.
-238   */
-239  private volatile long lastFlushOpSeqId = HConstants.NO_SEQNUM;
-240
-241  /**
-242   * The sequence id of the last replayed open region event from the primary region. This is used
-243   * to skip entries before this due to the possibility of replay edits coming out of order from
-244   * replication.
-245   */
-246  protected volatile long lastReplayedOpenRegionSeqId = -1L;
-247  protected volatile long lastReplayedCompactionSeqId = -1L;
-248
-249  //////////////////////////////////////////////////////////////////////////////
-250  // Members
-251  //////////////////////////////////////////////////////////////////////////////
-252
-253  // map from a locked row to the context for that lock including:
-254  // - CountDownLatch for threads waiting on that row
-255  // - the thread that owns the lock (allow reentrancy)
-256  // - reference count of (reentrant) locks held by the thread
-257  // - the row itself
-258  private final ConcurrentHashMap<HashedBytes, RowLockContext> lockedRows =
-259      new ConcurrentHashMap<>();
+210  /**
+211   * This is the global default value for durability. All tables/mutations not
+212   * defining a durability or using USE_DEFAULT will default to this value.
+213   */
+214  private static final Durability DEFAULT_DURABILITY = Durability.SYNC_WAL;
+215
+216  final AtomicBoolean closed = new AtomicBoolean(false);
+217
+218  /* Closing can take some time; use the closing flag if there is stuff we don't
+219   * want to do while in closing state; e.g. like offer this region up to the
+220   * master as a region to close if the carrying regionserver is overloaded.
+221   * Once set, it is never cleared.
+222   */
+223  final AtomicBoolean closing = new AtomicBoolean(false);
+224
+225  /**
+226   * The max sequence id of flushed data on this region. There is no edit in memory that is
+227   * less that this sequence id.
+228   */
+229  private volatile long maxFlushedSeqId = HConstants.NO_SEQNUM;
+230
+231  /**
+232   * Record the sequence id of last flush operation. Can be in advance of
+233   * {@link #maxFlushedSeqId} when flushing a single column family. In this case,
+234   * {@link #maxFlushedSeqId} will be older than the oldest edit in memory.
+235   */
+236  private volatile long lastFlushOpSeqId = HConstants.NO_SEQNUM;
+237
+238  /**
+239   * The sequence id of the last replayed open region event from the primary region. This is used
+240   * to skip entries before this due to the possibility of replay edits 
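The lockedRows comment above describes the shape of HBase's per-row lock bookkeeping. A minimal generic sketch of that pattern, with illustrative names rather than HBase's actual RowLockContext:

    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.CountDownLatch;

    // One context per locked row: waiters park on the latch, the owning thread
    // may re-enter, and refCount tracks reentrant acquisitions.
    final class RowLockSketch {
      static final class Context {
        final CountDownLatch latch = new CountDownLatch(1);
        final Thread owner = Thread.currentThread();
        int refCount = 1;
      }

      private final ConcurrentHashMap<String, Context> lockedRows = new ConcurrentHashMap<>();

      void lock(String rowKey) throws InterruptedException {
        for (;;) {
          Context existing = lockedRows.putIfAbsent(rowKey, new Context());
          if (existing == null) return;                      // acquired fresh
          if (existing.owner == Thread.currentThread()) {    // reentrant acquire
            existing.refCount++;
            return;
          }
          existing.latch.await();                            // wait for release, retry
        }
      }

      void unlock(String rowKey) {
        Context ctx = lockedRows.get(rowKey);
        if (ctx != null && --ctx.refCount == 0) {
          lockedRows.remove(rowKey);
          ctx.latch.countDown();                             // wake waiters
        }
      }
    }

The real implementation is more careful about allocation and failure paths, but the map/latch/refcount structure matches the comment above.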

[08/30] hbase-site git commit: Published site at .

2017-09-18 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2bb8bd01/testdevapidocs/src-html/org/apache/hadoop/hbase/namespace/TestNamespaceAuditor.CustomObserver.html
--
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/namespace/TestNamespaceAuditor.CustomObserver.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/namespace/TestNamespaceAuditor.CustomObserver.html
index 82eb9fd..8eb952a 100644
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/namespace/TestNamespaceAuditor.CustomObserver.html
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/namespace/TestNamespaceAuditor.CustomObserver.html
@@ -71,668 +71,655 @@
 063import org.apache.hadoop.hbase.coprocessor.ObserverContext;
 064import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
 065import org.apache.hadoop.hbase.coprocessor.RegionObserver;
-066import org.apache.hadoop.hbase.coprocessor.RegionServerCoprocessorEnvironment;
-067import org.apache.hadoop.hbase.coprocessor.RegionServerObserver;
-068import org.apache.hadoop.hbase.master.HMaster;
-069import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
-070import org.apache.hadoop.hbase.master.TableNamespaceManager;
-071import org.apache.hadoop.hbase.quotas.MasterQuotaManager;
-072import org.apache.hadoop.hbase.quotas.QuotaExceededException;
-073import org.apache.hadoop.hbase.quotas.QuotaUtil;
-074import org.apache.hadoop.hbase.regionserver.Region;
-075import org.apache.hadoop.hbase.regionserver.Store;
-076import org.apache.hadoop.hbase.regionserver.StoreFile;
-077import org.apache.hadoop.hbase.regionserver.compactions.CompactionLifeCycleTracker;
-078import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
-079import org.apache.hadoop.hbase.snapshot.RestoreSnapshotException;
-080import org.apache.hadoop.hbase.testclassification.MediumTests;
-081import org.apache.hadoop.hbase.util.Bytes;
-082import org.apache.hadoop.hbase.util.FSUtils;
-083import org.apache.hadoop.hbase.util.Threads;
-084import org.apache.zookeeper.KeeperException;
-085import org.junit.After;
-086import org.junit.AfterClass;
-087import org.junit.BeforeClass;
-088import org.junit.Rule;
-089import org.junit.Test;
-090import org.junit.experimental.categories.Category;
-091import org.junit.rules.TestRule;
-092
-093@Category(MediumTests.class)
-094public class TestNamespaceAuditor {
-095  @Rule public final TestRule timeout = CategoryBasedTimeout.builder().
-096      withTimeout(this.getClass()).withLookingForStuckThread(true).build();
-097  private static final Log LOG = LogFactory.getLog(TestNamespaceAuditor.class);
-098  private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
-099  private static Admin ADMIN;
-100  private String prefix = "TestNamespaceAuditor";
-101
-102  @BeforeClass
-103  public static void before() throws Exception {
-104    Configuration conf = UTIL.getConfiguration();
-105    conf.set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, CustomObserver.class.getName());
-106    conf.setStrings(
-107      CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY,
-108      MasterSyncObserver.class.getName(), CPMasterObserver.class.getName());
-109    conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 5);
-110    conf.setBoolean(QuotaUtil.QUOTA_CONF_KEY, true);
-111    conf.setClass("hbase.coprocessor.regionserver.classes", CPRegionServerObserver.class,
-112      RegionServerObserver.class);
-113    UTIL.startMiniCluster(1, 1);
-114    waitForQuotaInitialize(UTIL);
-115    ADMIN = UTIL.getAdmin();
-116  }
-117
-118  @AfterClass
-119  public static void tearDown() throws Exception {
-120    UTIL.shutdownMiniCluster();
-121  }
-122
-123  @After
-124  public void cleanup() throws Exception, KeeperException {
-125    for (HTableDescriptor table : ADMIN.listTables()) {
-126      ADMIN.disableTable(table.getTableName());
-127      deleteTable(table.getTableName());
-128    }
-129    for (NamespaceDescriptor ns : ADMIN.listNamespaceDescriptors()) {
-130      if (ns.getName().startsWith(prefix)) {
-131        ADMIN.deleteNamespace(ns.getName());
-132      }
-133    }
-134    assertTrue("Quota manager not initialized", UTIL.getHBaseCluster().getMaster()
-135        .getMasterQuotaManager().isQuotaInitialized());
-136  }
-137
-138  @Test
-139  public void testTableOperations() throws Exception {
-140    String nsp = prefix + "_np2";
-141    NamespaceDescriptor nspDesc =
-142        NamespaceDescriptor.create(nsp).addConfiguration(TableNamespaceManager.KEY_MAX_REGIONS, "5")
-143        .addConfiguration(TableNamespaceManager.KEY_MAX_TABLES, "2").build();
-144    ADMIN.createNamespace(nspDesc);
-145    assertNotNull("Namespace descriptor found null.", ADMIN.getNamespaceDescriptor(nsp));
-146    assertEquals(ADMIN.listNamespaceDescriptors().length, 3);
-147    HColumnDescriptor fam1 = new HColumnDescriptor("fam1");
-148
-149    HTableDescriptor 
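The truncated test above goes on to verify enforcement; a hedged sketch of the behavior under test, with a hypothetical tableDesc() helper standing in for the descriptor boilerplate:

    // Namespace capped at two tables via TableNamespaceManager.KEY_MAX_TABLES.
    ADMIN.createNamespace(NamespaceDescriptor.create(prefix + "_demo")
        .addConfiguration(TableNamespaceManager.KEY_MAX_TABLES, "2").build());
    ADMIN.createTable(tableDesc(prefix + "_demo:t1"));
    ADMIN.createTable(tableDesc(prefix + "_demo:t2"));
    try {
      ADMIN.createTable(tableDesc(prefix + "_demo:t3"));  // third table exceeds the quota
    } catch (QuotaExceededException expected) {
      // the namespace auditor rejects the create once the limit is reached
    }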

[08/30] hbase-site git commit: Published site at .

2017-08-19 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/495ddb86/testdevapidocs/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsWithDeletes.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsWithDeletes.html b/testdevapidocs/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsWithDeletes.html
index db3cffc..e9224de 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsWithDeletes.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsWithDeletes.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = {"i0":9,"i1":9,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":9,"i11":10,"i12":9,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10};
+var methods = {"i0":9,"i1":9,"i2":9,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":9,"i12":10,"i13":9,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10};
 var tabs = {65535:["t0","All Methods"],1:["t1","Static Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -50,7 +50,7 @@ var activeTableTab = "activeTableTab";
 
 
 PrevClass
-NextClass
+NextClass
 
 
 Frames
@@ -74,7 +74,7 @@ var activeTableTab = "activeTableTab";
 
 
 Summary:
-Nested|
+Nested|
 Field|
 Constr|
 Method
@@ -113,7 +113,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-public class TestVisibilityLabelsWithDeletes
+public class TestVisibilityLabelsWithDeletes
 extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
 Tests visibility labels with deletes
 
@@ -122,6 +122,25 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 
+
+
+
+
+
+Nested Class Summary
+
+Nested Classes
+
+Modifier and Type
+Class and Description
+
+
+private static class
+TestVisibilityLabelsWithDeletes.DeleteMark
+
+
+
+
 
 
 
@@ -147,58 +166,62 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 fam
 
 
+private static 
org.apache.commons.logging.Log
+LOG
+
+
 private static http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
 PRIVATE
 
-
+
 private static http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
 PUBLIC
 
-
+
 protected static byte[]
 qual
 
-
+
 private static byte[]
 qual1
 
-
+
 private static byte[]
 qual2
 
-
+
 private static byte[]
 row1
 
-
+
 private static byte[]
 row2
 
-
+
 private static http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
 SECRET
 
-
+
 static 
org.apache.hadoop.hbase.security.User
 SUPERUSER
 
-
+
 org.junit.rules.TestName
 TEST_NAME
 
-
+
 static HBaseTestingUtility
 TEST_UTIL
 
-
+
 private static http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
 TOPSECRET
 
-
+
 protected static byte[]
 value
 
-
+
 private static byte[]
 value1
 
@@ -235,231 +258,257 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 Method and Description
 
 
+private static 
org.apache.hadoop.hbase.client.Delete
+addDeleteMark(org.apache.hadoop.hbase.client.Deleted,
+ TestVisibilityLabelsWithDeletes.DeleteMarkmark,
+ longnow)
+
+
 static void
 addLabels()
 
-
+
 static Thttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListT
 createList(T...ts)
 
-
+
 protected 
org.apache.hadoop.hbase.client.Table
 createTable(org.apache.hadoop.hbase.HColumnDescriptorfam)
 
-
+
 org.apache.hadoop.hbase.client.Table
 createTableAndWriteDataWithLabels(long[]timestamp,
  http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in 
java.lang">String...labelExps)
 
-
+
 org.apache.hadoop.hbase.client.Table
 

[08/30] hbase-site git commit: Published site at .

2017-08-18 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/cef8af03/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/StoreScanner.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/StoreScanner.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/StoreScanner.html
index bc8bc72..70f4e9d 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/StoreScanner.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/StoreScanner.html
@@ -56,1126 +56,1127 @@
 048import org.apache.hadoop.hbase.regionserver.querymatcher.LegacyScanQueryMatcher;
 049import org.apache.hadoop.hbase.regionserver.querymatcher.ScanQueryMatcher;
 050import org.apache.hadoop.hbase.regionserver.querymatcher.UserScanQueryMatcher;
-051import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
-052import org.apache.hadoop.hbase.util.CollectionUtils;
-053import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-054
-055/**
-056 * Scanner scans both the memstore and the Store. Coalesce KeyValue stream into List<KeyValue>
-057 * for a single row.
-058 * <p>
-059 * The implementation is not thread safe. So there will be no race between next and close. The only
-060 * exception is updateReaders, it will be called in the memstore flush thread to indicate that there
-061 * is a flush.
-062 */
-063@InterfaceAudience.Private
-064public class StoreScanner extends NonReversedNonLazyKeyValueScanner
-065    implements KeyValueScanner, InternalScanner, ChangedReadersObserver {
-066  private static final Log LOG = LogFactory.getLog(StoreScanner.class);
-067  // In unit tests, the store could be null
-068  protected final Store store;
-069  private ScanQueryMatcher matcher;
-070  protected KeyValueHeap heap;
-071  private boolean cacheBlocks;
-072
-073  private long countPerRow = 0;
-074  private int storeLimit = -1;
-075  private int storeOffset = 0;
-076
-077  // Used to indicate that the scanner has closed (see HBASE-1107)
-078  // Do not need to be volatile because it's always accessed via synchronized methods
-079  private boolean closing = false;
-080  private final boolean get;
-081  private final boolean explicitColumnQuery;
-082  private final boolean useRowColBloom;
-083  /**
-084   * A flag that enables StoreFileScanner parallel-seeking
-085   */
-086  private boolean parallelSeekEnabled = false;
-087  private ExecutorService executor;
-088  private final Scan scan;
-089  private final long oldestUnexpiredTS;
-090  private final long now;
-091  private final int minVersions;
-092  private final long maxRowSize;
-093  private final long cellsPerHeartbeatCheck;
-094
-095  // 1) Collects all the KVHeap that are eagerly getting closed during the
-096  //    course of a scan
-097  // 2) Collects the unused memstore scanners. If we close the memstore scanners
-098  //    before sending data to client, the chunk may be reclaimed by other
-099  //    updates and the data will be corrupt.
-100  private final List<KeyValueScanner> scannersForDelayedClose = new ArrayList<>();
-101
-102  /**
-103   * The number of KVs seen by the scanner. Includes explicitly skipped KVs, but not
-104   * KVs skipped via seeking to next row/column. TODO: estimate them?
-105   */
-106  private long kvsScanned = 0;
-107  protected Cell prevCell = null;
-108
-109  private final long preadMaxBytes;
-110  private long bytesRead;
-111
-112  /** We don't ever expect to change this, the constant is just for clarity. */
-113  static final boolean LAZY_SEEK_ENABLED_BY_DEFAULT = true;
-114  public static final String STORESCANNER_PARALLEL_SEEK_ENABLE =
-115      "hbase.storescanner.parallel.seek.enable";
-116
-117  /** Used during unit testing to ensure that lazy seek does save seek ops */
-118  private static boolean lazySeekEnabledGlobally = LAZY_SEEK_ENABLED_BY_DEFAULT;
-119
-120  /**
-121   * The number of cells scanned in between timeout checks. Specifying a larger value means that
-122   * timeout checks will occur less frequently. Specifying a small value will lead to more frequent
-123   * timeout checks.
-124   */
-125  public static final String HBASE_CELLS_SCANNED_PER_HEARTBEAT_CHECK =
-126      "hbase.cells.scanned.per.heartbeat.check";
-127
-128  /**
-129   * Default value of {@link #HBASE_CELLS_SCANNED_PER_HEARTBEAT_CHECK}.
-130   */
-131  public static final long DEFAULT_HBASE_CELLS_SCANNED_PER_HEARTBEAT_CHECK = 10000;
-132
-133  /**
-134   * If the read type if Scan.ReadType.DEFAULT, we will start with pread, and if the kvs we scanned
-135   * reaches this limit, we will reopen the scanner with stream. The default value is 4 times of
-136   * block size for this store.
-137   */
-138  public static final String STORESCANNER_PREAD_MAX_BYTES = "hbase.storescanner.pread.max.bytes";
-139
-140  private final Scan.ReadType readType;
-141
-142  // A flag whether use pread for 
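The constants above are server-side tuning knobs read from configuration; a hedged example of setting them (values are illustrative only):

    Configuration conf = HBaseConfiguration.create();
    // Check scanner heartbeats/timeouts less often than the default.
    conf.setLong("hbase.cells.scanned.per.heartbeat.check", 20000);
    // With Scan.ReadType.DEFAULT, switch from pread to streaming reads after
    // this many bytes (the code defaults to 4x the store's block size).
    conf.setLong("hbase.storescanner.pread.max.bytes", 4 * 65536);
    // Enable parallel seeking of store file scanners.
    conf.setBoolean("hbase.storescanner.parallel.seek.enable", true);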

[08/30] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-19 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7f23ee04/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceWALReader.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceWALReader.html b/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceWALReader.html
new file mode 100644
index 000..876fec9
--- /dev/null
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceWALReader.html
@@ -0,0 +1,574 @@
+http://www.w3.org/TR/html4/loose.dtd;>
+
+
+Source code
+
+
+
+
+001/**
+002 *
+003 * Licensed to the Apache Software Foundation (ASF) under one
+004 * or more contributor license agreements.  See the NOTICE file
+005 * distributed with this work for additional information
+006 * regarding copyright ownership.  The ASF licenses this file
+007 * to you under the Apache License, Version 2.0 (the
+008 * "License"); you may not use this file except in compliance
+009 * with the License.  You may obtain a copy of the License at
+010 *
+011 *   http://www.apache.org/licenses/LICENSE-2.0
+012 *
+013 * Unless required by applicable law or agreed to in writing, software
+014 * distributed under the License is distributed on an "AS IS" BASIS,
+015 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+016 * See the License for the specific language governing permissions and
+017 * limitations under the License.
+018 */
+019package org.apache.hadoop.hbase.replication.regionserver;
+020
+021import java.io.EOFException;
+022import java.io.IOException;
+023import java.util.ArrayList;
+024import java.util.HashMap;
+025import java.util.List;
+026import java.util.Map;
+027import java.util.concurrent.BlockingQueue;
+028import java.util.concurrent.LinkedBlockingQueue;
+029import java.util.concurrent.PriorityBlockingQueue;
+030import java.util.concurrent.atomic.AtomicLong;
+031
+032import org.apache.commons.logging.Log;
+033import org.apache.commons.logging.LogFactory;
+034import org.apache.hadoop.conf.Configuration;
+035import org.apache.hadoop.fs.FileSystem;
+036import org.apache.hadoop.fs.Path;
+037import org.apache.hadoop.hbase.Cell;
+038import org.apache.hadoop.hbase.CellUtil;
+039import org.apache.hadoop.hbase.HConstants;
+040import org.apache.hadoop.hbase.classification.InterfaceAudience;
+041import org.apache.hadoop.hbase.classification.InterfaceStability;
+042import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos;
+043import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.BulkLoadDescriptor;
+044import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.StoreDescriptor;
+045import org.apache.hadoop.hbase.regionserver.RSRpcServices;
+046import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
+047import org.apache.hadoop.hbase.replication.ReplicationQueueInfo;
+048import org.apache.hadoop.hbase.replication.WALEntryFilter;
+049import org.apache.hadoop.hbase.replication.regionserver.WALEntryStream.WALEntryStreamRuntimeException;
+050import org.apache.hadoop.hbase.util.Bytes;
+051import org.apache.hadoop.hbase.util.Pair;
+052import org.apache.hadoop.hbase.util.Threads;
+053import org.apache.hadoop.hbase.wal.WAL.Entry;
+054
+055/**
+056 * Reads and filters WAL entries, groups the filtered entries into batches, and puts the batches onto a queue
+057 *
+058 */
+059@InterfaceAudience.Private
+060@InterfaceStability.Evolving
+061public class ReplicationSourceWALReader extends Thread {
+062  private static final Log LOG = LogFactory.getLog(ReplicationSourceWALReader.class);
+063
+064  private final PriorityBlockingQueue<Path> logQueue;
+065  private final FileSystem fs;
+066  private final Configuration conf;
+067  private final WALEntryFilter filter;
+068  private final ReplicationSource source;
+069
+070  protected final BlockingQueue<WALEntryBatch> entryBatchQueue;
+071  // max (heap) size of each batch - multiply by number of batches in queue to get total
+072  private final long replicationBatchSizeCapacity;
+073  // max count of each batch - multiply by number of batches in queue to get total
+074  protected final int replicationBatchCountCapacity;
+075  // position in the WAL to start reading at
+076  private long currentPosition;
+077  private final long sleepForRetries;
+078  private final int maxRetriesMultiplier;
+079  private final boolean eofAutoRecovery;
+080
+081  //Indicates whether this particular worker is running
+082  private boolean isReaderRunning = true;
+083
+084  private AtomicLong totalBufferUsed;
+085  private long totalBufferQuota;
+086
+087  /**
+088   * Creates a reader worker for a given WAL queue. Reads WAL entries off a given queue, batches the
+089   * entries, and puts them on a batch queue.
+090   * @param fs the files system to use
+091   * @param conf 
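The class above is a bounded producer: read, filter, batch, then block on a queue. A minimal generic sketch of that shape in plain Java (not HBase's actual WALEntryBatch types):

    import java.util.ArrayList;
    import java.util.List;
    import java.util.concurrent.BlockingQueue;
    import java.util.concurrent.LinkedBlockingQueue;

    // Illustrative reader thread: drain a source, group entries into fixed-size
    // batches, and hand batches to consumers through a bounded queue. put()
    // blocks when consumers fall behind, which provides the back-pressure.
    final class BatchingReader<T> extends Thread {
      private final Iterable<T> source;
      private final int batchCapacity;
      final BlockingQueue<List<T>> batchQueue = new LinkedBlockingQueue<>(4);

      BatchingReader(Iterable<T> source, int batchCapacity) {
        this.source = source;
        this.batchCapacity = batchCapacity;
      }

      @Override public void run() {
        List<T> batch = new ArrayList<>(batchCapacity);
        try {
          for (T entry : source) {                 // "read and filter entries"
            batch.add(entry);
            if (batch.size() >= batchCapacity) {
              batchQueue.put(batch);               // blocks if the queue is full
              batch = new ArrayList<>(batchCapacity);
            }
          }
          if (!batch.isEmpty()) batchQueue.put(batch);
        } catch (InterruptedException e) {
          Thread.currentThread().interrupt();
        }
      }
    }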

[08/30] hbase-site git commit: Published site at 28cd48b673ca743d193874b2951bc995699e8e89.

2016-02-24 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/89b638a4/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.html
--
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.html
index 1bdbf69..f83e138 100644
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.html
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.html
@@ -86,478 +86,483 @@
 078import org.junit.Rule;
 079import org.junit.Test;
 080import org.junit.experimental.categories.Category;
-081import org.junit.rules.TestRule;
-082
-083import com.google.common.base.Joiner;
-084import com.google.protobuf.RpcController;
-085import com.google.protobuf.ServiceException;
-086
-087/**
-088 * Like {@link TestRegionMergeTransaction} in that we're testing
-089 * {@link RegionMergeTransactionImpl} only the below tests are against a running
-090 * cluster where {@link TestRegionMergeTransaction} is tests against bare
-091 * {@link HRegion}.
-092 */
-093@Category({RegionServerTests.class, LargeTests.class})
-094public class TestRegionMergeTransactionOnCluster {
-095  private static final Log LOG = LogFactory
-096      .getLog(TestRegionMergeTransactionOnCluster.class);
-097  @Rule public final TestRule timeout = CategoryBasedTimeout.builder().withTimeout(this.getClass()).
-098      withLookingForStuckThread(true).build();
-099  private static final int NB_SERVERS = 3;
-100
-101  private static final byte[] FAMILYNAME = Bytes.toBytes("fam");
-102  private static final byte[] QUALIFIER = Bytes.toBytes("q");
-103
-104  private static byte[] ROW = Bytes.toBytes("testRow");
-105  private static final int INITIAL_REGION_NUM = 10;
-106  private static final int ROWSIZE = 200;
-107  private static byte[][] ROWS = makeN(ROW, ROWSIZE);
-108
-109  private static int waitTime = 60 * 1000;
+081import org.junit.rules.TestName;
+082import org.junit.rules.TestRule;
+083
+084import com.google.common.base.Joiner;
+085import com.google.protobuf.RpcController;
+086import com.google.protobuf.ServiceException;
+087
+088/**
+089 * Like {@link TestRegionMergeTransaction} in that we're testing
+090 * {@link RegionMergeTransactionImpl} only the below tests are against a running
+091 * cluster where {@link TestRegionMergeTransaction} is tests against bare
+092 * {@link HRegion}.
+093 */
+094@Category({RegionServerTests.class, LargeTests.class})
+095public class TestRegionMergeTransactionOnCluster {
+096  private static final Log LOG = LogFactory
+097      .getLog(TestRegionMergeTransactionOnCluster.class);
+098  @Rule public TestName name = new TestName();
+099  @Rule public final TestRule timeout = CategoryBasedTimeout.builder().withTimeout(this.getClass()).
+100      withLookingForStuckThread(true).build();
+101  private static final int NB_SERVERS = 3;
+102
+103  private static final byte[] FAMILYNAME = Bytes.toBytes("fam");
+104  private static final byte[] QUALIFIER = Bytes.toBytes("q");
+105
+106  private static byte[] ROW = Bytes.toBytes("testRow");
+107  private static final int INITIAL_REGION_NUM = 10;
+108  private static final int ROWSIZE = 200;
+109  private static byte[][] ROWS = makeN(ROW, ROWSIZE);
 110
-111  static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+111  private static int waitTime = 60 * 1000;
 112
-113  private static HMaster MASTER;
-114  private static Admin ADMIN;
-115
-116  @BeforeClass
-117  public static void beforeAllTests() throws Exception {
-118    // Start a cluster
-119    TEST_UTIL.startMiniCluster(1, NB_SERVERS, null, MyMaster.class, null);
-120    MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
-121    MASTER = cluster.getMaster();
-122    MASTER.balanceSwitch(false);
-123    ADMIN = TEST_UTIL.getConnection().getAdmin();
-124  }
-125
-126  @AfterClass
-127  public static void afterAllTests() throws Exception {
-128    TEST_UTIL.shutdownMiniCluster();
-129    if (ADMIN != null) ADMIN.close();
-130  }
-131
-132  @Test
-133  public void testWholesomeMerge() throws Exception {
-134    LOG.info("Starting testWholesomeMerge");
-135    final TableName tableName =
-136        TableName.valueOf("testWholesomeMerge");
-137
-138    // Create table and load data.
-139    Table table = createTableAndLoadData(MASTER, tableName);
-140    // Merge 1st and 2nd region
-141    mergeRegionsAndVerifyRegionNum(MASTER, tableName, 0, 1,
-142        INITIAL_REGION_NUM - 1);
-143
-144    // Merge 2nd and 3th region
-145    PairOfSameType<HRegionInfo> mergedRegions =
-146        mergeRegionsAndVerifyRegionNum(MASTER, tableName, 1, 2,
-147            INITIAL_REGION_NUM - 2);
-148
-149    verifyRowCount(table, ROWSIZE);
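The helpers above wrap the client-side merge API; a hedged sketch of driving the same operation directly (mergeRegionsAsync is the HBase 2.x Admin call; the 1.x era of this listing used slightly different signatures):

    List<RegionInfo> regions = admin.getRegions(tableName);
    // Merge the first two regions and wait for completion.
    admin.mergeRegionsAsync(
        regions.get(0).getEncodedNameAsBytes(),
        regions.get(1).getEncodedNameAsBytes(),
        false /* forcible */).get();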

[08/30] hbase-site git commit: Published site at 845d00a16bc22cced0a2eead3d0ba48989968fb6.

2016-01-27 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6d411951/testdevapidocs/org/apache/hadoop/hbase/security/TestSecureIPC.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/security/TestSecureIPC.html b/testdevapidocs/org/apache/hadoop/hbase/security/TestSecureIPC.html
index 91c461a..83ac59d 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/security/TestSecureIPC.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/security/TestSecureIPC.html
@@ -60,7 +60,7 @@
 
 
 Summary:
-Nested|
+Nested|
 Field|
 Constr|
 Method
@@ -108,6 +108,21 @@ extends 
 
 
+
+
+
+
+
+Nested Class Summary
+
+
+
+
+Nested classes/interfaces inherited from class org.apache.hadoop.hbase.security.AbstractTestSecureIPC
+AbstractTestSecureIPC.TestThread
+
+
+
 
 
 
@@ -119,7 +134,7 @@ extends AbstractTestSecureIPC
-clientConf, exception, krbKeytab, krbPrincipal, serverConf, ugi
+clientConf, exception, krbKeytab, krbPrincipal, serverConf, SERVICE, ugi
 
 
 
@@ -266,7 +281,7 @@ extends 
 Summary:
-Nested|
+Nested|
 Field|
 Constr|
 Method
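AbstractTestSecureIPC wires client and server Configurations for Kerberos; a hedged sketch of the knobs involved (standard configuration keys, with placeholder principal and keytab that real tests obtain from a MiniKdc):

    Configuration conf = HBaseConfiguration.create();
    conf.set("hadoop.security.authentication", "kerberos");
    conf.set("hbase.security.authentication", "kerberos");
    UserGroupInformation.setConfiguration(conf);
    UserGroupInformation.loginUserFromKeytab(
        "hbase/localhost@EXAMPLE.COM", "/path/to/hbase.keytab");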

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6d411951/testdevapidocs/org/apache/hadoop/hbase/security/class-use/AbstractTestSecureIPC.TestThread.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/security/class-use/AbstractTestSecureIPC.TestThread.html b/testdevapidocs/org/apache/hadoop/hbase/security/class-use/AbstractTestSecureIPC.TestThread.html
new file mode 100644
index 000..57cd626
--- /dev/null
+++ b/testdevapidocs/org/apache/hadoop/hbase/security/class-use/AbstractTestSecureIPC.TestThread.html
@@ -0,0 +1,115 @@
+Uses of Class org.apache.hadoop.hbase.security.AbstractTestSecureIPC.TestThread (Apache HBase 2.0.0-SNAPSHOT Test API)
+
+No usage of org.apache.hadoop.hbase.security.AbstractTestSecureIPC.TestThread
+
+Copyright © 2007-2016 The Apache Software Foundation (http://www.apache.org/). All rights reserved.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6d411951/testdevapidocs/org/apache/hadoop/hbase/security/package-frame.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/security/package-frame.html b/testdevapidocs/org/apache/hadoop/hbase/security/package-frame.html
index 6d23726..c7157d0 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/security/package-frame.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/security/package-frame.html
@@ -12,6 +12,7 @@
 Classes
 
 AbstractTestSecureIPC
+AbstractTestSecureIPC.TestThread
 HBaseKerberosUtils
 TestAsyncSecureIPC
 TestEncryptionUtil

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6d411951/testdevapidocs/org/apache/hadoop/hbase/security/package-summary.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/security/package-summary.html b/testdevapidocs/org/apache/hadoop/hbase/security/package-summary.html
index 066085b..7e67d26 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/security/package-summary.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/security/package-summary.html
@@ -79,34 +79,38 @@
 
 
 
-HBaseKerberosUtils
+AbstractTestSecureIPC.TestThread
 
 
 
-TestAsyncSecureIPC
+HBaseKerberosUtils
 
 
 
-TestEncryptionUtil
+TestAsyncSecureIPC
 
 
 
-TestHBaseSaslRpcClient
+TestEncryptionUtil
 
 
 
-TestSaslUtil
+TestHBaseSaslRpcClient
 
 
 
-TestSecureIPC
+TestSaslUtil
 
 
 
-TestUser
+TestSecureIPC
 
 
 
+TestUser
+
+
+
 TestUsersOperationsWithSecureHadoop
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6d411951/testdevapidocs/org/apache/hadoop/hbase/security/package-tree.html