hbase-site git commit: INFRA-10751 Empty commit

2018-03-17 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site bd675fa38 -> 67b0f751b


INFRA-10751 Empty commit


Project: http://git-wip-us.apache.org/repos/asf/hbase-site/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase-site/commit/67b0f751
Tree: http://git-wip-us.apache.org/repos/asf/hbase-site/tree/67b0f751
Diff: http://git-wip-us.apache.org/repos/asf/hbase-site/diff/67b0f751

Branch: refs/heads/asf-site
Commit: 67b0f751b836330c1148415f6a6063b52a459e46
Parents: bd675fa
Author: jenkins 
Authored: Sat Mar 17 14:53:57 2018 +
Committer: jenkins 
Committed: Sat Mar 17 14:53:57 2018 +

--

--




[39/51] [partial] hbase-site git commit: Published site at 00095a2ef9442e3fd86c04876c9d91f2f8b23ad8.

2018-03-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bd675fa3/devapidocs/org/apache/hadoop/hbase/master/procedure/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/master/procedure/package-tree.html b/devapidocs/org/apache/hadoop/hbase/master/procedure/package-tree.html
index 7b8e220..e602642 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/procedure/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/procedure/package-tree.html
@@ -208,9 +208,9 @@
 
 java.lang.Enum<E> (implements java.lang.Comparable<T>, java.io.Serializable)
 
-org.apache.hadoop.hbase.master.procedure.PeerProcedureInterface.PeerOperationType
-org.apache.hadoop.hbase.master.procedure.ServerProcedureInterface.ServerOperationType
 org.apache.hadoop.hbase.master.procedure.TableProcedureInterface.TableOperationType
+org.apache.hadoop.hbase.master.procedure.ServerProcedureInterface.ServerOperationType
+org.apache.hadoop.hbase.master.procedure.PeerProcedureInterface.PeerOperationType
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bd675fa3/devapidocs/org/apache/hadoop/hbase/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/package-tree.html b/devapidocs/org/apache/hadoop/hbase/package-tree.html
index 1abc2a5..ffe2db4 100644
--- a/devapidocs/org/apache/hadoop/hbase/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/package-tree.html
@@ -441,19 +441,19 @@
 
 java.lang.Enum<E> (implements java.lang.Comparable<T>, java.io.Serializable)
 
-org.apache.hadoop.hbase.MetaTableAccessor.QueryType
-org.apache.hadoop.hbase.HealthChecker.HealthCheckerExitStatus
-org.apache.hadoop.hbase.CellBuilderType
-org.apache.hadoop.hbase.ClusterMetrics.Option
-org.apache.hadoop.hbase.HConstants.OperationStatusCode
-org.apache.hadoop.hbase.MemoryCompactionPolicy
+org.apache.hadoop.hbase.KeepDeletedCells
 org.apache.hadoop.hbase.KeyValue.Type
-org.apache.hadoop.hbase.CompareOperator
 org.apache.hadoop.hbase.CompatibilitySingletonFactory.SingletonStorage
-org.apache.hadoop.hbase.KeepDeletedCells
-org.apache.hadoop.hbase.Coprocessor.State
 org.apache.hadoop.hbase.Cell.Type
 org.apache.hadoop.hbase.Size.Unit
+org.apache.hadoop.hbase.MemoryCompactionPolicy
+org.apache.hadoop.hbase.Coprocessor.State
+org.apache.hadoop.hbase.HConstants.OperationStatusCode
+org.apache.hadoop.hbase.MetaTableAccessor.QueryType
+org.apache.hadoop.hbase.CompareOperator
+org.apache.hadoop.hbase.CellBuilderType
+org.apache.hadoop.hbase.HealthChecker.HealthCheckerExitStatus
+org.apache.hadoop.hbase.ClusterMetrics.Option
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bd675fa3/devapidocs/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.KeepAliveWorkerThread.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.KeepAliveWorkerThread.html b/devapidocs/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.KeepAliveWorkerThread.html
index 632d837..b432f6e 100644
--- a/devapidocs/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.KeepAliveWorkerThread.html
+++ b/devapidocs/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.KeepAliveWorkerThread.html
@@ -132,7 +132,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-private final class ProcedureExecutor.KeepAliveWorkerThread
+private final class ProcedureExecutor.KeepAliveWorkerThread
 extends ProcedureExecutor.WorkerThread
 
 
@@ -252,7 +252,7 @@ extends 
 
 KeepAliveWorkerThread
-public KeepAliveWorkerThread(ThreadGroup group)
+public KeepAliveWorkerThread(ThreadGroup group)
 
 
 
@@ -269,7 +269,7 @@ extends 
 
 keepAlive
-protected boolean keepAlive(long lastUpdate)
+protected boolean keepAlive(long lastUpdate)
 
 Overrides:
 keepAlive in class ProcedureExecutor.WorkerThread
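
The class in this hunk is a worker that polls for work and asks keepAlive(lastUpdate) whether it may keep going; the keep-alive variant answers no once it has sat idle past a timeout, so a temporarily enlarged worker pool can shrink back. A minimal, self-contained sketch of that idea follows; the queue, the 500 ms timeout, and the class bodies are illustrative stand-ins, not the HBase procedure2 implementation.

import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;

public class KeepAliveWorkerDemo {
  static final long KEEP_ALIVE_MS = 500; // illustrative timeout
  static final BlockingQueue<Runnable> queue = new LinkedBlockingQueue<>();

  static class WorkerThread extends Thread {
    WorkerThread(ThreadGroup group) { super(group, "worker"); }

    /** Subclasses decide whether an idle worker should stay alive. */
    protected boolean keepAlive(long lastUpdate) { return true; }

    @Override public void run() {
      long lastUpdate = System.currentTimeMillis();
      while (keepAlive(lastUpdate)) {
        try {
          Runnable task = queue.poll(100, TimeUnit.MILLISECONDS);
          if (task != null) {
            task.run();
            lastUpdate = System.currentTimeMillis(); // did work, reset the idle clock
          }
        } catch (InterruptedException e) {
          return;
        }
      }
    }
  }

  /** A worker that exits after KEEP_ALIVE_MS of idleness. */
  static class KeepAliveWorkerThread extends WorkerThread {
    KeepAliveWorkerThread(ThreadGroup group) { super(group); }
    @Override protected boolean keepAlive(long lastUpdate) {
      return System.currentTimeMillis() - lastUpdate < KEEP_ALIVE_MS;
    }
  }

  public static void main(String[] args) throws Exception {
    Thread t = new KeepAliveWorkerThread(Thread.currentThread().getThreadGroup());
    t.start();
    queue.add(() -> System.out.println("task ran"));
    t.join(); // worker dies roughly KEEP_ALIVE_MS after its last task
  }
}

Keeping the exit policy in one small overridable method is the shape the hunk above shows: the base run() loop stays shared, only keepAlive() differs.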


[12/51] [partial] hbase-site git commit: Published site at 00095a2ef9442e3fd86c04876c9d91f2f8b23ad8.

2018-03-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bd675fa3/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/RegionLocationFinder.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/RegionLocationFinder.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/RegionLocationFinder.html
index d654af2..3cec2fd 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/RegionLocationFinder.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/RegionLocationFinder.html
@@ -29,312 +29,289 @@
 021import java.io.IOException;
 022import java.util.ArrayList;
 023import java.util.Collection;
-024import java.util.HashMap;
-025import java.util.List;
-026import java.util.concurrent.Callable;
-027import java.util.concurrent.ExecutionException;
-028import java.util.concurrent.Executors;
-029import java.util.concurrent.TimeUnit;
-030import org.apache.hadoop.conf.Configuration;
-031import org.apache.hadoop.hbase.ClusterMetrics;
-032import org.apache.hadoop.hbase.HDFSBlocksDistribution;
-033import org.apache.hadoop.hbase.ServerName;
-034import org.apache.hadoop.hbase.TableName;
-035import org.apache.hadoop.hbase.client.RegionInfo;
-036import org.apache.hadoop.hbase.client.TableDescriptor;
-037import org.apache.hadoop.hbase.master.MasterServices;
-038import org.apache.hadoop.hbase.master.assignment.AssignmentManager;
-039import org.apache.hadoop.hbase.regionserver.HRegion;
-040import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-041import org.apache.yetus.audience.InterfaceAudience;
-042import org.slf4j.Logger;
-043import org.slf4j.LoggerFactory;
-044import org.apache.hbase.thirdparty.com.google.common.cache.CacheBuilder;
-045import org.apache.hbase.thirdparty.com.google.common.cache.CacheLoader;
-046import org.apache.hbase.thirdparty.com.google.common.cache.LoadingCache;
-047import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
-048import org.apache.hbase.thirdparty.com.google.common.util.concurrent.Futures;
-049import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ListenableFuture;
-050import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ListeningExecutorService;
-051import org.apache.hbase.thirdparty.com.google.common.util.concurrent.MoreExecutors;
-052import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
-053
-054/**
-055 * This will find where data for a region is located in HDFS. It ranks
-056 * {@link ServerName}'s by the size of the store files they are holding for a
-057 * given region.
-058 *
-059 */
-060@InterfaceAudience.Private
-061class RegionLocationFinder {
-062  private static final Logger LOG = LoggerFactory.getLogger(RegionLocationFinder.class);
-063  private static final long CACHE_TIME = 240 * 60 * 1000;
-064  private static final HDFSBlocksDistribution EMPTY_BLOCK_DISTRIBUTION = new HDFSBlocksDistribution();
-065  private Configuration conf;
-066  private volatile ClusterMetrics status;
-067  private MasterServices services;
-068  private final ListeningExecutorService executor;
-069  // Do not scheduleFullRefresh at master startup
-070  private long lastFullRefresh = EnvironmentEdgeManager.currentTime();
-071
-072  private CacheLoader<RegionInfo, HDFSBlocksDistribution> loader =
-073      new CacheLoader<RegionInfo, HDFSBlocksDistribution>() {
-074
-075    @Override
-076    public ListenableFuture<HDFSBlocksDistribution> reload(final RegionInfo hri,
-077        HDFSBlocksDistribution oldValue) throws Exception {
-078      return executor.submit(new Callable<HDFSBlocksDistribution>() {
-079        @Override
-080        public HDFSBlocksDistribution call() throws Exception {
-081          return internalGetTopBlockLocation(hri);
-082        }
-083      });
-084    }
-085
-086    @Override
-087    public HDFSBlocksDistribution load(RegionInfo key) throws Exception {
-088      return internalGetTopBlockLocation(key);
-089    }
-090  };
+024import java.util.Collections;
+025import java.util.HashMap;
+026import java.util.List;
+027import java.util.Map;
+028import java.util.concurrent.Callable;
+029import java.util.concurrent.ExecutionException;
+030import java.util.concurrent.Executors;
+031import java.util.concurrent.TimeUnit;
+032
+033import org.apache.commons.collections4.CollectionUtils;
+034import org.apache.commons.collections4.MultiValuedMap;
+035import org.apache.commons.collections4.multimap.ArrayListValuedHashMap;
+036import org.apache.hadoop.conf.Configuration;
+037import org.apache.hadoop.hbase.ClusterMetrics;
+038import org.apache.hadoop.hbase.HDFSBlocksDistribution;
+039import org.apache.hadoop.hbase.ServerName;
+040import org.apache.hadoop.hbase.TableName;
+041import org.apache.hadoop.hbase.client.RegionInfo;
+042import org.apache.hadoop.hbase.client.TableDescriptor;
+043import 
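
Both sides of the hunk above load HDFSBlocksDistribution values through a Guava CacheLoader whose reload hands recomputation to a ListeningExecutorService, so a stale entry is refreshed in the background while readers keep getting the old value. Below is a minimal sketch of that async-reload pattern, assuming plain Guava rather than the hbase-thirdparty relocated packages; the String/Long types and expensiveLookup are stand-ins for RegionInfo, HDFSBlocksDistribution, and internalGetTopBlockLocation.

import java.util.concurrent.Callable;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;

public class AsyncReloadCacheExample {
  private static final ListeningExecutorService EXECUTOR =
      MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(4));

  // refreshAfterWrite is one way to trigger reload; the hunk above does not
  // show the actual builder settings, so this choice is an assumption.
  private static final LoadingCache<String, Long> CACHE = CacheBuilder.newBuilder()
      .refreshAfterWrite(240, TimeUnit.MINUTES)
      .build(new CacheLoader<String, Long>() {
        @Override
        public Long load(String key) {
          return expensiveLookup(key); // first load is synchronous
        }

        @Override
        public ListenableFuture<Long> reload(final String key, Long oldValue) {
          // Refresh on the executor instead of blocking the calling thread;
          // readers get oldValue until this future completes.
          return EXECUTOR.submit(new Callable<Long>() {
            @Override
            public Long call() {
              return expensiveLookup(key);
            }
          });
        }
      });

  // Stand-in for internalGetTopBlockLocation(hri).
  private static Long expensiveLookup(String key) {
    return (long) key.hashCode();
  }

  public static void main(String[] args) throws Exception {
    System.out.println(CACHE.get("region-1"));
  }
}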

[42/51] [partial] hbase-site git commit: Published site at 00095a2ef9442e3fd86c04876c9d91f2f8b23ad8.

2018-03-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bd675fa3/devapidocs/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.StoreFileSplitter.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.StoreFileSplitter.html b/devapidocs/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.StoreFileSplitter.html
index f65d734..af02b5e 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.StoreFileSplitter.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.StoreFileSplitter.html
@@ -117,7 +117,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-private class SplitTableRegionProcedure.StoreFileSplitter
+private class SplitTableRegionProcedure.StoreFileSplitter
 extends java.lang.Object
 implements java.util.concurrent.Callable<Pair<org.apache.hadoop.fs.Path,org.apache.hadoop.fs.Path>>
 Utility class used to do the file splitting / reference writing
@@ -220,7 +220,7 @@
 
 
 regionFs
-private final HRegionFileSystem regionFs
+private final HRegionFileSystem regionFs
 
 
 
@@ -229,7 +229,7 @@
 
 
 family
-private final byte[] family
+private final byte[] family
 
 
 
@@ -238,7 +238,7 @@
 
 
 sf
-private final HStoreFile sf
+private final HStoreFile sf
 
 
 
@@ -255,7 +255,7 @@
 
 
 StoreFileSplitter
-public StoreFileSplitter(HRegionFileSystem regionFs,
+public StoreFileSplitter(HRegionFileSystem regionFs,
     byte[] family,
     HStoreFile sf)
 Constructor that takes what it needs to split
@@ -281,7 +281,7 @@
 
 
 call
-public Pair<org.apache.hadoop.fs.Path,org.apache.hadoop.fs.Path> call()
+public Pair<org.apache.hadoop.fs.Path,org.apache.hadoop.fs.Path> call()
     throws java.io.IOException
 
 Specified by:

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bd675fa3/devapidocs/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.html b/devapidocs/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.html
index 251b42d..61363f0 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.html
@@ -492,7 +492,7 @@ extends AbstractStateMachineTableProcedure
-getRegionDir, getUser, preflightChecks, releaseSyncLatch, setUser
+checkOnline, getRegionDir, getUser, preflightChecks, releaseSyncLatch, setUser
 
 
 
@@ -589,7 +589,7 @@ extends 
 
 EXPECTED_SPLIT_STATES
-private static final RegionState.State[] EXPECTED_SPLIT_STATES
+private static final RegionState.State[] EXPECTED_SPLIT_STATES
 
 
 
@@ -639,7 +639,7 @@ extends 
 
 checkSplittable
-private void checkSplittable(MasterProcedureEnv env,
+private void checkSplittable(MasterProcedureEnv env,
     RegionInfo regionToSplit,
     byte[] splitRow)
   throws java.io.IOException
@@ -660,7 +660,7 @@ extends 
 
 getDaughterRegionIdTimestamp
-private static long getDaughterRegionIdTimestamp(RegionInfo hri)
+private static long getDaughterRegionIdTimestamp(RegionInfo hri)
 Calculate daughter regionid to use.
 
 Parameters:
@@ -676,7 +676,7 @@ extends 
 
 executeFromState
-protected StateMachineProcedure.Flow executeFromState(MasterProcedureEnv env,
+protected StateMachineProcedure.Flow executeFromState(MasterProcedureEnv env,
     org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.SplitTableRegionState state)
 throws java.lang.InterruptedException
 Description copied from class: StateMachineProcedure
@@ -699,7 +699,7 @@ extends 
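
StoreFileSplitter, shown in the first file of this message, is a Callable returning the pair of daughter paths so each store file can be split as one pooled task and failures surface when the futures are read. A minimal sketch of that fan-out shape; java.nio paths and a fake split stand in for HRegionFileSystem and HBase's reference-file writing.

import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class SplitterDemo {
  // One task per store file, returning the two paths it produced.
  static class StoreFileSplitter implements Callable<Path[]> {
    private final Path storeFile;
    StoreFileSplitter(Path storeFile) { this.storeFile = storeFile; }

    @Override public Path[] call() {
      // Stand-in for writing the two daughter-region reference files.
      return new Path[] {
        Paths.get(storeFile + ".daughterA"),
        Paths.get(storeFile + ".daughterB")
      };
    }
  }

  public static void main(String[] args) throws Exception {
    ExecutorService pool = Executors.newFixedThreadPool(4);
    List<Future<Path[]>> futures = new ArrayList<>();
    for (String f : new String[] {"cf/sf1", "cf/sf2"}) {
      futures.add(pool.submit(new StoreFileSplitter(Paths.get(f))));
    }
    for (Future<Path[]> future : futures) {
      Path[] daughters = future.get(); // rethrows any split failure
      System.out.println(daughters[0] + " , " + daughters[1]);
    }
    pool.shutdown();
  }
}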
 
 

[22/51] [partial] hbase-site git commit: Published site at 00095a2ef9442e3fd86c04876c9d91f2f8b23ad8.

2018-03-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bd675fa3/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.SingleServerRequestRunnable.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.SingleServerRequestRunnable.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.SingleServerRequestRunnable.html
index c27b109..4160a88 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.SingleServerRequestRunnable.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.SingleServerRequestRunnable.html
@@ -105,7 +105,7 @@
 097    try {
 098      done = waitUntilDone(startTime * 1000L + asyncProcess.primaryCallTimeoutMicroseconds);
 099    } catch (InterruptedException ex) {
-100      LOG.error("Replica thread was interrupted - no replica calls: " + ex.getMessage());
+100      LOG.error("Replica thread interrupted - no replica calls {}", ex.getMessage());
 101      return;
 102    }
 103  }
@@ -149,7 +149,7 @@
 141  if (loc == null) return;
 142  HRegionLocation[] locs = loc.getRegionLocations();
 143  if (locs.length == 1) {
-144    LOG.warn("No replicas found for " + action.getAction());
+144    LOG.warn("No replicas found for {}", action.getAction());
 145    return;
 146  }
 147  synchronized (replicaResultLock) {
@@ -230,8 +230,8 @@
 222      return;
 223    } catch (Throwable t) {
 224      // This should not happen. Let's log & retry anyway.
-225      LOG.error("#" + asyncProcess.id + ", Caught throwable while calling. This is unexpected." +
-226          " Retrying. Server is " + server + ", tableName=" + tableName, t);
+225      LOG.error("id=" + asyncProcess.id + ", caught throwable. Unexpected." +
+226          " Retrying. Server=" + server + ", tableName=" + tableName, t);
 227      receiveGlobalFailure(multiAction, server, numAttempt, t);
 228      return;
 229    }
@@ -247,1036 +247,1035 @@
 239    }
 240  } catch (Throwable t) {
 241    // Something really bad happened. We are on the send thread that will now die.
-242    LOG.error("Internal AsyncProcess #" + asyncProcess.id + " error for "
-243        + tableName + " processing for " + server, t);
-244    throw new RuntimeException(t);
-245  } finally {
-246    asyncProcess.decTaskCounters(multiAction.getRegions(), server);
-247    if (callsInProgress != null && callable != null && res != null) {
-248      callsInProgress.remove(callable);
-249    }
-250  }
-251}
-252  }
-253
-254  private final Batch.Callback<CResult> callback;
-255  private final BatchErrors errors;
-256  private final ConnectionImplementation.ServerErrorTracker errorsByServer;
-257  private final ExecutorService pool;
-258  private final Set<CancellableRegionServerCallable> callsInProgress;
+242    LOG.error("id=" + asyncProcess.id + " error for " + tableName + " processing " + server, t);
+243    throw new RuntimeException(t);
+244  } finally {
+245    asyncProcess.decTaskCounters(multiAction.getRegions(), server);
+246    if (callsInProgress != null && callable != null && res != null) {
+247      callsInProgress.remove(callable);
+248    }
+249  }
+250}
+251  }
+252
+253  private final Batch.Callback<CResult> callback;
+254  private final BatchErrors errors;
+255  private final ConnectionImplementation.ServerErrorTracker errorsByServer;
+256  private final ExecutorService pool;
+257  private final Set<CancellableRegionServerCallable> callsInProgress;
+258
 259
-260
-261  private final TableName tableName;
-262  private final AtomicLong actionsInProgress = new AtomicLong(-1);
-263  /**
-264   * The lock controls access to results. It is only held when populating results where
-265   * there might be several callers (eventual consistency gets). For other requests,
-266   * there's one unique call going on per result index.
-267   */
-268  private final Object replicaResultLock = new Object();
-269  /**
-270   * Result array.  Null if results are not needed. Otherwise, each index corresponds to
-271   * the action index in initial actions submitted. For most request types, has null-s for
-272   * requests that are not done, and result/exception for those that are done.
-273   * For eventual-consistency gets, initially the same applies; at some point, replica calls
-274   * might be started, and ReplicaResultState is put at the corresponding indices. The
-275   * returning calls check the type to detect when this is the case. After all calls are done,
-276   * ReplicaResultState-s are replaced with results for the user.
-277   */
-278  private final Object[] results;
-279  /**
-280   * Indices of replica gets in results. If null, all or no 
[31/51] [partial] hbase-site git commit: Published site at 00095a2ef9442e3fd86c04876c9d91f2f8b23ad8.

2018-03-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bd675fa3/devapidocs/src-html/org/apache/hadoop/hbase/backup/HFileArchiver.FileConverter.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/backup/HFileArchiver.FileConverter.html b/devapidocs/src-html/org/apache/hadoop/hbase/backup/HFileArchiver.FileConverter.html
index f47d627..c3d225c 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/backup/HFileArchiver.FileConverter.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/backup/HFileArchiver.FileConverter.html
@@ -117,219 +117,219 @@
 109   */
 110  public static boolean archiveRegion(FileSystem fs, Path rootdir, Path tableDir, Path regionDir)
 111      throws IOException {
-112    if (LOG.isDebugEnabled()) {
-113      LOG.debug("ARCHIVING " + regionDir.toString());
-114    }
-115
-116    // otherwise, we archive the files
-117    // make sure we can archive
-118    if (tableDir == null || regionDir == null) {
-119      LOG.error("No archive directory could be found because tabledir (" + tableDir
-120          + ") or regiondir (" + regionDir + "was null. Deleting files instead.");
-121      deleteRegionWithoutArchiving(fs, regionDir);
-122      // we should have archived, but failed to. Doesn't matter if we deleted
-123      // the archived files correctly or not.
-124      return false;
-125    }
-126
-127    // make sure the regiondir lives under the tabledir
-128    Preconditions.checkArgument(regionDir.toString().startsWith(tableDir.toString()));
-129    Path regionArchiveDir = HFileArchiveUtil.getRegionArchiveDir(rootdir,
-130        FSUtils.getTableName(tableDir),
-131        regionDir.getName());
-132
-133    FileStatusConverter getAsFile = new FileStatusConverter(fs);
-134    // otherwise, we attempt to archive the store files
-135
-136    // build collection of just the store directories to archive
-137    Collection<File> toArchive = new ArrayList<>();
-138    final PathFilter dirFilter = new FSUtils.DirFilter(fs);
-139    PathFilter nonHidden = new PathFilter() {
-140      @Override
-141      public boolean accept(Path file) {
-142        return dirFilter.accept(file) && !file.getName().toString().startsWith(".");
-143      }
-144    };
-145    FileStatus[] storeDirs = FSUtils.listStatus(fs, regionDir, nonHidden);
-146    // if there no files, we can just delete the directory and return;
-147    if (storeDirs == null) {
-148      LOG.debug("Region directory " + regionDir + " empty.");
-149      return deleteRegionWithoutArchiving(fs, regionDir);
-150    }
-151
-152    // convert the files in the region to a File
-153    toArchive.addAll(Lists.transform(Arrays.asList(storeDirs), getAsFile));
-154    LOG.debug("Archiving " + toArchive);
-155    List<File> failedArchive = resolveAndArchive(fs, regionArchiveDir, toArchive,
-156        EnvironmentEdgeManager.currentTime());
-157    if (!failedArchive.isEmpty()) {
-158      throw new FailedArchiveException("Failed to archive/delete all the files for region:"
-159          + regionDir.getName() + " into " + regionArchiveDir
-160          + ". Something is probably awry on the filesystem.",
-161          Collections2.transform(failedArchive, FUNC_FILE_TO_PATH));
-162    }
-163    // if that was successful, then we delete the region
-164    return deleteRegionWithoutArchiving(fs, regionDir);
-165  }
-166
-167  /**
-168   * Remove from the specified region the store files of the specified column family,
-169   * either by archiving them or outright deletion
-170   * @param fs the filesystem where the store files live
-171   * @param conf {@link Configuration} to examine to determine the archive directory
-172   * @param parent Parent region hosting the store files
-173   * @param tableDir {@link Path} to where the table is being stored (for building the archive path)
-174   * @param family the family hosting the store files
-175   * @throws IOException if the files could not be correctly disposed.
-176   */
-177  public static void archiveFamily(FileSystem fs, Configuration conf,
-178      RegionInfo parent, Path tableDir, byte[] family) throws IOException {
-179    Path familyDir = new Path(tableDir, new Path(parent.getEncodedName(), Bytes.toString(family)));
-180    archiveFamilyByFamilyDir(fs, conf, parent, familyDir, family);
-181  }
-182
-183  /**
-184   * Removes from the specified region the store files of the specified column family,
-185   * either by archiving them or outright deletion
-186   * @param fs the filesystem where the store files live
-187   * @param conf {@link Configuration} to examine to determine the archive directory
-188   * @param parent Parent region hosting the store files
-189   * @param familyDir {@link Path} to where the family is being stored
-190   * @param family the family hosting the store files
-191   * @throws IOException if the files could not be correctly disposed.
[07/51] [partial] hbase-site git commit: Published site at 00095a2ef9442e3fd86c04876c9d91f2f8b23ad8.

2018-03-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bd675fa3/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.KeepAliveWorkerThread.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.KeepAliveWorkerThread.html b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.KeepAliveWorkerThread.html
index 3bc66bb..97aa79c 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.KeepAliveWorkerThread.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.KeepAliveWorkerThread.html
@@ -1435,459 +1435,460 @@
 1427   */
 1428  private void execProcedure(final RootProcedureState procStack,
 1429      final Procedure<TEnvironment> procedure) {
-1430    Preconditions.checkArgument(procedure.getState() == ProcedureState.RUNNABLE);
-1431
-1432    // Procedures can suspend themselves. They skip out by throwing a ProcedureSuspendedException.
-1433    // The exception is caught below and then we hurry to the exit without disturbing state. The
-1434    // idea is that the processing of this procedure will be unsuspended later by an external event
-1435    // such the report of a region open. TODO: Currently, its possible for two worker threads
-1436    // to be working on the same procedure concurrently (locking in procedures is NOT about
-1437    // concurrency but about tying an entity to a procedure; i.e. a region to a particular
-1438    // procedure instance). This can make for issues if both threads are changing state.
-1439    // See env.getProcedureScheduler().wakeEvent(regionNode.getProcedureEvent());
-1440    // in RegionTransitionProcedure#reportTransition for example of Procedure putting
-1441    // itself back on the scheduler making it possible for two threads running against
-1442    // the one Procedure. Might be ok if they are both doing different, idempotent sections.
-1443    boolean suspended = false;
-1444
-1445    // Whether to 're-' -execute; run through the loop again.
-1446    boolean reExecute = false;
-1447
-1448    Procedure<TEnvironment>[] subprocs = null;
-1449    do {
-1450      reExecute = false;
-1451      try {
-1452        subprocs = procedure.doExecute(getEnvironment());
-1453        if (subprocs != null && subprocs.length == 0) {
-1454          subprocs = null;
-1455        }
-1456      } catch (ProcedureSuspendedException e) {
-1457        if (LOG.isTraceEnabled()) {
-1458          LOG.trace("Suspend " + procedure);
-1459        }
-1460        suspended = true;
-1461      } catch (ProcedureYieldException e) {
-1462        if (LOG.isTraceEnabled()) {
-1463          LOG.trace("Yield " + procedure + ": " + e.getMessage(), e);
-1464        }
-1465        scheduler.yield(procedure);
-1466        return;
-1467      } catch (InterruptedException e) {
-1468        if (LOG.isTraceEnabled()) {
-1469          LOG.trace("Yield interrupt " + procedure + ": " + e.getMessage(), e);
-1470        }
-1471        handleInterruptedException(procedure, e);
-1472        scheduler.yield(procedure);
-1473        return;
-1474      } catch (Throwable e) {
-1475        // Catch NullPointerExceptions or similar errors...
-1476        String msg = "CODE-BUG: Uncaught runtime exception: " + procedure;
-1477        LOG.error(msg, e);
-1478        procedure.setFailure(new RemoteProcedureException(msg, e));
-1479      }
-1480
-1481      if (!procedure.isFailed()) {
-1482        if (subprocs != null) {
-1483          if (subprocs.length == 1 && subprocs[0] == procedure) {
-1484            // Procedure returned itself. Quick-shortcut for a state machine-like procedure;
-1485            // i.e. we go around this loop again rather than go back out on the scheduler queue.
-1486            subprocs = null;
-1487            reExecute = true;
-1488            if (LOG.isTraceEnabled()) {
-1489              LOG.trace("Short-circuit to next step on pid=" + procedure.getProcId());
-1490            }
-1491          } else {
-1492            // Yield the current procedure, and make the subprocedure runnable
-1493            // subprocs may come back 'null'.
-1494            subprocs = initializeChildren(procStack, procedure, subprocs);
-1495            LOG.info("Initialized subprocedures=" +
-1496              (subprocs == null? null:
-1497                Stream.of(subprocs).map(e -> "{" + e.toString() + "}").
-1498                collect(Collectors.toList()).toString()));
-1499          }
-1500        } else if (procedure.getState() == ProcedureState.WAITING_TIMEOUT) {
-1501          if (LOG.isTraceEnabled()) {
-1502            LOG.trace("Added to timeoutExecutor " + procedure);
-1503          }
-1504          timeoutExecutor.add(procedure);
-1505        } else if (!suspended) {
-1506          // No subtask, so we are done
-1507          procedure.setState(ProcedureState.SUCCESS);
[17/51] [partial] hbase-site git commit: Published site at 00095a2ef9442e3fd86c04876c9d91f2f8b23ad8.

2018-03-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bd675fa3/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.html b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.html
index 74fbf67..33418d0 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.html
@@ -27,287 +27,296 @@
 019package org.apache.hadoop.hbase.io.hfile.bucket;
 020
 021import java.io.File;
-022import java.io.FileNotFoundException;
-023import java.io.IOException;
-024import java.io.RandomAccessFile;
-025import java.nio.ByteBuffer;
-026import java.nio.channels.ClosedChannelException;
-027import java.nio.channels.FileChannel;
-028import java.util.Arrays;
-029import org.apache.hadoop.hbase.io.hfile.Cacheable;
-030import org.apache.hadoop.hbase.io.hfile.Cacheable.MemoryType;
-031import org.apache.hadoop.hbase.io.hfile.CacheableDeserializer;
-032import org.apache.hadoop.hbase.nio.ByteBuff;
-033import org.apache.hadoop.hbase.nio.SingleByteBuff;
-034import org.apache.hadoop.util.StringUtils;
-035import org.apache.yetus.audience.InterfaceAudience;
-036import org.slf4j.Logger;
-037import org.slf4j.LoggerFactory;
-038
-039import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-040import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
-041
-042/**
-043 * IO engine that stores data to a file on the local file system.
-044 */
-045@InterfaceAudience.Private
-046public class FileIOEngine implements IOEngine {
-047  private static final Logger LOG = LoggerFactory.getLogger(FileIOEngine.class);
-048  public static final String FILE_DELIMITER = ",";
-049  private final String[] filePaths;
-050  private final FileChannel[] fileChannels;
-051  private final RandomAccessFile[] rafs;
-052
-053  private final long sizePerFile;
-054  private final long capacity;
-055
-056  private FileReadAccessor readAccessor = new FileReadAccessor();
-057  private FileWriteAccessor writeAccessor = new FileWriteAccessor();
-058
-059  public FileIOEngine(long capacity, boolean maintainPersistence, String... filePaths)
-060      throws IOException {
-061    this.sizePerFile = capacity / filePaths.length;
-062    this.capacity = this.sizePerFile * filePaths.length;
-063    this.filePaths = filePaths;
-064    this.fileChannels = new FileChannel[filePaths.length];
-065    if (!maintainPersistence) {
-066      for (String filePath : filePaths) {
-067        File file = new File(filePath);
-068        if (file.exists()) {
-069          if (LOG.isDebugEnabled()) {
-070            LOG.debug("File " + filePath + " already exists. Deleting!!");
-071          }
-072          file.delete();
-073          // If deletion fails still we can manage with the writes
-074        }
-075      }
-076    }
-077    this.rafs = new RandomAccessFile[filePaths.length];
-078    for (int i = 0; i < filePaths.length; i++) {
-079      String filePath = filePaths[i];
-080      try {
-081        rafs[i] = new RandomAccessFile(filePath, "rw");
-082        long totalSpace = new File(filePath).getTotalSpace();
-083        if (totalSpace < sizePerFile) {
-084          // The next setting length will throw exception,logging this message
-085          // is just used for the detail reason of exception,
-086          String msg = "Only " + StringUtils.byteDesc(totalSpace)
-087              + " total space under " + filePath + ", not enough for requested "
-088              + StringUtils.byteDesc(sizePerFile);
-089          LOG.warn(msg);
-090        }
-091        rafs[i].setLength(sizePerFile);
-092        fileChannels[i] = rafs[i].getChannel();
-093        LOG.info("Allocating cache " + StringUtils.byteDesc(sizePerFile)
-094            + ", on the path:" + filePath);
-095      } catch (IOException fex) {
-096        LOG.error("Failed allocating cache on " + filePath, fex);
-097        shutdown();
-098        throw fex;
-099      }
-100    }
-101  }
-102
-103  @Override
-104  public String toString() {
-105    return "ioengine=" + this.getClass().getSimpleName() + ", paths="
-106        + Arrays.asList(filePaths) + ", capacity=" + String.format("%,d", this.capacity);
-107  }
-108
-109  /**
-110   * File IO engine is always able to support persistent storage for the cache
-111   * @return true
-112   */
-113  @Override
-114  public boolean isPersistent() {
-115    return true;
-116  }
-117
-118  /**
-119   * Transfers data from file to the given byte buffer
-120   * @param offset The offset in the file where the first byte to be read
-121   * @param length The length of buffer that should be allocated for reading
-122   *               from the file channel
-123   * @return number of bytes read
-124   
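
The constructor above stripes the engine's capacity evenly across the configured files (sizePerFile = capacity / filePaths.length), so any engine-wide offset maps to one backing file plus an offset within it. A tiny sketch of that arithmetic; the method names are illustrative, not the HBase API.

public class StripedOffsetDemo {
  static final long SIZE_PER_FILE = 1024L * 1024L; // 1 MiB per backing file, illustrative

  /** Which backing file holds the byte at this engine-wide offset. */
  static int fileIndex(long offset) {
    return (int) (offset / SIZE_PER_FILE);
  }

  /** Offset of that byte within its backing file. */
  static long offsetInFile(long offset) {
    return offset % SIZE_PER_FILE;
  }

  public static void main(String[] args) {
    long offset = 3L * SIZE_PER_FILE + 42;
    // prints: file #3, local offset 42
    System.out.println("file #" + fileIndex(offset) + ", local offset " + offsetInFile(offset));
  }
}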

[48/51] [partial] hbase-site git commit: Published site at 00095a2ef9442e3fd86c04876c9d91f2f8b23ad8.

2018-03-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bd675fa3/devapidocs/index-all.html
--
diff --git a/devapidocs/index-all.html b/devapidocs/index-all.html
index a47005c..ca9ab07 100644
--- a/devapidocs/index-all.html
+++ b/devapidocs/index-all.html
@@ -1759,8 +1759,6 @@
 
 Add new hfile references to the queue.
 
-addHFileRefs(String, List<Pair<Path, Path>>) - Method in class org.apache.hadoop.hbase.replication.TableReplicationQueueStorage
-
 addHFileRefs(String, List<Pair<Path, Path>>) - Method in class org.apache.hadoop.hbase.replication.ZKReplicationQueueStorage
 
 addHFileRefsToQueue(TableName, byte[], List<Pair<Path, Path>>) - Method in class org.apache.hadoop.hbase.replication.regionserver.Replication
@@ -2045,8 +2043,6 @@
 
 Add a replication peer.
 
-addPeer(String, ReplicationPeerConfig, boolean) - Method in class org.apache.hadoop.hbase.replication.TableReplicationPeerStorage
-
 addPeer(String, ReplicationPeerConfig, boolean) - Method in class org.apache.hadoop.hbase.replication.ZKReplicationPeerStorage
 
 AddPeerProcedure - Class in org.apache.hadoop.hbase.master.replication
@@ -2061,8 +2057,6 @@
 
 Add a peer to hfile reference queue if peer does not exist.
 
-addPeerToHFileRefs(String) - Method in class org.apache.hadoop.hbase.replication.TableReplicationQueueStorage
-
 addPeerToHFileRefs(String) - Method in class org.apache.hadoop.hbase.replication.ZKReplicationQueueStorage
 
 addPrimaryAssignment(RegionInfo, ServerName) - Method in class org.apache.hadoop.hbase.master.SnapshotOfRegionAssignmentFromMeta
@@ -2592,8 +2586,6 @@
 
 Add a new WAL file to the given queue for a given regionserver.
 
-addWAL(ServerName, String, String) - Method in class org.apache.hadoop.hbase.replication.TableReplicationQueueStorage
-
 addWAL(ServerName, String, String) - Method in class org.apache.hadoop.hbase.replication.ZKReplicationQueueStorage
 
 addWALActionsListener(WALActionsListener) - Method in class org.apache.hadoop.hbase.wal.AbstractFSWALProvider
@@ -9644,8 +9636,6 @@
 
 cellsSizeCompactedToMob - Variable in class org.apache.hadoop.hbase.regionserver.MetricsRegionServerWrapperImpl
 
-cellTimestamp - Variable in class org.apache.hadoop.hbase.replication.TableReplicationQueueStorage.WALCell
-
 cellToBackupInfo(Cell) - Method in class org.apache.hadoop.hbase.backup.impl.BackupSystemTable
 
 Converts cell to backup info instance.
@@ -10603,6 +10593,10 @@
 
 checkNoTableNames() - Method in class org.apache.hadoop.hbase.tool.Canary.RegionServerMonitor
 
+checkOnline(MasterProcedureEnv, RegionInfo) - Static method in class org.apache.hadoop.hbase.master.procedure.AbstractStateMachineTableProcedure
+
+Check region is online.
+
 checkOnlineRegionsReport(RegionStates.ServerStateNode, Set<byte[]>) - Method in class org.apache.hadoop.hbase.master.assignment.AssignmentManager
 
 checkOnlineRegionsReportForMeta(RegionStates.ServerStateNode, Set<byte[]>) - Method in class org.apache.hadoop.hbase.master.assignment.AssignmentManager
@@ -10758,10 +10752,12 @@
 
 checkRegionsAndGetTableName(byte[], byte[]) - Method in class org.apache.hadoop.hbase.client.RawAsyncHBaseAdmin
 
-checkRegionsToMerge(RegionInfo[], boolean) - Static method in class org.apache.hadoop.hbase.master.assignment.MergeTableRegionsProcedure
-
-checkRegionsToMerge(RegionInfo, RegionInfo, boolean) - Static method in class org.apache.hadoop.hbase.master.assignment.MergeTableRegionsProcedure
+checkRegionsToMerge(MasterProcedureEnv, RegionInfo[], boolean) - Static method in class org.apache.hadoop.hbase.master.assignment.MergeTableRegionsProcedure
 
+checkRegionsToMerge(MasterProcedureEnv, RegionInfo, RegionInfo, boolean) - Static method in class org.apache.hadoop.hbase.master.assignment.MergeTableRegionsProcedure
+
+One time checks.
+
 checkRemoveBackupImages(FileSystem, String, String[]) - Static method in class org.apache.hadoop.hbase.backup.impl.BackupCommands.RepairCommand
 
 checkReplicaId(int) - Static method in class org.apache.hadoop.hbase.client.RegionInfoBuilder.MutableRegionInfo
@@ -11270,8 +11266,6 @@
 
 Change ownership for the queue identified by queueId and belongs to a dead region server.
 
-claimQueue(ServerName, String, ServerName) - Method in class org.apache.hadoop.hbase.replication.TableReplicationQueueStorage
-
 claimQueue(ServerName, String, ServerName) - Method in class org.apache.hadoop.hbase.replication.ZKReplicationQueueStorage
 
 CLASS - Static variable in class org.apache.hadoop.hbase.util.CommonFSUtils.StreamCapabilities
@@ -16110,8 +16104,6 @@
 
 conf - Variable in class org.apache.hadoop.hbase.replication.ReplicationPeers
 
-conf - Variable in class org.apache.hadoop.hbase.replication.TableReplicationStorageBase
-
 conf - Variable in class org.apache.hadoop.hbase.replication.ZKReplicationStorageBase
 
 conf - Variable in class org.apache.hadoop.hbase.rest.client.RemoteAdmin
@@ -16625,8 +16617,6 @@
 
 connection - 

[05/51] [partial] hbase-site git commit: Published site at 00095a2ef9442e3fd86c04876c9d91f2f8b23ad8.

2018-03-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bd675fa3/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.Testing.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.Testing.html b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.Testing.html
index 3bc66bb..97aa79c 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.Testing.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.Testing.html
@@ -1435,459 +1435,460 @@
[34/51] [partial] hbase-site git commit: Published site at 00095a2ef9442e3fd86c04876c9d91f2f8b23ad8.

2018-03-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bd675fa3/devapidocs/org/apache/hadoop/hbase/replication/class-use/ReplicationException.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/replication/class-use/ReplicationException.html b/devapidocs/org/apache/hadoop/hbase/replication/class-use/ReplicationException.html
index 78b1fe9..5aed7ca 100644
--- a/devapidocs/org/apache/hadoop/hbase/replication/class-use/ReplicationException.html
+++ b/devapidocs/org/apache/hadoop/hbase/replication/class-use/ReplicationException.html
@@ -477,25 +477,14 @@
 List<Pair<org.apache.hadoop.fs.Path,org.apache.hadoop.fs.Path>> pairs)
 
 
-void
-TableReplicationQueueStorage.addHFileRefs(String peerId,
-    List<Pair<org.apache.hadoop.fs.Path,org.apache.hadoop.fs.Path>> pairs)
-
-
 boolean
 ReplicationPeers.addPeer(String peerId)
 Method called after a peer has been connected.
 
 
-
-void
-ZKReplicationPeerStorage.addPeer(String peerId,
-   ReplicationPeerConfig peerConfig,
-   boolean enabled)
-
 
 void
-TableReplicationPeerStorage.addPeer(String peerId,
+ZKReplicationPeerStorage.addPeer(String peerId,
    ReplicationPeerConfig peerConfig,
    boolean enabled)
 
@@ -519,25 +508,15 @@
 
 
 void
-TableReplicationQueueStorage.addPeerToHFileRefs(String peerId)
-
-
-void
 ReplicationQueueStorage.addWAL(ServerName serverName,
    String queueId,
    String fileName)
 Add a new WAL file to the given queue for a given regionserver.
 
 
-
-void
-ZKReplicationQueueStorage.addWAL(ServerName serverName,
-  String queueId,
-  String fileName)
-
 
 void
-TableReplicationQueueStorage.addWAL(ServerName serverName,
+ZKReplicationQueueStorage.addWAL(ServerName serverName,
    String queueId,
    String fileName)
 
@@ -556,30 +535,20 @@
   ServerName destServerName)
 
 
-Pair<String,SortedSet<String>>
-TableReplicationQueueStorage.claimQueue(ServerName sourceServerName,
-  String queueId,
-  ServerName destServerName)
-
-
 private ReplicationPeerImpl
 ReplicationPeers.createPeer(String peerId)
 Helper method to connect to a peer
 
 
-
+
 Set<String>
 ReplicationQueueStorage.getAllHFileRefs()
 Load all hfile references in all replication queues.
 
 
-
-Set<String>
[10/51] [partial] hbase-site git commit: Published site at 00095a2ef9442e3fd86c04876c9d91f2f8b23ad8.

2018-03-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bd675fa3/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.CompletedProcedureCleaner.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.CompletedProcedureCleaner.html b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.CompletedProcedureCleaner.html
index 3bc66bb..97aa79c 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.CompletedProcedureCleaner.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.CompletedProcedureCleaner.html
@@ -1435,459 +1435,460 @@
[19/51] [partial] hbase-site git commit: Published site at 00095a2ef9442e3fd86c04876c9d91f2f8b23ad8.

2018-03-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bd675fa3/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.FileReadAccessor.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.FileReadAccessor.html b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.FileReadAccessor.html
index 74fbf67..33418d0 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.FileReadAccessor.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.FileReadAccessor.html
@@ -27,287 +27,296 @@
 019package 
org.apache.hadoop.hbase.io.hfile.bucket;
 020
 021import java.io.File;
-022import java.io.FileNotFoundException;
-023import java.io.IOException;
-024import java.io.RandomAccessFile;
-025import java.nio.ByteBuffer;
-026import 
java.nio.channels.ClosedChannelException;
-027import java.nio.channels.FileChannel;
-028import java.util.Arrays;
-029import 
org.apache.hadoop.hbase.io.hfile.Cacheable;
-030import 
org.apache.hadoop.hbase.io.hfile.Cacheable.MemoryType;
-031import 
org.apache.hadoop.hbase.io.hfile.CacheableDeserializer;
-032import 
org.apache.hadoop.hbase.nio.ByteBuff;
-033import 
org.apache.hadoop.hbase.nio.SingleByteBuff;
-034import 
org.apache.hadoop.util.StringUtils;
-035import 
org.apache.yetus.audience.InterfaceAudience;
-036import org.slf4j.Logger;
-037import org.slf4j.LoggerFactory;
-038
-039import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-040import 
org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
-041
-042/**
-043 * IO engine that stores data to a file 
on the local file system.
-044 */
-045@InterfaceAudience.Private
-046public class FileIOEngine implements 
IOEngine {
-047  private static final Logger LOG = 
LoggerFactory.getLogger(FileIOEngine.class);
-048  public static final String 
FILE_DELIMITER = ",";
-049  private final String[] filePaths;
-050  private final FileChannel[] 
fileChannels;
-051  private final RandomAccessFile[] 
rafs;
-052
-053  private final long sizePerFile;
-054  private final long capacity;
-055
-056  private FileReadAccessor readAccessor = 
new FileReadAccessor();
-057  private FileWriteAccessor writeAccessor 
= new FileWriteAccessor();
-058
-059  public FileIOEngine(long capacity, 
boolean maintainPersistence, String... filePaths)
-060  throws IOException {
-061this.sizePerFile = capacity / 
filePaths.length;
-062this.capacity = this.sizePerFile * 
filePaths.length;
-063this.filePaths = filePaths;
-064this.fileChannels = new 
FileChannel[filePaths.length];
-065if (!maintainPersistence) {
-066  for (String filePath : filePaths) 
{
-067File file = new File(filePath);
-068if (file.exists()) {
-069  if (LOG.isDebugEnabled()) {
-070LOG.debug("File " + filePath 
+ " already exists. Deleting!!");
-071  }
-072  file.delete();
-073  // If deletion fails still we 
can manage with the writes
-074}
-075  }
-076}
-077this.rafs = new 
RandomAccessFile[filePaths.length];
-078for (int i = 0; i  
filePaths.length; i++) {
-079  String filePath = filePaths[i];
-080  try {
-081rafs[i] = new 
RandomAccessFile(filePath, "rw");
-082long totalSpace = new 
File(filePath).getTotalSpace();
-083if (totalSpace  sizePerFile) 
{
-084  // The next setting length will 
throw exception,logging this message
-085  // is just used for the detail 
reason of exception,
-086  String msg = "Only " + 
StringUtils.byteDesc(totalSpace)
-087  + " total space under " + 
filePath + ", not enough for requested "
-088  + 
StringUtils.byteDesc(sizePerFile);
-089  LOG.warn(msg);
-090}
-091rafs[i].setLength(sizePerFile);
-092fileChannels[i] = 
rafs[i].getChannel();
-093LOG.info("Allocating cache " + 
StringUtils.byteDesc(sizePerFile)
-094+ ", on the path:" + 
filePath);
-095  } catch (IOException fex) {
-096LOG.error("Failed allocating 
cache on " + filePath, fex);
-097shutdown();
-098throw fex;
-099  }
-100}
-101  }
-102
-103  @Override
-104  public String toString() {
-105return "ioengine=" + 
this.getClass().getSimpleName() + ", paths="
-106+ Arrays.asList(filePaths) + ", 
capacity=" + String.format("%,d", this.capacity);
-107  }
-108
-109  /**
-110   * File IO engine is always able to 
support persistent storage for the cache
-111   * @return true
-112   */
-113  @Override
-114  public boolean isPersistent() {
-115return true;
-116  }
-117
-118  /**
-119   * Transfers data from file to the 
given byte buffer
-120   * @param offset The offset in the file 
where the first byte to be read
-121   * @param length The length of buffer 
that should be allocated for reading
-122  
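As a reading aid for the FileIOEngine constructor above, here is a minimal sketch of its capacity-splitting idea, assuming only the JDK. The class name SplitFileCache and its helpers are invented for illustration and are not HBase API; the sizePerFile/capacity arithmetic mirrors the diff.

    import java.io.IOException;
    import java.io.RandomAccessFile;
    import java.nio.channels.FileChannel;

    // Hypothetical sketch: divide a cache capacity evenly across N backing files,
    // preallocating each file, as FileIOEngine's constructor does.
    class SplitFileCache {
      private final long sizePerFile;
      private final long capacity;
      private final FileChannel[] channels;

      SplitFileCache(long requestedCapacity, String... paths) throws IOException {
        this.sizePerFile = requestedCapacity / paths.length;
        // Integer division may shrink the total; recompute the effective capacity.
        this.capacity = this.sizePerFile * paths.length;
        this.channels = new FileChannel[paths.length];
        for (int i = 0; i < paths.length; i++) {
          RandomAccessFile raf = new RandomAccessFile(paths[i], "rw");
          raf.setLength(sizePerFile);      // preallocate, like rafs[i].setLength above
          channels[i] = raf.getChannel();
        }
      }

      // Map a global cache offset to (file index, offset within that file).
      int fileIndex(long offset) { return (int) (offset / sizePerFile); }
      long fileOffset(long offset) { return offset % sizePerFile; }
    }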

[09/51] [partial] hbase-site git commit: Published site at 00095a2ef9442e3fd86c04876c9d91f2f8b23ad8.

2018-03-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bd675fa3/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.CompletedProcedureRetainer.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.CompletedProcedureRetainer.html b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.CompletedProcedureRetainer.html
index 3bc66bb..97aa79c 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.CompletedProcedureRetainer.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.CompletedProcedureRetainer.html
@@ -1435,459 +1435,460 @@
 1427   */
 1428  private void execProcedure(final RootProcedureState procStack,
 1429      final Procedure<TEnvironment> procedure) {
-1430
Preconditions.checkArgument(procedure.getState() == ProcedureState.RUNNABLE);
-1431
-1432// Procedures can suspend 
themselves. They skip out by throwing a ProcedureSuspendedException.
-1433// The exception is caught below and 
then we hurry to the exit without disturbing state. The
-1434// idea is that the processing of 
this procedure will be unsuspended later by an external event
-1435    // such as the report of a region open. TODO: Currently, it's possible for two worker threads
-1436// to be working on the same 
procedure concurrently (locking in procedures is NOT about
-1437// concurrency but about tying an 
entity to a procedure; i.e. a region to a particular
-1438// procedure instance). This can 
make for issues if both threads are changing state.
-1439// See 
env.getProcedureScheduler().wakeEvent(regionNode.getProcedureEvent());
-1440// in 
RegionTransitionProcedure#reportTransition for example of Procedure putting
-1441// itself back on the scheduler 
making it possible for two threads running against
-1442// the one Procedure. Might be ok if 
they are both doing different, idempotent sections.
-1443boolean suspended = false;
-1444
-1445// Whether to 're-' -execute; run 
through the loop again.
-1446boolean reExecute = false;
-1447
-1448    Procedure<TEnvironment>[] subprocs = null;
-1449do {
-1450  reExecute = false;
-1451  try {
-1452subprocs = 
procedure.doExecute(getEnvironment());
-1453        if (subprocs != null && subprocs.length == 0) {
-1454  subprocs = null;
-1455}
-1456  } catch 
(ProcedureSuspendedException e) {
-1457if (LOG.isTraceEnabled()) {
-1458  LOG.trace("Suspend " + 
procedure);
-1459}
-1460suspended = true;
-1461  } catch (ProcedureYieldException 
e) {
-1462if (LOG.isTraceEnabled()) {
-1463  LOG.trace("Yield " + procedure 
+ ": " + e.getMessage(), e);
-1464}
-1465scheduler.yield(procedure);
-1466return;
-1467  } catch (InterruptedException e) 
{
-1468if (LOG.isTraceEnabled()) {
-1469  LOG.trace("Yield interrupt " + 
procedure + ": " + e.getMessage(), e);
-1470}
-1471
handleInterruptedException(procedure, e);
-1472scheduler.yield(procedure);
-1473return;
-1474  } catch (Throwable e) {
-1475// Catch NullPointerExceptions 
or similar errors...
-1476String msg = "CODE-BUG: Uncaught 
runtime exception: " + procedure;
-1477LOG.error(msg, e);
-1478procedure.setFailure(new 
RemoteProcedureException(msg, e));
-1479  }
-1480
-1481  if (!procedure.isFailed()) {
-1482if (subprocs != null) {
-1483          if (subprocs.length == 1 && subprocs[0] == procedure) {
-1484// Procedure returned 
itself. Quick-shortcut for a state machine-like procedure;
-1485// i.e. we go around this 
loop again rather than go back out on the scheduler queue.
-1486subprocs = null;
-1487reExecute = true;
-1488if (LOG.isTraceEnabled()) 
{
-1489  LOG.trace("Short-circuit 
to next step on pid=" + procedure.getProcId());
-1490}
-1491  } else {
-1492// Yield the current 
procedure, and make the subprocedure runnable
-1493// subprocs may come back 
'null'.
-1494subprocs = 
initializeChildren(procStack, procedure, subprocs);
-1495LOG.info("Initialized 
subprocedures=" +
-1496  (subprocs == null? null:
-1497
Stream.of(subprocs).map(e -> "{" + e.toString() + "}").
-1498
collect(Collectors.toList()).toString()));
-1499  }
-1500} else if (procedure.getState() 
== ProcedureState.WAITING_TIMEOUT) {
-1501  if (LOG.isTraceEnabled()) {
-1502LOG.trace("Added to 
timeoutExecutor " + procedure);
-1503  }
-1504  
timeoutExecutor.add(procedure);
-1505} else if (!suspended) {
-1506  // No subtask, so we are 
done
-1507  
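The execProcedure loop above runs a step, short-circuits back into the loop when a procedure returns itself, and exits quietly on suspension. A hedged, stripped-down sketch of that control flow follows; Step, SuspendedException, and runStep are illustrative stand-ins, not the HBase procedure-v2 API.

    // Illustrative only: the re-execute loop shape used by execProcedure above.
    final class StepRunner {
      interface Step { Step[] run() throws SuspendedException; }
      static final class SuspendedException extends Exception {}

      static void runStep(Step step) {
        boolean reExecute;
        do {
          reExecute = false;
          Step[] children;
          try {
            children = step.run();
          } catch (SuspendedException e) {
            return; // an external event will resume this step later
          }
          if (children != null && children.length == 1 && children[0] == step) {
            // Step returned itself: loop again instead of rescheduling.
            reExecute = true;
          }
        } while (reExecute);
      }
    }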

[46/51] [partial] hbase-site git commit: Published site at 00095a2ef9442e3fd86c04876c9d91f2f8b23ad8.

2018-03-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bd675fa3/devapidocs/org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.ReplicaResultState.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.ReplicaResultState.html b/devapidocs/org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.ReplicaResultState.html
index aa4efda..bc800b5 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.ReplicaResultState.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.ReplicaResultState.html
@@ -113,7 +113,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-private static class AsyncRequestFutureImpl.ReplicaResultState
+private static class AsyncRequestFutureImpl.ReplicaResultState
 extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
 Sync point for calls to multiple replicas for the same user 
request (Get).
  Created and put in the results array (we assume replica calls require 
results) when
@@ -213,7 +213,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 callCount
-int callCount
+int callCount
 Number of calls outstanding, or 0 if a call succeeded (even 
with others outstanding).
 
 
@@ -223,7 +223,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 replicaErrors
-BatchErrors replicaErrors
+BatchErrors replicaErrors
 Errors for which it is not decided whether we will report 
them to user. If one of the
  calls succeeds, we will discard the errors that may have happened in the 
other calls.
 
@@ -242,7 +242,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 ReplicaResultState
-public ReplicaResultState(int callCount)
+public ReplicaResultState(int callCount)
 
 
 
@@ -259,7 +259,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 toString
-public String toString()
+public String toString()
 
 Overrides:
 https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#toString--;
 title="class or interface in java.lang">toStringin 
classhttps://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bd675fa3/devapidocs/org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.Retry.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.Retry.html b/devapidocs/org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.Retry.html
index 63ce03d..f7ce8e5 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.Retry.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.Retry.html
@@ -122,7 +122,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-public static enum AsyncRequestFutureImpl.Retry
+public static enum AsyncRequestFutureImpl.Retry
 extends https://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true;
 title="class or interface in java.lang">EnumAsyncRequestFutureImpl.Retry
 For AsyncRequestFutureImpl.manageError(int,
 Row, Retry, Throwable, ServerName). Only
  used to make logging more clear, we don't actually care why we don't 
retry.
@@ -221,7 +221,7 @@ the order they are declared.
 
 
 YES
-public static final AsyncRequestFutureImpl.Retry YES
+public static final AsyncRequestFutureImpl.Retry YES
 
 
 
@@ -230,7 +230,7 @@ the order they are declared.
 
 
 NO_LOCATION_PROBLEM
-public static final AsyncRequestFutureImpl.Retry NO_LOCATION_PROBLEM
+public static final AsyncRequestFutureImpl.Retry NO_LOCATION_PROBLEM
 
 
 
@@ -239,7 +239,7 @@ the order they are declared.
 
 
 NO_NOT_RETRIABLE
-public static final AsyncRequestFutureImpl.Retry NO_NOT_RETRIABLE
+public static final AsyncRequestFutureImpl.Retry NO_NOT_RETRIABLE
 
 
 
@@ -248,7 +248,7 @@ the order they are declared.
 
 
 NO_RETRIES_EXHAUSTED
-public static final AsyncRequestFutureImpl.Retry NO_RETRIES_EXHAUSTED
+public static final AsyncRequestFutureImpl.Retry NO_RETRIES_EXHAUSTED
 
 
 
@@ -257,7 +257,7 @@ the order they are declared.
 
 
 NO_OTHER_SUCCEEDED
-public static final AsyncRequestFutureImpl.Retry NO_OTHER_SUCCEEDED
+public static final AsyncRequestFutureImpl.Retry NO_OTHER_SUCCEEDED
 
 
 
@@ -274,7 +274,7 @@ the order they are declared.
 
 
 values
-public static AsyncRequestFutureImpl.Retry[] values()
+public static AsyncRequestFutureImpl.Retry[] values()
 Returns an array containing the constants of this enum 
type, in
 the order they are declared.  This method may be used 
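The ReplicaResultState fields above (callCount, replicaErrors) describe a small sync point: a result slot is settled once any replica succeeds, and buffered errors are only surfaced if every outstanding call fails. A minimal sketch of that pattern, under the stated assumptions (ReplicaSyncPoint is invented here, not HBase code):

    // Hypothetical sketch of the sync point described by ReplicaResultState:
    // callCount replicas are in flight; the first success wins, and errors are
    // only surfaced once all calls have failed.
    final class ReplicaSyncPoint<R> {
      private int callCount;                 // outstanding calls, 0 once decided
      private R result;
      private final java.util.List<Throwable> errors = new java.util.ArrayList<>();

      ReplicaSyncPoint(int callCount) { this.callCount = callCount; }

      synchronized R onReply(R r, Throwable error) {
        if (callCount == 0) return result;   // already decided by another replica
        if (error == null) {
          callCount = 0;                     // success: discard buffered errors
          result = r;
        } else {
          errors.add(error);                 // undecided until all calls fail
          callCount--;
        }
        return result;
      }
    }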

[51/51] [partial] hbase-site git commit: Published site at 00095a2ef9442e3fd86c04876c9d91f2f8b23ad8.

2018-03-17 Thread git-site-role
Published site at 00095a2ef9442e3fd86c04876c9d91f2f8b23ad8.


Project: http://git-wip-us.apache.org/repos/asf/hbase-site/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase-site/commit/bd675fa3
Tree: http://git-wip-us.apache.org/repos/asf/hbase-site/tree/bd675fa3
Diff: http://git-wip-us.apache.org/repos/asf/hbase-site/diff/bd675fa3

Branch: refs/heads/asf-site
Commit: bd675fa3838baf9838ace3e1234dfee0d0f5af90
Parents: b5f95ca
Author: jenkins 
Authored: Sat Mar 17 14:53:17 2018 +
Committer: jenkins 
Committed: Sat Mar 17 14:53:17 2018 +

--
 acid-semantics.html | 4 +-
 apache_hbase_reference_guide.pdf| 6 +-
 apidocs/index-all.html  | 4 +
 .../hbase/client/DoNotRetryRegionException.html |14 +-
 .../hbase/exceptions/MergeRegionException.html  |14 +-
 .../hbase/client/DoNotRetryRegionException.html | 5 +-
 .../hbase/exceptions/MergeRegionException.html  | 6 +-
 book.html   | 2 +-
 bulk-loads.html | 4 +-
 checkstyle-aggregate.html   | 27308 -
 checkstyle.rss  |   180 +-
 coc.html| 4 +-
 cygwin.html | 4 +-
 dependencies.html   | 4 +-
 dependency-convergence.html | 4 +-
 dependency-info.html| 4 +-
 dependency-management.html  | 4 +-
 devapidocs/allclasses-frame.html| 4 -
 devapidocs/allclasses-noframe.html  | 4 -
 devapidocs/constant-values.html |34 +-
 devapidocs/index-all.html   |   184 +-
 .../hadoop/hbase/backup/HFileArchiver.File.html |24 +-
 .../backup/HFileArchiver.FileConverter.html | 6 +-
 .../HFileArchiver.FileStatusConverter.html  | 6 +-
 .../backup/HFileArchiver.FileablePath.html  |20 +-
 .../backup/HFileArchiver.FileableStoreFile.html |18 +-
 .../hbase/backup/HFileArchiver.StoreToFile.html | 6 +-
 .../hadoop/hbase/backup/HFileArchiver.html  |14 +-
 .../hadoop/hbase/backup/package-tree.html   | 2 +-
 .../org/apache/hadoop/hbase/class-use/Cell.html |10 +-
 .../hadoop/hbase/class-use/ServerName.html  |   110 +-
 .../hadoop/hbase/class-use/TableName.html   |13 -
 ...yncRequestFutureImpl.ReplicaResultState.html |10 +-
 .../client/AsyncRequestFutureImpl.Retry.html|16 +-
 .../hbase/client/AsyncRequestFutureImpl.html|   106 +-
 .../hbase/client/DoNotRetryRegionException.html |14 +-
 .../hbase/client/class-use/Connection.html  |40 +-
 .../class-use/DoNotRetryRegionException.html|25 +
 .../hbase/client/class-use/RegionInfo.html  |31 +-
 .../hadoop/hbase/client/class-use/Result.html   |38 +-
 .../hadoop/hbase/client/class-use/Table.html|66 +-
 .../class-use/TableDescriptorBuilder.html   |24 -
 .../hadoop/hbase/client/package-tree.html   |22 +-
 .../apache/hadoop/hbase/client/package-use.html |84 +-
 .../hbase/exceptions/MergeRegionException.html  |14 +-
 .../class-use/MergeRegionException.html |10 +-
 .../hadoop/hbase/executor/package-tree.html | 2 +-
 .../hadoop/hbase/filter/package-tree.html   |10 +-
 .../hfile/bucket/FileIOEngine.FileAccessor.html | 4 +-
 .../bucket/FileIOEngine.FileReadAccessor.html   | 6 +-
 .../bucket/FileIOEngine.FileWriteAccessor.html  | 6 +-
 .../hbase/io/hfile/bucket/FileIOEngine.html |83 +-
 .../hadoop/hbase/io/hfile/package-tree.html | 4 +-
 .../apache/hadoop/hbase/ipc/package-tree.html   | 2 +-
 .../hadoop/hbase/mapreduce/package-tree.html| 2 +-
 .../assignment/GCMergedRegionsProcedure.html| 2 +-
 .../master/assignment/GCRegionProcedure.html| 2 +-
 .../assignment/MergeTableRegionsProcedure.html  |   127 +-
 .../master/assignment/MoveRegionProcedure.html  |24 +-
 ...tTableRegionProcedure.StoreFileSplitter.html |12 +-
 .../assignment/SplitTableRegionProcedure.html   |72 +-
 .../master/balancer/RegionLocationFinder.html   |56 +-
 .../hbase/master/balancer/package-tree.html | 2 +-
 .../hadoop/hbase/master/package-tree.html   | 4 +-
 .../AbstractStateMachineRegionProcedure.html| 2 +-
 .../AbstractStateMachineTableProcedure.html |83 +-
 .../procedure/CloneSnapshotProcedure.html   | 2 +-
 .../master/procedure/CreateTableProcedure.html  | 2 +-
 .../master/procedure/DeleteTableProcedure.html  | 2 +-
 .../master/procedure/DisableTableProcedure.html | 2 +-
 .../master/procedure/EnableTableProcedure.html  | 2 +-
 .../master/procedure/ModifyTableProcedure.html  | 2 +-
 

[45/51] [partial] hbase-site git commit: Published site at 00095a2ef9442e3fd86c04876c9d91f2f8b23ad8.

2018-03-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bd675fa3/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionInfo.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionInfo.html b/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionInfo.html
index 56c6c99..40bbdbf 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionInfo.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionInfo.html
@@ -2927,14 +2927,18 @@ Input/OutputFormats, a table indexing MapReduce job, 
and utility methods.
 
 
 private static void
-MergeTableRegionsProcedure.checkRegionsToMerge(RegionInfo[] regionsToMerge,
+MergeTableRegionsProcedure.checkRegionsToMerge(MasterProcedureEnv env,
+   RegionInfo[] regionsToMerge,
boolean forcible)
 
 
 private static void
-MergeTableRegionsProcedure.checkRegionsToMerge(RegionInfo regionToMergeA,
+MergeTableRegionsProcedure.checkRegionsToMerge(MasterProcedureEnv env,
+   RegionInfo regionToMergeA,
RegionInfo regionToMergeB,
-   boolean forcible)
+   boolean forcible)
+One-time checks.
+
 
 
 private void
@@ -4221,11 +4225,18 @@ Input/OutputFormats, a table indexing MapReduce job, 
and utility methods.
 
 
 
+protected static void
+AbstractStateMachineTableProcedure.checkOnline(MasterProcedureEnv env,
+   RegionInfo ri)
+Check region is online.
+
+
+
 protected org.apache.hadoop.fs.Path
 AbstractStateMachineTableProcedure.getRegionDir(MasterProcedureEnvenv,
 RegionInforegion)
 
-
+
 private void
 RecoverMetaProcedure.handleRIT(MasterProcedureEnvenv,
  RegionInfori,
@@ -4235,24 +4246,24 @@ Input/OutputFormats, a table indexing MapReduce job, 
and utility methods.
  to carry.
 
 
-
+
 private boolean
 ServerCrashProcedure.isDefaultMetaRegion(RegionInfohri)
 
-
+
 protected void
 AbstractStateMachineRegionProcedure.setRegion(RegionInfohri)
 Used when deserializing.
 
 
-
+
 boolean
 MasterProcedureScheduler.waitRegion(Procedureprocedure,
   RegionInforegionInfo)
 Suspend the procedure if the specified region is already 
locked.
 
 
-
+
 boolean
 MasterProcedureScheduler.waitRegions(Procedureprocedure,
TableNametable,
@@ -4260,14 +4271,14 @@ Input/OutputFormats, a table indexing MapReduce job, 
and utility methods.
 Suspend the procedure if the specified set of regions are 
already locked.
 
 
-
+
 void
 MasterProcedureScheduler.wakeRegion(Procedureprocedure,
   RegionInforegionInfo)
 Wake the procedures waiting for the specified region
 
 
-
+
 void
 MasterProcedureScheduler.wakeRegions(Procedureprocedure,
TableNametable,
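The new checkOnline and checkRegionsToMerge entries above are one-time preconditions run before a merge procedure starts work. A hedged sketch of what such a fail-fast check typically looks like; the state lookup below is invented for illustration, and only the method name comes from the diff:

    import java.util.Map;

    // Illustrative precondition in the spirit of checkOnline(env, ri):
    // fail fast if the region cannot participate in the operation.
    final class RegionPreconditions {
      static void checkOnline(Map<String, String> regionStates, String encodedRegionName) {
        String state = regionStates.get(encodedRegionName);
        if (!"OPEN".equals(state)) {
          throw new IllegalStateException(
              "Region " + encodedRegionName + " is not online (state=" + state + ")");
        }
      }
    }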

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bd675fa3/devapidocs/org/apache/hadoop/hbase/client/class-use/Result.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/class-use/Result.html b/devapidocs/org/apache/hadoop/hbase/client/class-use/Result.html
index 2521b5b..a85184f 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/class-use/Result.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/Result.html
@@ -145,44 +145,38 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 
 
-org.apache.hadoop.hbase.replication
-
-Multi Cluster Replication
-
-
-
 org.apache.hadoop.hbase.rest
 
 HBase REST
 
 
-
+
 org.apache.hadoop.hbase.rest.client
 
 
-
+
 org.apache.hadoop.hbase.security.access
 
 
-
+
 org.apache.hadoop.hbase.security.visibility
 
 
-
+
 org.apache.hadoop.hbase.thrift
 
 Provides an HBase http://incubator.apache.org/thrift/;>Thrift
 service.
 
 
-
+
 org.apache.hadoop.hbase.thrift2
 
 Provides an HBase http://thrift.apache.org/;>Thrift
 service.
 
 
-
+
 org.apache.hadoop.hbase.util
 
 
@@ -2056,26 +2050,6 @@ service.
 
 
 
-
-
-
-Uses of Result in org.apache.hadoop.hbase.replication
-
-Methods in org.apache.hadoop.hbase.replication
 with parameters of type Result
-
-Modifier and Type
-Method and Description
-
-
-
-private List<TableReplicationQueueStorage.WALCell>
-TableReplicationQueueStorage.result2WALCells(Result r)
-Parse the WALCell list from an HBase result.
-
-
-
-
-
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bd675fa3/devapidocs/org/apache/hadoop/hbase/client/class-use/Table.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/class-use/Table.html b/devapidocs/org/apache/hadoop/hbase/client/class-use/Table.html
index 8af2dd8..e4548d3 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/class-use/Table.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/Table.html
@@ -138,56 +138,50 @@ Input/OutputFormats, a table 

[49/51] [partial] hbase-site git commit: Published site at 00095a2ef9442e3fd86c04876c9d91f2f8b23ad8.

2018-03-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bd675fa3/checkstyle.rss
--
diff --git a/checkstyle.rss b/checkstyle.rss
index 5bd8a2e..f57b234 100644
--- a/checkstyle.rss
+++ b/checkstyle.rss
@@ -25,8 +25,8 @@ under the License.
 en-us
 2007 - 2018 The Apache Software Foundation
 
-  File: 3589,
- Errors: 16093,
+  File: 3585,
+ Errors: 16085,
  Warnings: 0,
  Infos: 0
   
@@ -3275,7 +3275,7 @@ under the License.
   0
 
 
-  3
+  1
 
   
   
@@ -6052,20 +6052,6 @@ under the License.
   
   
 
-  http://hbase.apache.org/checkstyle.html#org.apache.hadoop.hbase.replication.storage.TestZKReplicationQueueStorage.java;>org/apache/hadoop/hbase/replication/storage/TestZKReplicationQueueStorage.java
-
-
-  0
-
-
-  0
-
-
-  0
-
-  
-  
-
   http://hbase.apache.org/checkstyle.html#org.apache.hadoop.hbase.spark.example.hbasecontext.JavaHBaseDistributedScan.java;>org/apache/hadoop/hbase/spark/example/hbasecontext/JavaHBaseDistributedScan.java
 
 
@@ -9398,20 +9384,6 @@ under the License.
   
   
 
-  http://hbase.apache.org/checkstyle.html#org.apache.hadoop.hbase.replication.storage.TestReplicationStateBasic.java;>org/apache/hadoop/hbase/replication/storage/TestReplicationStateBasic.java
-
-
-  0
-
-
-  0
-
-
-  0
-
-  
-  
-
   http://hbase.apache.org/checkstyle.html#org.apache.hadoop.hbase.filter.TestNullComparator.java;>org/apache/hadoop/hbase/filter/TestNullComparator.java
 
 
@@ -20052,20 +20024,6 @@ under the License.
   
   
 
-  http://hbase.apache.org/checkstyle.html#org.apache.hadoop.hbase.replication.TableReplicationStorageBase.java;>org/apache/hadoop/hbase/replication/TableReplicationStorageBase.java
-
-
-  0
-
-
-  0
-
-
-  0
-
-  
-  
-
   http://hbase.apache.org/checkstyle.html#org.apache.hadoop.hbase.io.TestByteBufferOutputStream.java;>org/apache/hadoop/hbase/io/TestByteBufferOutputStream.java
 
 
@@ -21620,20 +21578,6 @@ under the License.
   
   
 
-  http://hbase.apache.org/checkstyle.html#org.apache.hadoop.hbase.replication.storage.TestReplicationStateZKImpl.java;>org/apache/hadoop/hbase/replication/storage/TestReplicationStateZKImpl.java
-
-
-  0
-
-
-  0
-
-
-  0
-
-  
-  
-
   http://hbase.apache.org/checkstyle.html#org.apache.hadoop.hbase.regionserver.TestStoreFileRefresherChore.java;>org/apache/hadoop/hbase/regionserver/TestStoreFileRefresherChore.java
 
 
@@ -22670,20 +22614,6 @@ under the License.
   
   
 
-  http://hbase.apache.org/checkstyle.html#org.apache.hadoop.hbase.replication.storage.TestZKReplicationPeerStorage.java;>org/apache/hadoop/hbase/replication/storage/TestZKReplicationPeerStorage.java
-
-
-  0
-
-
-  0
-
-
-  0
-
-  
-  
-
   http://hbase.apache.org/checkstyle.html#org.apache.hadoop.hbase.util.MultiHConnection.java;>org/apache/hadoop/hbase/util/MultiHConnection.java
 
 
@@ -22931,7 +22861,7 @@ under the License.
   0
 
 
-  24
+  21
 
   
   
@@ -23972,20 +23902,6 @@ under the License.
   
   
 
-  

[37/51] [partial] hbase-site git commit: Published site at 00095a2ef9442e3fd86c04876c9d91f2f8b23ad8.

2018-03-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bd675fa3/devapidocs/org/apache/hadoop/hbase/replication/TableReplicationPeerStorage.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/replication/TableReplicationPeerStorage.html b/devapidocs/org/apache/hadoop/hbase/replication/TableReplicationPeerStorage.html
deleted file mode 100644
index d59a2b0..0000000
--- a/devapidocs/org/apache/hadoop/hbase/replication/TableReplicationPeerStorage.html
+++ /dev/null
@@ -1,497 +0,0 @@
-org.apache.hadoop.hbase.replication
-Class 
TableReplicationPeerStorage
-
-
-
-https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">java.lang.Object
-
-
-org.apache.hadoop.hbase.replication.TableReplicationStorageBase
-
-
-org.apache.hadoop.hbase.replication.TableReplicationPeerStorage
-
-
-
-
-
-
-
-
-
-All Implemented Interfaces:
-ReplicationPeerStorage
-
-
-
-@InterfaceAudience.Private
-public class TableReplicationPeerStorage
-extends TableReplicationStorageBase
-implements ReplicationPeerStorage
-Table based replication peer storage.
-
-
-
-
-
-
-
-
-
-
-
-Field Summary
-
-
-
-
-Fields inherited from classorg.apache.hadoop.hbase.replication.TableReplicationStorageBase
-conf,
 FAMILY_HFILE_REFS,
 FAMILY_PEER,
 FAMILY_QUEUE,
 FAMILY_REGIONS,
 FAMILY_RS_STATE,
 FAMILY_WAL,
 QU
 ALIFIER_PEER_CONFIG, QUALIFIER_PEER_STATE,
 QUALIFIER_STATE_ENABLED,
 REPLICATION_TABLE,
 zookeeper
-
-
-
-
-
-
-
-
-Constructor Summary
-
-Constructors
-
-Constructor and Description
-
-
-TableReplicationPeerStorage(ZKWatcherzookeeper,
-   
org.apache.hadoop.conf.Configurationconf)
-
-
-
-
-
-
-
-
-
-Method Summary
-
-All MethodsInstance MethodsConcrete Methods
-
-Modifier and Type
-Method and Description
-
-
-void
-addPeer(String peerId,
-   ReplicationPeerConfig peerConfig,
-   boolean enabled)
-Add a replication peer.
-
-
-
-ReplicationPeerConfig
-getPeerConfig(String peerId)
-Get the peer config of a replication peer.
-
-
-
-boolean
-isPeerEnabled(String peerId)
-Test whether a replication peer is enabled.
-
-
-
-List<String>
-listPeerIds()
-Return the peer ids of all replication peers.
-
-
-
-private boolean
-peerExist(String peerId,
- Table table)
-
-
-void
-removePeer(String peerId)
-Remove a replication peer.
-
-
-
-void
-setPeerState(String peerId,
-boolean enabled)
-Set the state of the peer, true for ENABLED, otherwise DISABLED.
-
-
-
-void
-updatePeerConfig(String peerId,
-ReplicationPeerConfig peerConfig)
-Update the config of a replication peer.
-
-
-
-
-
-
-
-Methods inherited from 
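The deleted TableReplicationPeerStorage above implemented the ReplicationPeerStorage contract on an HBase table rather than ZooKeeper. A hedged sketch of that contract, reduced to an in-memory map so it stays self-contained; the PeerRecord holder type is invented here and the config is simplified to a string:

    import java.util.ArrayList;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    // Minimal in-memory stand-in for the peer-storage operations listed above
    // (addPeer, removePeer, setPeerState, isPeerEnabled, listPeerIds).
    final class InMemoryPeerStorage {
      private static final class PeerRecord {    // invented holder type
        final String config;
        final boolean enabled;
        PeerRecord(String config, boolean enabled) { this.config = config; this.enabled = enabled; }
      }
      private final Map<String, PeerRecord> peers = new HashMap<>();

      void addPeer(String peerId, String config, boolean enabled) {
        peers.put(peerId, new PeerRecord(config, enabled));
      }
      void removePeer(String peerId) { peers.remove(peerId); }
      void setPeerState(String peerId, boolean enabled) {
        PeerRecord p = peers.get(peerId);
        if (p != null) peers.put(peerId, new PeerRecord(p.config, enabled));
      }
      boolean isPeerEnabled(String peerId) { return peers.get(peerId).enabled; }
      List<String> listPeerIds() { return new ArrayList<>(peers.keySet()); }
    }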

[47/51] [partial] hbase-site git commit: Published site at 00095a2ef9442e3fd86c04876c9d91f2f8b23ad8.

2018-03-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bd675fa3/devapidocs/org/apache/hadoop/hbase/backup/HFileArchiver.File.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/backup/HFileArchiver.File.html b/devapidocs/org/apache/hadoop/hbase/backup/HFileArchiver.File.html
index adcfac5..2a3760d 100644
--- a/devapidocs/org/apache/hadoop/hbase/backup/HFileArchiver.File.html
+++ b/devapidocs/org/apache/hadoop/hbase/backup/HFileArchiver.File.html
@@ -117,7 +117,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-private abstract static class HFileArchiver.File
+private abstract static class HFileArchiver.File
 extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
 Wrapper to handle file operations uniformly
 
@@ -246,7 +246,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 fs
-protected final org.apache.hadoop.fs.FileSystem fs
+protected final org.apache.hadoop.fs.FileSystem fs
 
 
 
@@ -263,7 +263,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 File
-public File(org.apache.hadoop.fs.FileSystem fs)
+public File(org.apache.hadoop.fs.FileSystem fs)
 
 
 
@@ -280,7 +280,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 delete
-abstract void delete()
+abstract void delete()
   throws https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 Delete the file
 
@@ -295,7 +295,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 isFile
-abstract boolean isFile()
+abstract boolean isFile()
  throws https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 Check to see if this is a file or a directory
 
@@ -312,7 +312,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 getChildren
-abstracthttps://docs.oracle.com/javase/8/docs/api/java/util/Collection.html?is-external=true;
 title="class or interface in java.util">CollectionHFileArchiver.FilegetChildren()
+abstracthttps://docs.oracle.com/javase/8/docs/api/java/util/Collection.html?is-external=true;
 title="class or interface in java.util">CollectionHFileArchiver.FilegetChildren()
  throws https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 
 Returns:
@@ -329,7 +329,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 close
-abstract void close()
+abstract void close()
  throws https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 close any outside readers of the file
 
@@ -344,7 +344,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 getName
-abstracthttps://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringgetName()
+abstracthttps://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringgetName()
 
 Returns:
 the name of the file (not the full fs path, just the individual
@@ -358,7 +358,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 getPath
-abstract org.apache.hadoop.fs.Path getPath()
+abstract org.apache.hadoop.fs.Path getPath()
 
 Returns:
 the path to this file
@@ -371,7 +371,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 moveAndClose
-public boolean moveAndClose(org.apache.hadoop.fs.Path dest)
+public boolean moveAndClose(org.apache.hadoop.fs.Path dest)
  throws https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 Move the file to the given destination
 
@@ -390,7 +390,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 getFileSystem
-public org.apache.hadoop.fs.FileSystem getFileSystem()
+public org.apache.hadoop.fs.FileSystem getFileSystem()
 
 Returns:
 the FileSystem on which this file resides
@@ -403,7 +403,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 toString
-publichttps://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringtoString()
+publichttps://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringtoString()
 
 Overrides:
 https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#toString--;
 title="class or interface in java.lang">toStringin 

[30/51] [partial] hbase-site git commit: Published site at 00095a2ef9442e3fd86c04876c9d91f2f8b23ad8.

2018-03-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bd675fa3/devapidocs/src-html/org/apache/hadoop/hbase/backup/HFileArchiver.FileStatusConverter.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/backup/HFileArchiver.FileStatusConverter.html b/devapidocs/src-html/org/apache/hadoop/hbase/backup/HFileArchiver.FileStatusConverter.html
index f47d627..c3d225c 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/backup/HFileArchiver.FileStatusConverter.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/backup/HFileArchiver.FileStatusConverter.html
@@ -117,219 +117,219 @@
 109   */
 110  public static boolean 
archiveRegion(FileSystem fs, Path rootdir, Path tableDir, Path regionDir)
 111  throws IOException {
-112if (LOG.isDebugEnabled()) {
-113  LOG.debug("ARCHIVING " + 
regionDir.toString());
-114}
-115
-116// otherwise, we archive the files
-117// make sure we can archive
-118if (tableDir == null || regionDir == 
null) {
-119  LOG.error("No archive directory 
could be found because tabledir (" + tableDir
-120          + ") or regiondir (" + regionDir + ") was null. Deleting files instead.");
-121  deleteRegionWithoutArchiving(fs, 
regionDir);
-122  // we should have archived, but 
failed to. Doesn't matter if we deleted
-123  // the archived files correctly or 
not.
-124  return false;
-125}
-126
-127// make sure the regiondir lives 
under the tabledir
-128
Preconditions.checkArgument(regionDir.toString().startsWith(tableDir.toString()));
-129Path regionArchiveDir = 
HFileArchiveUtil.getRegionArchiveDir(rootdir,
-130FSUtils.getTableName(tableDir),
-131regionDir.getName());
-132
-133FileStatusConverter getAsFile = new 
FileStatusConverter(fs);
-134// otherwise, we attempt to archive 
the store files
-135
-136// build collection of just the store 
directories to archive
-137    Collection<File> toArchive = new ArrayList<>();
-138final PathFilter dirFilter = new 
FSUtils.DirFilter(fs);
-139PathFilter nonHidden = new 
PathFilter() {
-140  @Override
-141  public boolean accept(Path file) 
{
-142        return dirFilter.accept(file) && !file.getName().toString().startsWith(".");
-143  }
-144};
-145FileStatus[] storeDirs = 
FSUtils.listStatus(fs, regionDir, nonHidden);
-146// if there no files, we can just 
delete the directory and return;
-147if (storeDirs == null) {
-148  LOG.debug("Region directory " + 
regionDir + " empty.");
-149  return 
deleteRegionWithoutArchiving(fs, regionDir);
-150}
-151
-152// convert the files in the region to 
a File
-153
toArchive.addAll(Lists.transform(Arrays.asList(storeDirs), getAsFile));
-154LOG.debug("Archiving " + 
toArchive);
-155    List<File> failedArchive = resolveAndArchive(fs, regionArchiveDir, toArchive,
-156
EnvironmentEdgeManager.currentTime());
-157if (!failedArchive.isEmpty()) {
-158  throw new 
FailedArchiveException("Failed to archive/delete all the files for region:"
-159  + regionDir.getName() + " into 
" + regionArchiveDir
-160  + ". Something is probably awry 
on the filesystem.",
-161  
Collections2.transform(failedArchive, FUNC_FILE_TO_PATH));
-162}
-163// if that was successful, then we 
delete the region
-164return 
deleteRegionWithoutArchiving(fs, regionDir);
-165  }
-166
-167  /**
-168   * Remove from the specified region the 
store files of the specified column family,
-169   * either by archiving them or outright 
deletion
-170   * @param fs the filesystem where the 
store files live
-171   * @param conf {@link Configuration} to 
examine to determine the archive directory
-172   * @param parent Parent region hosting 
the store files
-173   * @param tableDir {@link Path} to 
where the table is being stored (for building the archive path)
-174   * @param family the family hosting the 
store files
-175   * @throws IOException if the files 
could not be correctly disposed.
-176   */
-177  public static void 
archiveFamily(FileSystem fs, Configuration conf,
-178  RegionInfo parent, Path tableDir, 
byte[] family) throws IOException {
-179Path familyDir = new Path(tableDir, 
new Path(parent.getEncodedName(), Bytes.toString(family)));
-180archiveFamilyByFamilyDir(fs, conf, 
parent, familyDir, family);
-181  }
-182
-183  /**
-184   * Removes from the specified region 
the store files of the specified column family,
-185   * either by archiving them or outright 
deletion
-186   * @param fs the filesystem where the 
store files live
-187   * @param conf {@link Configuration} to 
examine to determine the archive directory
-188   * @param parent Parent region hosting 
the store files
-189   * @param familyDir {@link Path} to 
where the family is being stored
-190   * @param family the family hosting the 
store files
-191   * @throws IOException if the files 
could 
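archiveRegion above filters the region directory down to non-hidden store directories before archiving them. A hedged, plain-java.io rendering of that filter-and-collect step; FileFilter stands in for Hadoop's PathFilter, and the directory layout is assumed rather than taken from HBase:

    import java.io.File;
    import java.io.FileFilter;
    import java.util.ArrayList;
    import java.util.List;

    // Sketch of the "non-hidden directories only" selection in archiveRegion:
    // keep directories whose names do not start with '.', then archive those.
    final class StoreDirSelector {
      static List<File> selectStoreDirs(File regionDir) {
        FileFilter nonHiddenDir =
            f -> f.isDirectory() && !f.getName().startsWith(".");
        File[] dirs = regionDir.listFiles(nonHiddenDir);
        List<File> toArchive = new ArrayList<>();
        if (dirs != null) {
          for (File d : dirs) toArchive.add(d);
        }
        return toArchive; // empty list means the region dir can simply be deleted
      }
    }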

[35/51] [partial] hbase-site git commit: Published site at 00095a2ef9442e3fd86c04876c9d91f2f8b23ad8.

2018-03-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bd675fa3/devapidocs/org/apache/hadoop/hbase/replication/TableReplicationStorageBase.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/replication/TableReplicationStorageBase.html b/devapidocs/org/apache/hadoop/hbase/replication/TableReplicationStorageBase.html
deleted file mode 100644
index 70c093f..0000000
--- a/devapidocs/org/apache/hadoop/hbase/replication/TableReplicationStorageBase.html
+++ /dev/null
@@ -1,527 +0,0 @@
-org.apache.hadoop.hbase.replication
-Class 
TableReplicationStorageBase
-
-
-
-https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">java.lang.Object
-
-
-org.apache.hadoop.hbase.replication.TableReplicationStorageBase
-
-
-
-
-
-
-
-Direct Known Subclasses:
-TableReplicationPeerStorage, TableReplicationQueueStorage
-
-
-
-@InterfaceAudience.Private
-public class TableReplicationStorageBase
-extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
-
-
-
-
-
-
-
-
-
-
-
-Field Summary
-
-Fields
-
-Modifier and Type
-Field and Description
-
-
-protected 
org.apache.hadoop.conf.Configuration
-conf
-
-
-private Connection
-connection
-
-
-static byte[]
-FAMILY_HFILE_REFS
-
-
-static byte[]
-FAMILY_PEER
-
-
-static byte[]
-FAMILY_QUEUE
-
-
-static byte[]
-FAMILY_REGIONS
-
-
-static byte[]
-FAMILY_RS_STATE
-
-
-static byte[]
-FAMILY_WAL
-
-
-static byte[]
-QUALIFIER_PEER_CONFIG
-
-
-static byte[]
-QUALIFIER_PEER_STATE
-
-
-static byte[]
-QUALIFIER_STATE_ENABLED
-
-
-static TableName
-REPLICATION_TABLE
-
-
-protected ZKWatcher
-zookeeper
-
-
-
-
-
-
-
-
-
-Constructor Summary
-
-Constructors
-
-Modifier
-Constructor and Description
-
-
-protected 
-TableReplicationStorageBase(ZKWatcherzookeeper,
-   
org.apache.hadoop.conf.Configurationconf)
-
-
-
-
-
-
-
-
-
-Method Summary
-
-All MethodsStatic MethodsInstance MethodsConcrete Methods
-
-Modifier and Type
-Method and Description
-
-
-static TableDescriptorBuilder
-createReplicationTableDescBuilder(org.apache.hadoop.conf.Configurationconf)
-
-
-protected static byte[]
-getRegionQualifier(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in 
java.lang">StringencodedRegionName)
-
-
-protected Table
-getReplicationMetaTable()
-
-
-protected static byte[]
-getServerNameRowKey(ServerNameserverName)
-
-
-
-
-
-
-Methods inherited from classjava.lang.https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
-https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#clone--;
 title="class or interface in java.lang">clone, https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#equals-java.lang.Object-;
 title="class or interface in java.lang">equals, https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#finalize--;
 title="class or interface in java.lang">finalize, https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#getClass--;
 title="class or interface in java.lang">getClass, https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#hashCode--;
 title="class or interface in java.lang">hashCode, https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#notify--;
 title="class or interface in java.lang">notify, https://docs.oracle.com/javase/8/docs/api/ja
 va/lang/Object.html?is-external=true#notifyAll--" title="class or interface in 
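TableReplicationStorageBase above keyed all replication state off a single REPLICATION_TABLE with per-concern column families (peer, queue, WAL, regions, rs-state). A hedged sketch of the helper-key style suggested by getServerNameRowKey and getRegionQualifier; the byte encodings below are invented for illustration and are not the deleted class's actual layout:

    import java.nio.charset.StandardCharsets;

    // Illustrative key/qualifier helpers in the spirit of
    // TableReplicationStorageBase.getServerNameRowKey / getRegionQualifier.
    final class ReplicationKeys {
      // One row per region server; assumed encoding, not the HBase one.
      static byte[] serverNameRowKey(String serverName) {
        return serverName.getBytes(StandardCharsets.UTF_8);
      }
      // One qualifier per region under the "regions" family.
      static byte[] regionQualifier(String encodedRegionName) {
        return ("region:" + encodedRegionName).getBytes(StandardCharsets.UTF_8);
      }
    }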
java.lang">notifyAll, 

[25/51] [partial] hbase-site git commit: Published site at 00095a2ef9442e3fd86c04876c9d91f2f8b23ad8.

2018-03-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bd675fa3/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.ReplicaCallIssuingRunnable.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.ReplicaCallIssuingRunnable.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.ReplicaCallIssuingRunnable.html
index c27b109..4160a88 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.ReplicaCallIssuingRunnable.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.ReplicaCallIssuingRunnable.html
@@ -105,7 +105,7 @@
 097try {
 098  done = waitUntilDone(startTime 
* 1000L + asyncProcess.primaryCallTimeoutMicroseconds);
 099} catch (InterruptedException ex) 
{
-100  LOG.error("Replica thread was 
interrupted - no replica calls: " + ex.getMessage());
+100  LOG.error("Replica thread 
interrupted - no replica calls {}", ex.getMessage());
 101  return;
 102}
 103  }
@@ -149,7 +149,7 @@
 141  if (loc == null) return;
 142  HRegionLocation[] locs = 
loc.getRegionLocations();
 143  if (locs.length == 1) {
-144LOG.warn("No replicas found for " 
+ action.getAction());
+144LOG.warn("No replicas found for 
{}", action.getAction());
 145return;
 146  }
 147  synchronized (replicaResultLock) 
{
@@ -230,8 +230,8 @@
 222  return;
 223} catch (Throwable t) {
 224          // This should not happen. Let's log & retry anyway.
-225  LOG.error("#" + asyncProcess.id 
+ ", Caught throwable while calling. This is unexpected." +
-226  " Retrying. Server is " + 
server + ", tableName=" + tableName, t);
+225  LOG.error("id=" + 
asyncProcess.id + ", caught throwable. Unexpected." +
+226  " Retrying. Server=" + 
server + ", tableName=" + tableName, t);
 227  
receiveGlobalFailure(multiAction, server, numAttempt, t);
 228  return;
 229}
@@ -247,1036 +247,1035 @@
 239}
 240  } catch (Throwable t) {
 241// Something really bad happened. 
We are on the send thread that will now die.
-242LOG.error("Internal AsyncProcess 
#" + asyncProcess.id + " error for "
-243+ tableName + " processing 
for " + server, t);
-244throw new RuntimeException(t);
-245  } finally {
-246
asyncProcess.decTaskCounters(multiAction.getRegions(), server);
-247        if (callsInProgress != null && callable != null && res != null) {
-248  
callsInProgress.remove(callable);
-249}
-250  }
-251}
-252  }
-253
-254  private final Batch.Callback<CResult> callback;
-255  private final BatchErrors errors;
-256  private final 
ConnectionImplementation.ServerErrorTracker errorsByServer;
-257  private final ExecutorService pool;
-258  private final Set<CancellableRegionServerCallable> callsInProgress;
+242LOG.error("id=" + asyncProcess.id 
+ " error for " + tableName + " processing " + server, t);
+243throw new RuntimeException(t);
+244  } finally {
+245
asyncProcess.decTaskCounters(multiAction.getRegions(), server);
+246        if (callsInProgress != null && callable != null && res != null) {
+247  
callsInProgress.remove(callable);
+248}
+249  }
+250}
+251  }
+252
+253  private final Batch.Callback<CResult> callback;
+254  private final BatchErrors errors;
+255  private final 
ConnectionImplementation.ServerErrorTracker errorsByServer;
+256  private final ExecutorService pool;
+257  private final Set<CancellableRegionServerCallable> callsInProgress;
+258
 259
-260
-261  private final TableName tableName;
-262  private final AtomicLong 
actionsInProgress = new AtomicLong(-1);
-263  /**
-264   * The lock controls access to results. 
It is only held when populating results where
-265   * there might be several callers 
(eventual consistency gets). For other requests,
-266   * there's one unique call going on per 
result index.
-267   */
-268  private final Object replicaResultLock 
= new Object();
-269  /**
-270   * Result array.  Null if results are 
not needed. Otherwise, each index corresponds to
-271   * the action index in initial actions 
submitted. For most request types, has null-s for
-272   * requests that are not done, and 
result/exception for those that are done.
-273   * For eventual-consistency gets, 
initially the same applies; at some point, replica calls
-274   * might be started, and 
ReplicaResultState is put at the corresponding indices. The
-275   * returning calls check the type to 
detect when this is the case. After all calls are done,
-276   * ReplicaResultState-s are replaced 
with results for the user.
-277   */
-278  private final Object[] results;
-279  /**
-280   * Indices of replica gets in results. 
If null, all or no 
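ReplicaCallIssuingRunnable above waits out the primary-call timeout before fanning out to replicas, and the fields that follow (replicaResultLock, results) guard the shared result slots. A hedged sketch of the delay-then-fan-out shape; timeout handling is simplified and the names are invented:

    import java.util.concurrent.CountDownLatch;
    import java.util.concurrent.TimeUnit;

    // Illustrative: give the primary replica a head start, then issue a backup
    // call only if it has not answered within the primary-call timeout.
    final class BackupCallScheduler {
      static void issueWithBackup(Runnable primary, Runnable backup,
          long primaryTimeoutMicros) throws InterruptedException {
        CountDownLatch primaryDone = new CountDownLatch(1);
        new Thread(() -> { primary.run(); primaryDone.countDown(); }).start();
        // Wait, like waitUntilDone(startTime * 1000L + primaryCallTimeoutMicroseconds).
        if (!primaryDone.await(primaryTimeoutMicros, TimeUnit.MICROSECONDS)) {
          backup.run(); // primary is slow: fan out to the replica
        }
      }
    }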

[28/51] [partial] hbase-site git commit: Published site at 00095a2ef9442e3fd86c04876c9d91f2f8b23ad8.

2018-03-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bd675fa3/devapidocs/src-html/org/apache/hadoop/hbase/backup/HFileArchiver.FileableStoreFile.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/backup/HFileArchiver.FileableStoreFile.html b/devapidocs/src-html/org/apache/hadoop/hbase/backup/HFileArchiver.FileableStoreFile.html
index f47d627..c3d225c 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/backup/HFileArchiver.FileableStoreFile.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/backup/HFileArchiver.FileableStoreFile.html
@@ -117,219 +117,219 @@
 109   */
 110  public static boolean 
archiveRegion(FileSystem fs, Path rootdir, Path tableDir, Path regionDir)
 111  throws IOException {
-112if (LOG.isDebugEnabled()) {
-113  LOG.debug("ARCHIVING " + 
regionDir.toString());
-114}
-115
-116// otherwise, we archive the files
-117// make sure we can archive
-118if (tableDir == null || regionDir == 
null) {
-119  LOG.error("No archive directory 
could be found because tabledir (" + tableDir
-120          + ") or regiondir (" + regionDir + ") was null. Deleting files instead.");
-121  deleteRegionWithoutArchiving(fs, 
regionDir);
-122  // we should have archived, but 
failed to. Doesn't matter if we deleted
-123  // the archived files correctly or 
not.
-124  return false;
-125}
-126
-127// make sure the regiondir lives 
under the tabledir
-128
Preconditions.checkArgument(regionDir.toString().startsWith(tableDir.toString()));
-129Path regionArchiveDir = 
HFileArchiveUtil.getRegionArchiveDir(rootdir,
-130FSUtils.getTableName(tableDir),
-131regionDir.getName());
-132
-133FileStatusConverter getAsFile = new 
FileStatusConverter(fs);
-134// otherwise, we attempt to archive 
the store files
-135
-136// build collection of just the store 
directories to archive
-137    Collection<File> toArchive = new ArrayList<>();
-138final PathFilter dirFilter = new 
FSUtils.DirFilter(fs);
-139PathFilter nonHidden = new 
PathFilter() {
-140  @Override
-141  public boolean accept(Path file) 
{
-142        return dirFilter.accept(file) && !file.getName().toString().startsWith(".");
-143  }
-144};
-145FileStatus[] storeDirs = 
FSUtils.listStatus(fs, regionDir, nonHidden);
-146// if there no files, we can just 
delete the directory and return;
-147if (storeDirs == null) {
-148  LOG.debug("Region directory " + 
regionDir + " empty.");
-149  return 
deleteRegionWithoutArchiving(fs, regionDir);
-150}
-151
-152// convert the files in the region to 
a File
-153
toArchive.addAll(Lists.transform(Arrays.asList(storeDirs), getAsFile));
-154LOG.debug("Archiving " + 
toArchive);
-155    List<File> failedArchive = resolveAndArchive(fs, regionArchiveDir, toArchive,
-156
EnvironmentEdgeManager.currentTime());
-157if (!failedArchive.isEmpty()) {
-158  throw new 
FailedArchiveException("Failed to archive/delete all the files for region:"
-159  + regionDir.getName() + " into 
" + regionArchiveDir
-160  + ". Something is probably awry 
on the filesystem.",
-161  
Collections2.transform(failedArchive, FUNC_FILE_TO_PATH));
-162}
-163// if that was successful, then we 
delete the region
-164return 
deleteRegionWithoutArchiving(fs, regionDir);
-165  }
-166
-167  /**
-168   * Remove from the specified region the 
store files of the specified column family,
-169   * either by archiving them or outright 
deletion
-170   * @param fs the filesystem where the 
store files live
-171   * @param conf {@link Configuration} to 
examine to determine the archive directory
-172   * @param parent Parent region hosting 
the store files
-173   * @param tableDir {@link Path} to 
where the table is being stored (for building the archive path)
-174   * @param family the family hosting the 
store files
-175   * @throws IOException if the files 
could not be correctly disposed.
-176   */
-177  public static void 
archiveFamily(FileSystem fs, Configuration conf,
-178  RegionInfo parent, Path tableDir, 
byte[] family) throws IOException {
-179Path familyDir = new Path(tableDir, 
new Path(parent.getEncodedName(), Bytes.toString(family)));
-180archiveFamilyByFamilyDir(fs, conf, 
parent, familyDir, family);
-181  }
-182
-183  /**
-184   * Removes from the specified region 
the store files of the specified column family,
-185   * either by archiving them or outright 
deletion
-186   * @param fs the filesystem where the 
store files live
-187   * @param conf {@link Configuration} to 
examine to determine the archive directory
-188   * @param parent Parent region hosting 
the store files
-189   * @param familyDir {@link Path} to 
where the family is being stored
-190   * @param family the family hosting the 
store files
-191   * @throws IOException if the files 
could not be 

[18/51] [partial] hbase-site git commit: Published site at 00095a2ef9442e3fd86c04876c9d91f2f8b23ad8.

2018-03-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bd675fa3/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.FileWriteAccessor.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.FileWriteAccessor.html b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.FileWriteAccessor.html
index 74fbf67..33418d0 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.FileWriteAccessor.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.FileWriteAccessor.html
@@ -27,287 +27,296 @@
 019package 
org.apache.hadoop.hbase.io.hfile.bucket;
 020
 021import java.io.File;
-022import java.io.FileNotFoundException;
-023import java.io.IOException;
-024import java.io.RandomAccessFile;
-025import java.nio.ByteBuffer;
-026import 
java.nio.channels.ClosedChannelException;
-027import java.nio.channels.FileChannel;
-028import java.util.Arrays;
-029import 
org.apache.hadoop.hbase.io.hfile.Cacheable;
-030import 
org.apache.hadoop.hbase.io.hfile.Cacheable.MemoryType;
-031import 
org.apache.hadoop.hbase.io.hfile.CacheableDeserializer;
-032import 
org.apache.hadoop.hbase.nio.ByteBuff;
-033import 
org.apache.hadoop.hbase.nio.SingleByteBuff;
-034import 
org.apache.hadoop.util.StringUtils;
-035import 
org.apache.yetus.audience.InterfaceAudience;
-036import org.slf4j.Logger;
-037import org.slf4j.LoggerFactory;
-038
-039import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-040import 
org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
-041
-042/**
-043 * IO engine that stores data to a file 
on the local file system.
-044 */
-045@InterfaceAudience.Private
-046public class FileIOEngine implements 
IOEngine {
-047  private static final Logger LOG = 
LoggerFactory.getLogger(FileIOEngine.class);
-048  public static final String 
FILE_DELIMITER = ",";
-049  private final String[] filePaths;
-050  private final FileChannel[] 
fileChannels;
-051  private final RandomAccessFile[] 
rafs;
-052
-053  private final long sizePerFile;
-054  private final long capacity;
-055
-056  private FileReadAccessor readAccessor = 
new FileReadAccessor();
-057  private FileWriteAccessor writeAccessor 
= new FileWriteAccessor();
-058
-059  public FileIOEngine(long capacity, 
boolean maintainPersistence, String... filePaths)
-060  throws IOException {
-061this.sizePerFile = capacity / 
filePaths.length;
-062this.capacity = this.sizePerFile * 
filePaths.length;
-063this.filePaths = filePaths;
-064this.fileChannels = new 
FileChannel[filePaths.length];
-065if (!maintainPersistence) {
-066  for (String filePath : filePaths) 
{
-067File file = new File(filePath);
-068if (file.exists()) {
-069  if (LOG.isDebugEnabled()) {
-070LOG.debug("File " + filePath 
+ " already exists. Deleting!!");
-071  }
-072  file.delete();
-073  // If deletion fails still we 
can manage with the writes
-074}
-075  }
-076}
-077this.rafs = new 
RandomAccessFile[filePaths.length];
-078    for (int i = 0; i < filePaths.length; i++) {
-079  String filePath = filePaths[i];
-080  try {
-081rafs[i] = new 
RandomAccessFile(filePath, "rw");
-082long totalSpace = new 
File(filePath).getTotalSpace();
-083        if (totalSpace < sizePerFile) {
-084          // The next setLength call will throw an exception; logging this message
-085          // just records the detailed reason for that exception.
-086  String msg = "Only " + 
StringUtils.byteDesc(totalSpace)
-087  + " total space under " + 
filePath + ", not enough for requested "
-088  + 
StringUtils.byteDesc(sizePerFile);
-089  LOG.warn(msg);
-090}
-091rafs[i].setLength(sizePerFile);
-092fileChannels[i] = 
rafs[i].getChannel();
-093LOG.info("Allocating cache " + 
StringUtils.byteDesc(sizePerFile)
-094+ ", on the path:" + 
filePath);
-095  } catch (IOException fex) {
-096LOG.error("Failed allocating 
cache on " + filePath, fex);
-097shutdown();
-098throw fex;
-099  }
-100}
-101  }
-102
-103  @Override
-104  public String toString() {
-105return "ioengine=" + 
this.getClass().getSimpleName() + ", paths="
-106+ Arrays.asList(filePaths) + ", 
capacity=" + String.format("%,d", this.capacity);
-107  }
-108
-109  /**
-110   * File IO engine is always able to 
support persistent storage for the cache
-111   * @return true
-112   */
-113  @Override
-114  public boolean isPersistent() {
-115return true;
-116  }
-117
-118  /**
-119   * Transfers data from file to the 
given byte buffer
-120   * @param offset The offset in the file 
where the first byte to be read
-121   * @param length The length of buffer 
that should be allocated for reading
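The FileReadAccessor and FileWriteAccessor members in the diff above let one offset-walking loop serve both directions of transfer. A hedged sketch of that accessor pattern over FileChannel; the interface and class names are invented for illustration:

    import java.io.IOException;
    import java.nio.ByteBuffer;
    import java.nio.channels.FileChannel;

    // Sketch of the FileReadAccessor/FileWriteAccessor idea: one functional shape
    // for "move bytes between a buffer and a channel at an offset".
    interface ChannelAccessor {
      int access(FileChannel ch, ByteBuffer buf, long offset) throws IOException;
    }

    final class Accessors {
      static final ChannelAccessor READ  = (ch, buf, off) -> ch.read(buf, off);
      static final ChannelAccessor WRITE = (ch, buf, off) -> ch.write(buf, off);

      // Shared loop: keep calling the accessor until the buffer is drained/filled.
      static void transferFully(ChannelAccessor a, FileChannel ch,
          ByteBuffer buf, long offset) throws IOException {
        while (buf.hasRemaining()) {
          int n = a.access(ch, buf, offset);
          if (n < 0) throw new IOException("Unexpected end of channel");
          offset += n;
        }
      }
    }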

[06/51] [partial] hbase-site git commit: Published site at 00095a2ef9442e3fd86c04876c9d91f2f8b23ad8.

2018-03-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bd675fa3/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.ProcedureExecutorListener.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.ProcedureExecutorListener.html b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.ProcedureExecutorListener.html
index 3bc66bb..97aa79c 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.ProcedureExecutorListener.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.ProcedureExecutorListener.html
@@ -1435,459 +1435,460 @@
 1427   */
 1428  private void execProcedure(final RootProcedureState procStack,
 1429      final Procedure<TEnvironment> procedure) {
-1430
Preconditions.checkArgument(procedure.getState() == ProcedureState.RUNNABLE);
-1431
-1432// Procedures can suspend 
themselves. They skip out by throwing a ProcedureSuspendedException.
-1433// The exception is caught below and 
then we hurry to the exit without disturbing state. The
-1434// idea is that the processing of 
this procedure will be unsuspended later by an external event
-1435    // such as the report of a region open. TODO: Currently, it's possible for two worker threads
-1436// to be working on the same 
procedure concurrently (locking in procedures is NOT about
-1437// concurrency but about tying an 
entity to a procedure; i.e. a region to a particular
-1438// procedure instance). This can 
make for issues if both threads are changing state.
-1439// See 
env.getProcedureScheduler().wakeEvent(regionNode.getProcedureEvent());
-1440// in 
RegionTransitionProcedure#reportTransition for example of Procedure putting
-1441// itself back on the scheduler 
making it possible for two threads running against
-1442// the one Procedure. Might be ok if 
they are both doing different, idempotent sections.
-1443boolean suspended = false;
-1444
-1445// Whether to 're-' -execute; run 
through the loop again.
-1446boolean reExecute = false;
-1447
-1448ProcedureTEnvironment[] 
subprocs = null;
-1449do {
-1450  reExecute = false;
-1451  try {
-1452subprocs = 
procedure.doExecute(getEnvironment());
-1453if (subprocs != null  
subprocs.length == 0) {
-1454  subprocs = null;
-1455}
-1456  } catch 
(ProcedureSuspendedException e) {
-1457if (LOG.isTraceEnabled()) {
-1458  LOG.trace("Suspend " + 
procedure);
-1459}
-1460suspended = true;
-1461  } catch (ProcedureYieldException 
e) {
-1462if (LOG.isTraceEnabled()) {
-1463  LOG.trace("Yield " + procedure 
+ ": " + e.getMessage(), e);
-1464}
-1465scheduler.yield(procedure);
-1466return;
-1467  } catch (InterruptedException e) 
{
-1468if (LOG.isTraceEnabled()) {
-1469  LOG.trace("Yield interrupt " + 
procedure + ": " + e.getMessage(), e);
-1470}
-1471
handleInterruptedException(procedure, e);
-1472scheduler.yield(procedure);
-1473return;
-1474  } catch (Throwable e) {
-1475// Catch NullPointerExceptions 
or similar errors...
-1476String msg = "CODE-BUG: Uncaught 
runtime exception: " + procedure;
-1477LOG.error(msg, e);
-1478procedure.setFailure(new 
RemoteProcedureException(msg, e));
-1479  }
-1480
-1481  if (!procedure.isFailed()) {
-1482if (subprocs != null) {
-1483  if (subprocs.length == 1 
 subprocs[0] == procedure) {
-1484// Procedure returned 
itself. Quick-shortcut for a state machine-like procedure;
-1485// i.e. we go around this 
loop again rather than go back out on the scheduler queue.
-1486subprocs = null;
-1487reExecute = true;
-1488if (LOG.isTraceEnabled()) 
{
-1489  LOG.trace("Short-circuit 
to next step on pid=" + procedure.getProcId());
-1490}
-1491  } else {
-1492// Yield the current 
procedure, and make the subprocedure runnable
-1493// subprocs may come back 
'null'.
-1494subprocs = 
initializeChildren(procStack, procedure, subprocs);
-1495LOG.info("Initialized 
subprocedures=" +
-1496  (subprocs == null? null:
-1497
Stream.of(subprocs).map(e - "{" + e.toString() + "}").
-1498
collect(Collectors.toList()).toString()));
-1499  }
-1500} else if (procedure.getState() 
== ProcedureState.WAITING_TIMEOUT) {
-1501  if (LOG.isTraceEnabled()) {
-1502LOG.trace("Added to 
timeoutExecutor " + procedure);
-1503  }
-1504  
timeoutExecutor.add(procedure);
-1505} else if (!suspended) {
-1506  // No subtask, so we are 
done
-1507  
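The loop above distinguishes three exits: re-execute in place (a state-machine short-circuit), yield back to the scheduler, or suspend until an external event wakes the procedure. A minimal sketch of that worker-loop shape, with hypothetical names rather than the procedure-v2 API:

import java.util.concurrent.atomic.AtomicInteger;

// A stripped-down illustration of the loop shape above: a step either
// finishes, asks to re-execute immediately, or suspends for an external event.
// All names here are hypothetical; this is not the HBase API.
public class ExecLoopSketch {
  enum Outcome { DONE, RE_EXECUTE, SUSPENDED }

  interface Step { Outcome run() throws Exception; }

  static void execute(Step step) {
    boolean reExecute;
    do {
      reExecute = false;
      try {
        switch (step.run()) {
          case RE_EXECUTE: reExecute = true; break;   // short-circuit to the next state
          case SUSPENDED:  return;                    // woken later by an event
          case DONE:       System.out.println("success"); break;
        }
      } catch (Exception e) {
        System.err.println("step failed: " + e);      // a real impl marks failure, rolls back
        return;
      }
    } while (reExecute);
  }

  public static void main(String[] args) {
    AtomicInteger state = new AtomicInteger();
    // A three-state machine that re-executes twice, then completes.
    execute(() -> state.incrementAndGet() < 3 ? Outcome.RE_EXECUTE : Outcome.DONE);
  }
}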

[29/51] [partial] hbase-site git commit: Published site at 00095a2ef9442e3fd86c04876c9d91f2f8b23ad8.

2018-03-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bd675fa3/devapidocs/src-html/org/apache/hadoop/hbase/backup/HFileArchiver.FileablePath.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/backup/HFileArchiver.FileablePath.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/backup/HFileArchiver.FileablePath.html
index f47d627..c3d225c 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/backup/HFileArchiver.FileablePath.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/backup/HFileArchiver.FileablePath.html
@@ -117,219 +117,219 @@
   */
   public static boolean archiveRegion(FileSystem fs, Path rootdir, Path tableDir, Path regionDir)
       throws IOException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("ARCHIVING " + regionDir.toString());
-    }
-
-    // otherwise, we archive the files
-    // make sure we can archive
-    if (tableDir == null || regionDir == null) {
-      LOG.error("No archive directory could be found because tabledir (" + tableDir
-          + ") or regiondir (" + regionDir + ") was null. Deleting files instead.");
-      deleteRegionWithoutArchiving(fs, regionDir);
-      // we should have archived, but failed to. Doesn't matter if we deleted
-      // the archived files correctly or not.
-      return false;
-    }
-
-    // make sure the regiondir lives under the tabledir
-    Preconditions.checkArgument(regionDir.toString().startsWith(tableDir.toString()));
-    Path regionArchiveDir = HFileArchiveUtil.getRegionArchiveDir(rootdir,
-        FSUtils.getTableName(tableDir),
-        regionDir.getName());
-
-    FileStatusConverter getAsFile = new FileStatusConverter(fs);
-    // otherwise, we attempt to archive the store files
-
-    // build collection of just the store directories to archive
-    Collection<File> toArchive = new ArrayList<>();
-    final PathFilter dirFilter = new FSUtils.DirFilter(fs);
-    PathFilter nonHidden = new PathFilter() {
-      @Override
-      public boolean accept(Path file) {
-        return dirFilter.accept(file) && !file.getName().toString().startsWith(".");
-      }
-    };
-    FileStatus[] storeDirs = FSUtils.listStatus(fs, regionDir, nonHidden);
-    // if there are no files, we can just delete the directory and return;
-    if (storeDirs == null) {
-      LOG.debug("Region directory " + regionDir + " empty.");
-      return deleteRegionWithoutArchiving(fs, regionDir);
-    }
-
-    // convert the files in the region to a File
-    toArchive.addAll(Lists.transform(Arrays.asList(storeDirs), getAsFile));
-    LOG.debug("Archiving " + toArchive);
-    List<File> failedArchive = resolveAndArchive(fs, regionArchiveDir, toArchive,
-        EnvironmentEdgeManager.currentTime());
-    if (!failedArchive.isEmpty()) {
-      throw new FailedArchiveException("Failed to archive/delete all the files for region:"
-          + regionDir.getName() + " into " + regionArchiveDir
-          + ". Something is probably awry on the filesystem.",
-          Collections2.transform(failedArchive, FUNC_FILE_TO_PATH));
-    }
-    // if that was successful, then we delete the region
-    return deleteRegionWithoutArchiving(fs, regionDir);
-  }
-
-  /**
-   * Remove from the specified region the store files of the specified column family,
-   * either by archiving them or outright deletion
-   * @param fs the filesystem where the store files live
-   * @param conf {@link Configuration} to examine to determine the archive directory
-   * @param parent Parent region hosting the store files
-   * @param tableDir {@link Path} to where the table is being stored (for building the archive path)
-   * @param family the family hosting the store files
-   * @throws IOException if the files could not be correctly disposed.
-   */
-  public static void archiveFamily(FileSystem fs, Configuration conf,
-      RegionInfo parent, Path tableDir, byte[] family) throws IOException {
-    Path familyDir = new Path(tableDir, new Path(parent.getEncodedName(), Bytes.toString(family)));
-    archiveFamilyByFamilyDir(fs, conf, parent, familyDir, family);
-  }
-
-  /**
-   * Removes from the specified region the store files of the specified column family,
-   * either by archiving them or outright deletion
-   * @param fs the filesystem where the store files live
-   * @param conf {@link Configuration} to examine to determine the archive directory
-   * @param parent Parent region hosting the store files
-   * @param familyDir {@link Path} to where the family is being stored
-   * @param family the family hosting the store files
-   * @throws IOException if the files could not be correctly disposed.
-   */
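archiveRegion() decides between archiving and plain deletion only after listing the visible (non-hidden) store directories. A small sketch of that non-hidden listing, assuming hadoop-common on the classpath; the directory layout is illustrative, not a statement about HBase paths:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;

// Lists the visible store directories under a region directory, the same
// filtering idea archiveRegion() applies before deciding what to archive.
public class NonHiddenDirs {
  public static void main(String[] args) throws IOException {
    FileSystem fs = FileSystem.getLocal(new Configuration());
    Path regionDir = new Path("/tmp/region");
    if (!fs.exists(regionDir)) {
      fs.mkdirs(new Path(regionDir, "cf1"));   // a sample column-family dir
      fs.mkdirs(new Path(regionDir, ".tmp"));  // hidden, should be skipped
    }
    PathFilter nonHidden = new PathFilter() {
      @Override
      public boolean accept(Path file) {
        // skip dot-prefixed entries such as .tmp or .regioninfo
        return !file.getName().startsWith(".");
      }
    };
    FileStatus[] dirs = fs.listStatus(regionDir, nonHidden);
    for (FileStatus d : dirs) {
      System.out.println(d.getPath());
    }
  }
}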

[08/51] [partial] hbase-site git commit: Published site at 00095a2ef9442e3fd86c04876c9d91f2f8b23ad8.

2018-03-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bd675fa3/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.FailedProcedure.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.FailedProcedure.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.FailedProcedure.html
index 3bc66bb..97aa79c 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.FailedProcedure.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.FailedProcedure.html
@@ -1435,459 +1435,460 @@

[02/51] [partial] hbase-site git commit: Published site at 00095a2ef9442e3fd86c04876c9d91f2f8b23ad8.

2018-03-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bd675fa3/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.html
index 3bc66bb..97aa79c 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.html
@@ -1435,459 +1435,460 @@

[43/51] [partial] hbase-site git commit: Published site at 00095a2ef9442e3fd86c04876c9d91f2f8b23ad8.

2018-03-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bd675fa3/devapidocs/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.html
 
b/devapidocs/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.html
index 2881954..e4cd8bf 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.html
@@ (generated Javadoc page; HTML markup elided in this archive) @@
The substantive changes on the MergeTableRegionsProcedure page: both overloads of
checkRegionsToMerge take a new leading MasterProcedureEnv parameter,

  private static void checkRegionsToMerge(MasterProcedureEnv env,
      RegionInfo[] regionsToMerge, boolean forcible)
  private static void checkRegionsToMerge(MasterProcedureEnv env,
      RegionInfo regionToMergeA, RegionInfo regionToMergeB, boolean forcible)

with the two-region overload now documented as "One time checks."; the methods
inherited from AbstractStateMachineTableProcedure gain checkOnline; the remaining
hunks are Javadoc anchor renumbering for the unchanged fields (LOG, traceEnabled,
lock, regionLocation, regionsToMerge, mergedRegion, forcible) and the three
MergeTableRegionsProcedure constructors.
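Where such "one time checks" typically live is a small static validator run before the procedure is queued. A hedged sketch of the general shape; the rules shown (at least two regions, same table) are illustrative assumptions, not the exact HBase checks:

import java.util.Arrays;

public class MergePrechecks {
  static void checkRegionsToMerge(String table, String[] regionTables, boolean forcible) {
    if (regionTables.length < 2) {
      throw new IllegalArgumentException("Need at least two regions to merge");
    }
    for (String t : regionTables) {
      if (!t.equals(table)) {
        throw new IllegalArgumentException("Region of table " + t + " cannot merge into " + table);
      }
    }
    // With forcible=false a real implementation would also require adjacency.
  }

  public static void main(String[] args) {
    checkRegionsToMerge("t1", new String[] { "t1", "t1" }, false);
    System.out.println("preconditions ok for " + Arrays.asList("t1", "t1"));
  }
}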
   

[40/51] [partial] hbase-site git commit: Published site at 00095a2ef9442e3fd86c04876c9d91f2f8b23ad8.

2018-03-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bd675fa3/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/MasterProcedureEnv.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/MasterProcedureEnv.html
 
b/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/MasterProcedureEnv.html
index 159fa7e..355674a 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/MasterProcedureEnv.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/MasterProcedureEnv.html
@@ -223,6 +223,21 @@
 
 
 
+private static void
+MergeTableRegionsProcedure.checkRegionsToMerge(MasterProcedureEnv env,
+    RegionInfo[] regionsToMerge, boolean forcible)
+
+private static void
+MergeTableRegionsProcedure.checkRegionsToMerge(MasterProcedureEnv env,
+    RegionInfo regionToMergeA, RegionInfo regionToMergeB, boolean forcible)
+    One time checks.
+
+protected static void
+AbstractStateMachineTableProcedure.checkOnline(MasterProcedureEnv env, RegionInfo ri)
+    Check region is online.

(The rest of this hunk renumbers the class-use rows for the unchanged methods:
checkSplittable, buildRegionOpenInfoRequest, checkTableModifiable,
cleanAnyRemainingRows, completionCleanup, createDirectory, createFilesystemLayout,
createFsLayout, createHdfsRegions, deleteAssignmentState,
deleteColumnFamilyFromFileSystem, and related helpers.)
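checkOnline() guards procedures against operating on regions that are not currently open. A hedged sketch of that kind of guard; RegionState here is a stand-in enum, not the HBase RegionStates API:

public class CheckOnlineSketch {
  enum RegionState { OPEN, CLOSED, SPLIT, MERGED }

  // Reject the operation up front if the region's known state is not OPEN.
  static void checkOnline(String regionName, RegionState state) {
    if (state != RegionState.OPEN) {
      throw new IllegalStateException("Region " + regionName + " is not online: " + state);
    }
  }

  public static void main(String[] args) {
    checkOnline("r1", RegionState.OPEN);       // passes
    try {
      checkOnline("r2", RegionState.SPLIT);    // rejected
    } catch (IllegalStateException e) {
      System.out.println(e.getMessage());
    }
  }
}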
 

[11/51] [partial] hbase-site git commit: Published site at 00095a2ef9442e3fd86c04876c9d91f2f8b23ad8.

2018-03-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bd675fa3/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.TableSkewCostFunction.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.TableSkewCostFunction.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.TableSkewCostFunction.html
index 1bdfb9a..22bebe1 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.TableSkewCostFunction.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.TableSkewCostFunction.html
@@ -300,7 +300,7 @@
       continue;
     }
     if (!c.isNeeded()) {
-      LOG.debug(c.getClass().getName() + " indicated that its cost should not be considered");
+      LOG.debug("{} not needed", c.getClass().getSimpleName());
       continue;
     }
     sumMultiplier += multiplier;
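The change swaps eager string concatenation for SLF4J's parameterized form, which defers message construction until the level is actually enabled. A minimal, runnable comparison:

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class LogParamDemo {
  private static final Logger LOG = LoggerFactory.getLogger(LogParamDemo.class);

  public static void main(String[] args) {
    Object costFunction = new Object();
    // Concatenation builds the message even when DEBUG is off:
    LOG.debug(costFunction.getClass().getName() + " indicated that its cost should not be considered");
    // The parameterized form formats only if DEBUG is enabled:
    LOG.debug("{} not needed", costFunction.getClass().getSimpleName());
  }
}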

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bd675fa3/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.WriteRequestCostFunction.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.WriteRequestCostFunction.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.WriteRequestCostFunction.html
index 1bdfb9a..22bebe1 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.WriteRequestCostFunction.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.WriteRequestCostFunction.html

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bd675fa3/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.html
index 1bdfb9a..22bebe1 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.html

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bd675fa3/devapidocs/src-html/org/apache/hadoop/hbase/master/procedure/AbstractStateMachineTableProcedure.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/procedure/AbstractStateMachineTableProcedure.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/procedure/AbstractStateMachineTableProcedure.html
index aae0575..e85a77c 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/procedure/AbstractStateMachineTableProcedure.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/procedure/AbstractStateMachineTableProcedure.html
@@ -34,150 +34,181 @@
 import org.apache.hadoop.hbase.TableNotDisabledException;
 import org.apache.hadoop.hbase.TableNotEnabledException;
 import org.apache.hadoop.hbase.TableNotFoundException;
-import org.apache.hadoop.hbase.client.RegionInfo;
-import org.apache.hadoop.hbase.client.TableState;
-import org.apache.hadoop.hbase.master.MasterFileSystem;
-import org.apache.hadoop.hbase.master.MasterServices;
-import org.apache.hadoop.hbase.master.TableStateManager;
-import org.apache.hadoop.hbase.procedure2.StateMachineProcedure;
-import org.apache.hadoop.hbase.security.User;
-import org.apache.yetus.audience.InterfaceAudience;
-
-/**
- * Base class for all the Table procedures that want to use a StateMachineProcedure.
- * It provides helpers like basic locking, sync latch, and toStringClassDetails().
- */
-@InterfaceAudience.Private
-public abstract class AbstractStateMachineTableProcedure<TState>
-    extends StateMachineProcedure<MasterProcedureEnv, TState>
-    implements TableProcedureInterface {
-
-  // used for compatibility with old clients
-  private
[38/51] [partial] hbase-site git commit: Published site at 00095a2ef9442e3fd86c04876c9d91f2f8b23ad8.

2018-03-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bd675fa3/devapidocs/org/apache/hadoop/hbase/regionserver/StoreScanner.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/StoreScanner.html 
b/devapidocs/org/apache/hadoop/hbase/regionserver/StoreScanner.html
index d918552..2b5360c 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/StoreScanner.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/StoreScanner.html
@@ (generated Javadoc page; HTML markup elided in this archive) @@
Only generated anchors changed on the StoreScanner page. The members whose
documentation rows were renumbered: checkFlushed(), getScannerOrder(),
parallelSeek(List<? extends KeyValueScanner> scanners, Cell kv) ("Seek storefiles
in parallel to optimize IO latency as much as possible"), getAllScannersForTesting(),
enableLazySeekGlobally(boolean enable), getEstimatedNumberOfKvsScanned(),
getNextIndexedKey(), and shipped() ("Called after a batch of rows scanned and set
to be returned to client.").
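parallelSeek() fans one seek per underlying scanner out to a thread pool and waits for all of them before the scan continues. A hedged sketch of that pattern; Seekable stands in for KeyValueScanner:

import java.util.Arrays;
import java.util.List;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class ParallelSeekSketch {
  interface Seekable { void seek(byte[] key) throws Exception; }

  static void parallelSeek(ExecutorService pool, List<Seekable> scanners, byte[] key)
      throws InterruptedException {
    CountDownLatch done = new CountDownLatch(scanners.size());
    for (Seekable s : scanners) {
      pool.execute(() -> {
        try {
          s.seek(key);                 // position this scanner at (or after) key
        } catch (Exception e) {
          e.printStackTrace();         // a real implementation would record and rethrow
        } finally {
          done.countDown();
        }
      });
    }
    done.await();                      // continue only once every seek has landed
  }

  public static void main(String[] args) throws InterruptedException {
    ExecutorService pool = Executors.newFixedThreadPool(2);
    Seekable a = k -> System.out.println("scanner A seeked");
    Seekable b = k -> System.out.println("scanner B seeked");
    parallelSeek(pool, Arrays.asList(a, b), new byte[] { 1 });
    pool.shutdown();
    System.out.println("all scanners positioned");
  }
}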

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bd675fa3/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/ChunkCreator.MemStoreChunkPool.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/ChunkCreator.MemStoreChunkPool.html
 
b/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/ChunkCreator.MemStoreChunkPool.html
index 3ed57d2..ff2dea2 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/ChunkCreator.MemStoreChunkPool.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/ChunkCreator.MemStoreChunkPool.html
@@ -121,7 +121,8 @@
 private ChunkCreator.MemStoreChunkPool
-ChunkCreator.initializePool(long globalMemStoreSize,
+ChunkCreator.initializePool(String label,
+                            long globalMemStoreSize,
                             float poolSizePercentage,
                             float initialCountPercentage,
                             int chunkSize,

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bd675fa3/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/HeapMemoryManager.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/HeapMemoryManager.html
 
b/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/HeapMemoryManager.html
index 5de5991..1ef5e53 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/HeapMemoryManager.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/HeapMemoryManager.html
@@ -152,7 +152,8 @@
 private ChunkCreator.MemStoreChunkPool
-ChunkCreator.initializePool(long globalMemStoreSize,
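Both call sites show initializePool() gaining a label argument ahead of the sizing parameters. Turning such percentage parameters into chunk counts is simple arithmetic; a hedged sketch with illustrative names and rounding, not ChunkCreator's actual body:

public class ChunkPoolSizing {
  // Derives pool capacity and pre-created chunk count from a memory budget.
  static int[] size(long globalMemStoreSize, float poolSizePercentage,
      float initialCountPercentage, int chunkSize) {
    if (poolSizePercentage <= 0) {
      return new int[] { 0, 0 };  // pool disabled
    }
    int maxCount = (int) (globalMemStoreSize * poolSizePercentage / chunkSize);
    int initialCount = (int) (initialCountPercentage * maxCount);
    return new int[] { maxCount, initialCount };
  }

  public static void main(String[] args) {
    // 1 GiB budget, 10% pooled, 20% pre-created, 2 MiB chunks.
    int[] s = size(1024L * 1024 * 1024, 0.1f, 0.2f, 2 * 1024 * 1024);
    System.out.println("maxCount=" + s[0] + " initialCount=" + s[1]);
  }
}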

[14/51] [partial] hbase-site git commit: Published site at 00095a2ef9442e3fd86c04876c9d91f2f8b23ad8.

2018-03-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bd675fa3/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.StoreFileSplitter.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.StoreFileSplitter.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.StoreFileSplitter.html
index d984f0e..c1f4d85 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.StoreFileSplitter.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.StoreFileSplitter.html
@@ -115,753 +115,756 @@
       final RegionInfo regionToSplit, final byte[] splitRow) throws IOException {
     super(env, regionToSplit);
     preflightChecks(env, true);
-    this.bestSplitRow = splitRow;
-    checkSplittable(env, regionToSplit, bestSplitRow);
-    final TableName table = regionToSplit.getTable();
-    final long rid = getDaughterRegionIdTimestamp(regionToSplit);
-    this.daughter_1_RI = RegionInfoBuilder.newBuilder(table)
-        .setStartKey(regionToSplit.getStartKey())
-        .setEndKey(bestSplitRow)
-        .setSplit(false)
-        .setRegionId(rid)
-        .build();
-    this.daughter_2_RI = RegionInfoBuilder.newBuilder(table)
-        .setStartKey(bestSplitRow)
-        .setEndKey(regionToSplit.getEndKey())
-        .setSplit(false)
-        .setRegionId(rid)
-        .build();
-    TableDescriptor htd = env.getMasterServices().getTableDescriptors().get(getTableName());
-    if (htd.getRegionSplitPolicyClassName() != null) {
-      // Since we don't have a region reference here, create the split policy instance without it.
-      // This can be used to invoke methods which don't require a Region reference. This
-      // instantiation of a class on the Master side, though it only makes sense on the
-      // RegionServer side, is for Phoenix Local Indexing. Refer to HBASE-12583 for more information.
-      Class<? extends RegionSplitPolicy> clazz =
-          RegionSplitPolicy.getSplitPolicyClass(htd, env.getMasterConfiguration());
-      this.splitPolicy = ReflectionUtils.newInstance(clazz, env.getMasterConfiguration());
-    }
-  }
-
-  /**
-   * Check whether the region is splittable
-   * @param env MasterProcedureEnv
-   * @param regionToSplit parent Region to be split
-   * @param splitRow if splitRow is not specified, will first try to get bestSplitRow from the RS
-   * @throws IOException
-   */
-  private void checkSplittable(final MasterProcedureEnv env,
-      final RegionInfo regionToSplit, final byte[] splitRow) throws IOException {
-    // Ask the remote RS if this region is splittable.
-    // If we get an IOE, report it along w/ the failure so we can see why we are not splittable now.
-    if (regionToSplit.getReplicaId() != RegionInfo.DEFAULT_REPLICA_ID) {
-      throw new IllegalArgumentException("Can't invoke split on non-default regions directly");
-    }
-    RegionStateNode node =
-        env.getAssignmentManager().getRegionStates().getRegionStateNode(getParentRegion());
-    IOException splittableCheckIOE = null;
-    boolean splittable = false;
-    if (node != null) {
-      try {
-        if (bestSplitRow == null || bestSplitRow.length == 0) {
-          LOG.info("splitKey isn't explicitly specified, will try to find a best split key from the RS");
-        }
-        // Always set the bestSplitRow request to true here;
-        // we need to call Region#checkSplit to check whether it is splittable or not
-        GetRegionInfoResponse response =
-            Util.getRegionInfoResponse(env, node.getRegionLocation(), node.getRegionInfo(), true);
-        if (bestSplitRow == null || bestSplitRow.length == 0) {
-          bestSplitRow = response.hasBestSplitRow() ? response.getBestSplitRow().toByteArray() : null;
-        }
-        splittable = response.hasSplittable() && response.getSplittable();
-
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Splittable=" + splittable + " " + node.toShortString());
-        }
-      } catch (IOException e) {
-        splittableCheckIOE = e;
-      }
-    }
-
-    if (!splittable) {
-      IOException e = new IOException(regionToSplit.getShortNameToLog() + " NOT splittable");
-      if (splittableCheckIOE != null) e.initCause(splittableCheckIOE);
-      throw e;
-    }
-
-    if (bestSplitRow == null || bestSplitRow.length == 0) {
-      throw new DoNotRetryIOException("Region not splittable because bestSplitPoint = null, "
-          + "maybe table is too small for auto split. For force split, try specifying split
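checkSplittable() probes the serving RegionServer and, if the probe itself failed, attaches that failure as the cause of the "NOT splittable" error. A hedged sketch of that probe-then-validate shape; the interface is a stand-in, not the HBase RPC types:

import java.io.IOException;
import java.util.Optional;

public class SplittableCheckSketch {
  interface RegionProbe {
    boolean splittable() throws IOException;
    Optional<byte[]> bestSplitRow() throws IOException;
  }

  // Ask the serving node, capture any probe failure, and surface it as the
  // cause if the answer is "not splittable".
  static byte[] checkSplittable(RegionProbe probe, byte[] requestedSplitRow) throws IOException {
    boolean splittable = false;
    IOException probeFailure = null;
    byte[] splitRow = requestedSplitRow;
    try {
      splittable = probe.splittable();
      if (splitRow == null || splitRow.length == 0) {
        splitRow = probe.bestSplitRow().orElse(null);  // let the server pick
      }
    } catch (IOException e) {
      probeFailure = e;  // reported alongside the failure below
    }
    if (!splittable) {
      IOException e = new IOException("region NOT splittable");
      if (probeFailure != null) {
        e.initCause(probeFailure);
      }
      throw e;
    }
    return splitRow;
  }

  public static void main(String[] args) throws IOException {
    byte[] row = checkSplittable(new RegionProbe() {
      public boolean splittable() { return true; }
      public Optional<byte[]> bestSplitRow() { return Optional.of(new byte[] { 0x42 }); }
    }, null);
    System.out.println("split at row of length " + row.length);
  }
}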

[41/51] [partial] hbase-site git commit: Published site at 00095a2ef9442e3fd86c04876c9d91f2f8b23ad8.

2018-03-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bd675fa3/devapidocs/org/apache/hadoop/hbase/master/procedure/AbstractStateMachineTableProcedure.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/procedure/AbstractStateMachineTableProcedure.html
 
b/devapidocs/org/apache/hadoop/hbase/master/procedure/AbstractStateMachineTableProcedure.html
index bd28f25..1e2eb5b 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/master/procedure/AbstractStateMachineTableProcedure.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/master/procedure/AbstractStateMachineTableProcedure.html
@@ -18,8 +18,8 @@
(Generated Javadoc page; HTML markup and page scripts elided in this archive.)
The substantive change on the AbstractStateMachineTableProcedure page is one new
static helper in the method summary and detail tables:

  protected static void checkOnline(MasterProcedureEnv env, RegionInfo ri)
      Check region is online.

The tables gain a "Static Methods" tab accordingly; the rows for checkTableModifiable,
getRegionDir, getTableName, getTableOperationType, getUser, preflightChecks,
releaseLock, releaseSyncLatch, setUser, and toStringClassDetails, plus the syncLatch
and user fields and the three constructors, are only renumbered.

[33/51] [partial] hbase-site git commit: Published site at 00095a2ef9442e3fd86c04876c9d91f2f8b23ad8.

2018-03-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bd675fa3/devapidocs/org/apache/hadoop/hbase/replication/class-use/TableReplicationQueueStorage.WALCell.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/replication/class-use/TableReplicationQueueStorage.WALCell.html
 
b/devapidocs/org/apache/hadoop/hbase/replication/class-use/TableReplicationQueueStorage.WALCell.html
deleted file mode 100644
index 8f5e687..000
--- 
a/devapidocs/org/apache/hadoop/hbase/replication/class-use/TableReplicationQueueStorage.WALCell.html
+++ /dev/null
@@ -1,190 +0,0 @@
(Deleted generated page: "Uses of Class
org.apache.hadoop.hbase.replication.TableReplicationQueueStorage.WALCell".
Besides standard Javadoc navigation chrome, it listed the uses in package
org.apache.hadoop.hbase.replication ("Multi Cluster Replication"):

  static TableReplicationQueueStorage.WALCell create(Cell cell)
  private List<TableReplicationQueueStorage.WALCell> getWALsInQueue0(Table table,
      ServerName serverName, String queueId)
      List all WALs for the specific region server and queueId.
  private List<TableReplicationQueueStorage.WALCell> result2WALCells(Result r)
      Parse the WALCell list from a HBase result.)

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bd675fa3/devapidocs/org/apache/hadoop/hbase/replication/class-use/TableReplicationQueueStorage.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/replication/class-use/TableReplicationQueueStorage.html
 
b/devapidocs/org/apache/hadoop/hbase/replication/class-use/TableReplicationQueueStorage.html
deleted file mode 100644
index 1ea13a0..000
--- 
a/devapidocs/org/apache/hadoop/hbase/replication/class-use/TableReplicationQueueStorage.html
+++ /dev/null
@@ -1,125 +0,0 @@
(Deleted generated pages for
org.apache.hadoop.hbase.replication.TableReplicationQueueStorage. The class was
documented as:

  @InterfaceAudience.Private
  public class TableReplicationQueueStorage
      extends TableReplicationStorageBase
      implements ReplicationQueueStorage
  "Table based replication queue storage."

  Nested class: private static class TableReplicationQueueStorage.WALCell
      "Each cell in column wal:{queueId} will be parsed to a WALCell."
  Field: private static org.slf4j.Logger LOG
  Constructor: TableReplicationQueueStorage(ZKWatcher zookeeper,
      org.apache.hadoop.conf.Configuration conf)
  Methods included: addHFileRefs(String peerId, List<Pair<Path, Path>> pairs)
      "Add new hfile references to the queue."; addPeerToHFileRefs(String peerId)
      "Add a peer to hfile reference queue if peer does not exist."; and
      addWAL(ServerName serverName, String queueId, String fileName)
      "Add a new WAL file to the given queue for a given regionserver."
  Inherited fields from TableReplicationStorageBase: conf, FAMILY_HFILE_REFS,
      FAMILY_PEER, FAMILY_QUEUE, FAMILY_REGIONS, FAMILY_RS_STATE, FAMILY_WAL,
      QUALIFIER_PEER_CONFIG, QUALIFIER_PEER_STATE, QUALIFIER_STATE_ENABLED,
      REPLICATION_TABLE, zookeeper.)

[27/51] [partial] hbase-site git commit: Published site at 00095a2ef9442e3fd86c04876c9d91f2f8b23ad8.

2018-03-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bd675fa3/devapidocs/src-html/org/apache/hadoop/hbase/backup/HFileArchiver.StoreToFile.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/backup/HFileArchiver.StoreToFile.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/backup/HFileArchiver.StoreToFile.html
index f47d627..c3d225c 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/backup/HFileArchiver.StoreToFile.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/backup/HFileArchiver.StoreToFile.html
@@ -117,219 +117,219 @@
 109   */
 110  public static boolean 
archiveRegion(FileSystem fs, Path rootdir, Path tableDir, Path regionDir)
 111  throws IOException {
-112if (LOG.isDebugEnabled()) {
-113  LOG.debug("ARCHIVING " + 
regionDir.toString());
-114}
-115
-116// otherwise, we archive the files
-117// make sure we can archive
-118if (tableDir == null || regionDir == 
null) {
-119  LOG.error("No archive directory 
could be found because tabledir (" + tableDir
-120  + ") or regiondir (" + 
regionDir + "was null. Deleting files instead.");
-121  deleteRegionWithoutArchiving(fs, 
regionDir);
-122  // we should have archived, but 
failed to. Doesn't matter if we deleted
-123  // the archived files correctly or 
not.
-124  return false;
-125}
-126
-127// make sure the regiondir lives 
under the tabledir
-128
Preconditions.checkArgument(regionDir.toString().startsWith(tableDir.toString()));
-129Path regionArchiveDir = 
HFileArchiveUtil.getRegionArchiveDir(rootdir,
-130FSUtils.getTableName(tableDir),
-131regionDir.getName());
-132
-133FileStatusConverter getAsFile = new 
FileStatusConverter(fs);
-134// otherwise, we attempt to archive 
the store files
-135
-136// build collection of just the store 
directories to archive
-137CollectionFile toArchive = 
new ArrayList();
-138final PathFilter dirFilter = new 
FSUtils.DirFilter(fs);
-139PathFilter nonHidden = new 
PathFilter() {
-140  @Override
-141  public boolean accept(Path file) 
{
-142return dirFilter.accept(file) 
 !file.getName().toString().startsWith(".");
-143  }
-144};
-145FileStatus[] storeDirs = 
FSUtils.listStatus(fs, regionDir, nonHidden);
-146// if there no files, we can just 
delete the directory and return;
-147if (storeDirs == null) {
-148  LOG.debug("Region directory " + 
regionDir + " empty.");
-149  return 
deleteRegionWithoutArchiving(fs, regionDir);
-150}
-151
-152// convert the files in the region to 
a File
-153
toArchive.addAll(Lists.transform(Arrays.asList(storeDirs), getAsFile));
-154LOG.debug("Archiving " + 
toArchive);
-155ListFile failedArchive = 
resolveAndArchive(fs, regionArchiveDir, toArchive,
-156
EnvironmentEdgeManager.currentTime());
-157if (!failedArchive.isEmpty()) {
-158  throw new 
FailedArchiveException("Failed to archive/delete all the files for region:"
-159  + regionDir.getName() + " into 
" + regionArchiveDir
-160  + ". Something is probably awry 
on the filesystem.",
-161  
Collections2.transform(failedArchive, FUNC_FILE_TO_PATH));
-162}
-163// if that was successful, then we 
delete the region
-164return 
deleteRegionWithoutArchiving(fs, regionDir);
-165  }
-166
-167  /**
-168   * Remove from the specified region the 
store files of the specified column family,
-169   * either by archiving them or outright 
deletion
-170   * @param fs the filesystem where the 
store files live
-171   * @param conf {@link Configuration} to 
examine to determine the archive directory
-172   * @param parent Parent region hosting 
the store files
-173   * @param tableDir {@link Path} to 
where the table is being stored (for building the archive path)
-174   * @param family the family hosting the 
store files
-175   * @throws IOException if the files 
could not be correctly disposed.
-176   */
-177  public static void 
archiveFamily(FileSystem fs, Configuration conf,
-178  RegionInfo parent, Path tableDir, 
byte[] family) throws IOException {
-179Path familyDir = new Path(tableDir, 
new Path(parent.getEncodedName(), Bytes.toString(family)));
-180archiveFamilyByFamilyDir(fs, conf, 
parent, familyDir, family);
-181  }
-182
-183  /**
-184   * Removes from the specified region 
the store files of the specified column family,
-185   * either by archiving them or outright 
deletion
-186   * @param fs the filesystem where the 
store files live
-187   * @param conf {@link Configuration} to 
examine to determine the archive directory
-188   * @param parent Parent region hosting 
the store files
-189   * @param familyDir {@link Path} to 
where the family is being stored
-190   * @param family the family hosting the 
store files
-191   * @throws IOException if the files 
could not be correctly disposed.
-192   */
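
The hunk above removes the old rendering of HFileArchiver.archiveRegion, whose flow is: validate the table and region directories, resolve the region's archive directory, collect the non-hidden store directories, archive them, and delete the region directory only after archiving succeeds (falling back to plain deletion when no archive directory can be determined). A minimal sketch of that archive-then-delete pattern, in plain java.nio.file with hypothetical paths rather than the HBase FileSystem API:

    import java.io.IOException;
    import java.nio.file.DirectoryStream;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.nio.file.StandardCopyOption;

    public class ArchiveThenDelete {
      /** Move every non-hidden entry of regionDir into archiveDir, then delete regionDir. */
      static boolean archiveRegion(Path regionDir, Path archiveDir) throws IOException {
        if (regionDir == null || archiveDir == null) {
          return false; // nothing sensible to archive into; HFileArchiver deletes outright here
        }
        Files.createDirectories(archiveDir);
        try (DirectoryStream<Path> entries = Files.newDirectoryStream(regionDir,
            p -> !p.getFileName().toString().startsWith("."))) { // skip hidden entries
          for (Path entry : entries) {
            Files.move(entry, archiveDir.resolve(entry.getFileName()),
                StandardCopyOption.REPLACE_EXISTING);
          }
        }
        Files.delete(regionDir); // reached only if every move above succeeded
        return true;
      }

      public static void main(String[] args) throws IOException {
        Path region = Files.createTempDirectory("region");
        Files.createFile(region.resolve("storefile1"));
        Path archive = Files.createTempDirectory("archive").resolve("region");
        System.out.println("archived=" + archiveRegion(region, archive));
      }
    }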

[21/51] [partial] hbase-site git commit: Published site at 00095a2ef9442e3fd86c04876c9d91f2f8b23ad8.

2018-03-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bd675fa3/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.html
index c27b109..4160a88 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.html
@@ -105,7 +105,7 @@
 097try {
 098  done = waitUntilDone(startTime 
* 1000L + asyncProcess.primaryCallTimeoutMicroseconds);
 099} catch (InterruptedException ex) 
{
-100  LOG.error("Replica thread was 
interrupted - no replica calls: " + ex.getMessage());
+100  LOG.error("Replica thread 
interrupted - no replica calls {}", ex.getMessage());
 101  return;
 102}
 103  }
@@ -149,7 +149,7 @@
 141  if (loc == null) return;
 142  HRegionLocation[] locs = 
loc.getRegionLocations();
 143  if (locs.length == 1) {
-144LOG.warn("No replicas found for " 
+ action.getAction());
+144LOG.warn("No replicas found for 
{}", action.getAction());
 145return;
 146  }
 147  synchronized (replicaResultLock) 
{
@@ -230,8 +230,8 @@
 222  return;
 223} catch (Throwable t) {
 224  // This should not happen. 
Let's log & retry anyway.
-225  LOG.error("#" + asyncProcess.id 
+ ", Caught throwable while calling. This is unexpected." +
-226  " Retrying. Server is " + 
server + ", tableName=" + tableName, t);
+225  LOG.error("id=" + 
asyncProcess.id + ", caught throwable. Unexpected." +
+226  " Retrying. Server=" + 
server + ", tableName=" + tableName, t);
 227  
receiveGlobalFailure(multiAction, server, numAttempt, t);
 228  return;
 229}
@@ -247,1036 +247,1035 @@
 239}
 240  } catch (Throwable t) {
 241// Something really bad happened. 
We are on the send thread that will now die.
-242LOG.error("Internal AsyncProcess 
#" + asyncProcess.id + " error for "
-243+ tableName + " processing 
for " + server, t);
-244throw new RuntimeException(t);
-245  } finally {
-246
asyncProcess.decTaskCounters(multiAction.getRegions(), server);
-247if (callsInProgress != null && callable != null && res != null) {
-248  
callsInProgress.remove(callable);
-249}
-250  }
-251}
-252  }
-253
-254  private final 
Batch.Callback<CResult> callback;
-255  private final BatchErrors errors;
-256  private final 
ConnectionImplementation.ServerErrorTracker errorsByServer;
-257  private final ExecutorService pool;
-258  private final 
Set<CancellableRegionServerCallable> callsInProgress;
+242LOG.error("id=" + asyncProcess.id 
+ " error for " + tableName + " processing " + server, t);
+243throw new RuntimeException(t);
+244  } finally {
+245
asyncProcess.decTaskCounters(multiAction.getRegions(), server);
+246if (callsInProgress != null && callable != null && res != null) {
+247  
callsInProgress.remove(callable);
+248}
+249  }
+250}
+251  }
+252
+253  private final 
Batch.Callback<CResult> callback;
+254  private final BatchErrors errors;
+255  private final 
ConnectionImplementation.ServerErrorTracker errorsByServer;
+256  private final ExecutorService pool;
+257  private final 
Set<CancellableRegionServerCallable> callsInProgress;
+258
 259
-260
-261  private final TableName tableName;
-262  private final AtomicLong 
actionsInProgress = new AtomicLong(-1);
-263  /**
-264   * The lock controls access to results. 
It is only held when populating results where
-265   * there might be several callers 
(eventual consistency gets). For other requests,
-266   * there's one unique call going on per 
result index.
-267   */
-268  private final Object replicaResultLock 
= new Object();
-269  /**
-270   * Result array.  Null if results are 
not needed. Otherwise, each index corresponds to
-271   * the action index in initial actions 
submitted. For most request types, has null-s for
-272   * requests that are not done, and 
result/exception for those that are done.
-273   * For eventual-consistency gets, 
initially the same applies; at some point, replica calls
-274   * might be started, and 
ReplicaResultState is put at the corresponding indices. The
-275   * returning calls check the type to 
detect when this is the case. After all calls are done,
-276   * ReplicaResultState-s are replaced 
with results for the user.
-277   */
-278  private final Object[] results;
-279  /**
-280   * Indices of replica gets in results. 
If null, all or no actions are replica-gets.
-281   */
-282  private final int[] 
replicaGetIndices;
-283  private final boolean 
hasAnyReplicaGets;
-284  
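
Most of the changes in this hunk swap string-concatenated log statements for SLF4J's parameterized form, which defers message construction until the level is known to be enabled and still prints a stack trace when a Throwable is the last argument. A small self-contained illustration of the idiom (logger name and values are invented; assumes slf4j-api plus a binding on the classpath):

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class ParameterizedLogging {
      private static final Logger LOG = LoggerFactory.getLogger(ParameterizedLogging.class);

      public static void main(String[] args) {
        long id = 42L;
        String server = "rs1.example.org,16020";
        // Concatenation builds the full string even when DEBUG is disabled:
        LOG.debug("id=" + id + ", contacting server=" + server);
        // The {} placeholders defer formatting until the level check passes:
        LOG.debug("id={}, contacting server={}", id, server);
        // A trailing Throwable argument is still printed with its stack trace:
        LOG.error("id={}, caught throwable, retrying", id, new RuntimeException("boom"));
      }
    }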

[03/51] [partial] hbase-site git commit: Published site at 00095a2ef9442e3fd86c04876c9d91f2f8b23ad8.

2018-03-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bd675fa3/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.WorkerThread.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.WorkerThread.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.WorkerThread.html
index 3bc66bb..97aa79c 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.WorkerThread.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.WorkerThread.html
@@ -1435,459 +1435,460 @@
 1427   */
 1428  private void execProcedure(final 
RootProcedureState procStack,
 1429  final 
Procedure<TEnvironment> procedure) {
-1430
Preconditions.checkArgument(procedure.getState() == ProcedureState.RUNNABLE);
-1431
-1432// Procedures can suspend 
themselves. They skip out by throwing a ProcedureSuspendedException.
-1433// The exception is caught below and 
then we hurry to the exit without disturbing state. The
-1434// idea is that the processing of 
this procedure will be unsuspended later by an external event
-1435// such the report of a region open. 
TODO: Currently, its possible for two worker threads
-1436// to be working on the same 
procedure concurrently (locking in procedures is NOT about
-1437// concurrency but about tying an 
entity to a procedure; i.e. a region to a particular
-1438// procedure instance). This can 
make for issues if both threads are changing state.
-1439// See 
env.getProcedureScheduler().wakeEvent(regionNode.getProcedureEvent());
-1440// in 
RegionTransitionProcedure#reportTransition for example of Procedure putting
-1441// itself back on the scheduler 
making it possible for two threads running against
-1442// the one Procedure. Might be ok if 
they are both doing different, idempotent sections.
-1443boolean suspended = false;
-1444
-1445// Whether to 're-' -execute; run 
through the loop again.
-1446boolean reExecute = false;
-1447
-1448Procedure<TEnvironment>[] subprocs = null;
-1449do {
-1450  reExecute = false;
-1451  try {
-1452subprocs = 
procedure.doExecute(getEnvironment());
-1453if (subprocs != null && subprocs.length == 0) {
-1454  subprocs = null;
-1455}
-1456  } catch 
(ProcedureSuspendedException e) {
-1457if (LOG.isTraceEnabled()) {
-1458  LOG.trace("Suspend " + 
procedure);
-1459}
-1460suspended = true;
-1461  } catch (ProcedureYieldException 
e) {
-1462if (LOG.isTraceEnabled()) {
-1463  LOG.trace("Yield " + procedure 
+ ": " + e.getMessage(), e);
-1464}
-1465scheduler.yield(procedure);
-1466return;
-1467  } catch (InterruptedException e) 
{
-1468if (LOG.isTraceEnabled()) {
-1469  LOG.trace("Yield interrupt " + 
procedure + ": " + e.getMessage(), e);
-1470}
-1471
handleInterruptedException(procedure, e);
-1472scheduler.yield(procedure);
-1473return;
-1474  } catch (Throwable e) {
-1475// Catch NullPointerExceptions 
or similar errors...
-1476String msg = "CODE-BUG: Uncaught 
runtime exception: " + procedure;
-1477LOG.error(msg, e);
-1478procedure.setFailure(new 
RemoteProcedureException(msg, e));
-1479  }
-1480
-1481  if (!procedure.isFailed()) {
-1482if (subprocs != null) {
-1483  if (subprocs.length == 1 && subprocs[0] == procedure) {
-1484// Procedure returned 
itself. Quick-shortcut for a state machine-like procedure;
-1485// i.e. we go around this 
loop again rather than go back out on the scheduler queue.
-1486subprocs = null;
-1487reExecute = true;
-1488if (LOG.isTraceEnabled()) 
{
-1489  LOG.trace("Short-circuit 
to next step on pid=" + procedure.getProcId());
-1490}
-1491  } else {
-1492// Yield the current 
procedure, and make the subprocedure runnable
-1493// subprocs may come back 
'null'.
-1494subprocs = 
initializeChildren(procStack, procedure, subprocs);
-1495LOG.info("Initialized 
subprocedures=" +
-1496  (subprocs == null? null:
-1497
Stream.of(subprocs).map(e -> "{" + e.toString() + "}").
-1498
collect(Collectors.toList()).toString()));
-1499  }
-1500} else if (procedure.getState() 
== ProcedureState.WAITING_TIMEOUT) {
-1501  if (LOG.isTraceEnabled()) {
-1502LOG.trace("Added to 
timeoutExecutor " + procedure);
-1503  }
-1504  
timeoutExecutor.add(procedure);
-1505} else if (!suspended) {
-1506  // No subtask, so we are 
done
-1507  
procedure.setState(ProcedureState.SUCCESS);
-1508}
-1509  }
-1510
-1511  
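
The execProcedure body shown above is a do/while driver: a step may suspend by throwing ProcedureSuspendedException (its state is left untouched for an external event to resume), yield back to the scheduler, or return itself to be re-executed immediately on the same worker thread. A stripped-down sketch of that re-execute loop; Step, drive and SuspendedException are invented names, not the procedure-v2 API:

    public class ReExecuteLoop {
      /** Thrown by a step that wants to pause without losing its place. */
      static class SuspendedException extends Exception {}

      interface Step {
        Step execute() throws SuspendedException;
      }

      /** Run one step; if it returns itself, loop here instead of rescheduling. */
      static void drive(Step step) {
        boolean reExecute;
        do {
          reExecute = false;
          try {
            Step next = step.execute();
            if (next == step) {
              reExecute = true; // short-circuit to the next state on the same thread
            }
          } catch (SuspendedException e) {
            return; // leave state untouched; an external event resumes the step later
          }
        } while (reExecute);
      }

      public static void main(String[] args) {
        int[] state = {0};
        Step step = new Step() {
          @Override
          public Step execute() {
            state[0]++;
            System.out.println("state=" + state[0]);
            return state[0] < 3 ? this : null; // return self twice, then finish
          }
        };
        drive(step);
      }
    }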

[44/51] [partial] hbase-site git commit: Published site at 00095a2ef9442e3fd86c04876c9d91f2f8b23ad8.

2018-03-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bd675fa3/devapidocs/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.html 
b/devapidocs/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.html
index d739cbe..5823364 100644
--- a/devapidocs/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.html
+++ b/devapidocs/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10};
+var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10};
 var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -114,7 +114,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Private
-public class FileIOEngine
+public class FileIOEngine
 extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
 implements IOEngine
 IO engine that stores data to a file on the local file 
system.
@@ -251,16 +251,20 @@ implements 
+(package private) https://docs.oracle.com/javase/8/docs/api/java/nio/channels/FileChannel.html?is-external=true;
 title="class or interface in java.nio.channels">FileChannel[]
+getFileChannels()
+
+
 private int
 getFileNum(longoffset)
 
-
+
 boolean
 isPersistent()
 File IO engine is always able to support persistent storage 
for the cache
 
 
-
+
 Cacheable
 read(longoffset,
 intlength,
@@ -268,34 +272,34 @@ implements Transfers data from file to the given byte buffer
 
 
-
-private void
+
+(package private) void
 refreshFileConnection(intaccessFileNum)
 
-
+
 void
 shutdown()
 Close the file
 
 
-
+
 void
 sync()
 Sync the data to file after writing
 
 
-
+
 https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
 toString()
 
-
+
 void
 write(https://docs.oracle.com/javase/8/docs/api/java/nio/ByteBuffer.html?is-external=true;
 title="class or interface in java.nio">ByteBuffersrcBuffer,
  longoffset)
 Transfers data from the given byte buffer to file
 
 
-
+
 void
 write(ByteBuffsrcBuffer,
  longoffset)
@@ -330,7 +334,7 @@ implements 
 
 LOG
-private static finalorg.slf4j.Logger LOG
+private static finalorg.slf4j.Logger LOG
 
 
 
@@ -339,7 +343,7 @@ implements 
 
 FILE_DELIMITER
-public static finalhttps://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String FILE_DELIMITER
+public static finalhttps://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String FILE_DELIMITER
 
 See Also:
 Constant
 Field Values
@@ -352,7 +356,7 @@ implements 
 
 filePaths
-private finalhttps://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String[] filePaths
+private finalhttps://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String[] filePaths
 
 
 
@@ -361,7 +365,7 @@ implements 
 
 fileChannels
-private finalhttps://docs.oracle.com/javase/8/docs/api/java/nio/channels/FileChannel.html?is-external=true;
 title="class or interface in java.nio.channels">FileChannel[] fileChannels
+private finalhttps://docs.oracle.com/javase/8/docs/api/java/nio/channels/FileChannel.html?is-external=true;
 title="class or interface in java.nio.channels">FileChannel[] fileChannels
 
 
 
@@ -370,7 +374,7 @@ implements 
 
 rafs
-private finalhttps://docs.oracle.com/javase/8/docs/api/java/io/RandomAccessFile.html?is-external=true;
 title="class or interface in java.io">RandomAccessFile[] rafs
+private finalhttps://docs.oracle.com/javase/8/docs/api/java/io/RandomAccessFile.html?is-external=true;
 title="class or interface in java.io">RandomAccessFile[] rafs
 
 
 
@@ -379,7 +383,7 @@ implements 
 
 sizePerFile
-private finallong sizePerFile
+private finallong sizePerFile
 
 
 
@@ -388,7 +392,7 @@ implements 
 
 capacity
-private finallong capacity
+private finallong capacity
 
 
 
@@ -397,7 +401,7 @@ implements 
 
 readAccessor
-privateFileIOEngine.FileReadAccessor readAccessor
+privateFileIOEngine.FileReadAccessor readAccessor
 
 
 
@@ -406,7 +410,7 @@ implements 
 
 writeAccessor
-privateFileIOEngine.FileWriteAccessor writeAccessor
+privateFileIOEngine.FileWriteAccessor writeAccessor
 
 
 
@@ -423,7 +427,7 @@ implements 
 
 FileIOEngine
-publicFileIOEngine(longcapacity,
+publicFileIOEngine(longcapacity,
 booleanmaintainPersistence,
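
FileIOEngine stripes one logical cache of capacity bytes across several backing files, so each read or write first maps a global offset to a file index (getFileNum) plus an offset within that file. A toy sketch of that mapping, assuming equal-sized files; the names are hypothetical, not the HBase implementation:

    public class StripedOffsets {
      private final long sizePerFile;
      private final int fileCount;

      StripedOffsets(long capacity, int fileCount) {
        this.fileCount = fileCount;
        this.sizePerFile = capacity / fileCount; // each backing file holds an equal slice
      }

      /** Index of the backing file that holds this global offset. */
      int fileNum(long offset) {
        int idx = (int) (offset / sizePerFile);
        // clamp for capacities that are not an exact multiple of fileCount
        return Math.min(idx, fileCount - 1);
      }

      /** Offset within that backing file. */
      long fileOffset(long offset) {
        return offset - (long) fileNum(offset) * sizePerFile;
      }

      public static void main(String[] args) {
        StripedOffsets s = new StripedOffsets(4096, 4); // 4 files of 1024 bytes each
        System.out.println(s.fileNum(0) + " @ " + s.fileOffset(0));       // 0 @ 0
        System.out.println(s.fileNum(1500) + " @ " + s.fileOffset(1500)); // 1 @ 476
        System.out.println(s.fileNum(4095) + " @ " + s.fileOffset(4095)); // 3 @ 1023
      }
    }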
 

[04/51] [partial] hbase-site git commit: Published site at 00095a2ef9442e3fd86c04876c9d91f2f8b23ad8.

2018-03-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bd675fa3/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.WorkerMonitor.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.WorkerMonitor.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.WorkerMonitor.html
index 3bc66bb..97aa79c 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.WorkerMonitor.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.WorkerMonitor.html
@@ -1435,459 +1435,460 @@
 1427   */
 1428  private void execProcedure(final 
RootProcedureState procStack,
 1429  final 
Procedure<TEnvironment> procedure) {
-1430
Preconditions.checkArgument(procedure.getState() == ProcedureState.RUNNABLE);
-1431
-1432// Procedures can suspend 
themselves. They skip out by throwing a ProcedureSuspendedException.
-1433// The exception is caught below and 
then we hurry to the exit without disturbing state. The
-1434// idea is that the processing of 
this procedure will be unsuspended later by an external event
-1435// such the report of a region open. 
TODO: Currently, its possible for two worker threads
-1436// to be working on the same 
procedure concurrently (locking in procedures is NOT about
-1437// concurrency but about tying an 
entity to a procedure; i.e. a region to a particular
-1438// procedure instance). This can 
make for issues if both threads are changing state.
-1439// See 
env.getProcedureScheduler().wakeEvent(regionNode.getProcedureEvent());
-1440// in 
RegionTransitionProcedure#reportTransition for example of Procedure putting
-1441// itself back on the scheduler 
making it possible for two threads running against
-1442// the one Procedure. Might be ok if 
they are both doing different, idempotent sections.
-1443boolean suspended = false;
-1444
-1445// Whether to 're-' -execute; run 
through the loop again.
-1446boolean reExecute = false;
-1447
-1448Procedure<TEnvironment>[] subprocs = null;
-1449do {
-1450  reExecute = false;
-1451  try {
-1452subprocs = 
procedure.doExecute(getEnvironment());
-1453if (subprocs != null && subprocs.length == 0) {
-1454  subprocs = null;
-1455}
-1456  } catch 
(ProcedureSuspendedException e) {
-1457if (LOG.isTraceEnabled()) {
-1458  LOG.trace("Suspend " + 
procedure);
-1459}
-1460suspended = true;
-1461  } catch (ProcedureYieldException 
e) {
-1462if (LOG.isTraceEnabled()) {
-1463  LOG.trace("Yield " + procedure 
+ ": " + e.getMessage(), e);
-1464}
-1465scheduler.yield(procedure);
-1466return;
-1467  } catch (InterruptedException e) 
{
-1468if (LOG.isTraceEnabled()) {
-1469  LOG.trace("Yield interrupt " + 
procedure + ": " + e.getMessage(), e);
-1470}
-1471
handleInterruptedException(procedure, e);
-1472scheduler.yield(procedure);
-1473return;
-1474  } catch (Throwable e) {
-1475// Catch NullPointerExceptions 
or similar errors...
-1476String msg = "CODE-BUG: Uncaught 
runtime exception: " + procedure;
-1477LOG.error(msg, e);
-1478procedure.setFailure(new 
RemoteProcedureException(msg, e));
-1479  }
-1480
-1481  if (!procedure.isFailed()) {
-1482if (subprocs != null) {
-1483  if (subprocs.length == 1 && subprocs[0] == procedure) {
-1484// Procedure returned 
itself. Quick-shortcut for a state machine-like procedure;
-1485// i.e. we go around this 
loop again rather than go back out on the scheduler queue.
-1486subprocs = null;
-1487reExecute = true;
-1488if (LOG.isTraceEnabled()) 
{
-1489  LOG.trace("Short-circuit 
to next step on pid=" + procedure.getProcId());
-1490}
-1491  } else {
-1492// Yield the current 
procedure, and make the subprocedure runnable
-1493// subprocs may come back 
'null'.
-1494subprocs = 
initializeChildren(procStack, procedure, subprocs);
-1495LOG.info("Initialized 
subprocedures=" +
-1496  (subprocs == null? null:
-1497
Stream.of(subprocs).map(e -> "{" + e.toString() + "}").
-1498
collect(Collectors.toList()).toString()));
-1499  }
-1500} else if (procedure.getState() 
== ProcedureState.WAITING_TIMEOUT) {
-1501  if (LOG.isTraceEnabled()) {
-1502LOG.trace("Added to 
timeoutExecutor " + procedure);
-1503  }
-1504  
timeoutExecutor.add(procedure);
-1505} else if (!suspended) {
-1506  // No subtask, so we are 
done
-1507  
procedure.setState(ProcedureState.SUCCESS);
-1508}
-1509  }
-1510
-1511   

[16/51] [partial] hbase-site git commit: Published site at 00095a2ef9442e3fd86c04876c9d91f2f8b23ad8.

2018-03-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bd675fa3/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.html
index 7df78ff..d523437 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.html
@@ -41,774 +41,784 @@
 033import 
org.apache.hadoop.hbase.TableName;
 034import 
org.apache.hadoop.hbase.UnknownRegionException;
 035import 
org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
-036import 
org.apache.hadoop.hbase.client.MasterSwitchType;
-037import 
org.apache.hadoop.hbase.client.Mutation;
-038import 
org.apache.hadoop.hbase.client.RegionInfo;
-039import 
org.apache.hadoop.hbase.client.RegionInfoBuilder;
-040import 
org.apache.hadoop.hbase.client.RegionReplicaUtil;
-041import 
org.apache.hadoop.hbase.client.TableDescriptor;
-042import 
org.apache.hadoop.hbase.exceptions.MergeRegionException;
-043import 
org.apache.hadoop.hbase.io.hfile.CacheConfig;
-044import 
org.apache.hadoop.hbase.master.CatalogJanitor;
-045import 
org.apache.hadoop.hbase.master.MasterCoprocessorHost;
-046import 
org.apache.hadoop.hbase.master.MasterFileSystem;
-047import 
org.apache.hadoop.hbase.master.RegionState;
-048import 
org.apache.hadoop.hbase.master.RegionState.State;
-049import 
org.apache.hadoop.hbase.master.normalizer.NormalizationPlan;
-050import 
org.apache.hadoop.hbase.master.procedure.AbstractStateMachineTableProcedure;
-051import 
org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
-052import 
org.apache.hadoop.hbase.master.procedure.MasterProcedureUtil;
-053import 
org.apache.hadoop.hbase.procedure2.ProcedureMetrics;
-054import 
org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer;
-055import 
org.apache.hadoop.hbase.quotas.QuotaExceededException;
-056import 
org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
-057import 
org.apache.hadoop.hbase.regionserver.HStoreFile;
-058import 
org.apache.hadoop.hbase.regionserver.StoreFileInfo;
-059import 
org.apache.hadoop.hbase.util.Bytes;
-060import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-061import 
org.apache.hadoop.hbase.util.FSUtils;
-062import 
org.apache.hadoop.hbase.wal.WALSplitter;
-063import 
org.apache.yetus.audience.InterfaceAudience;
-064import org.slf4j.Logger;
-065import org.slf4j.LoggerFactory;
-066import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-067import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-068import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse;
-069import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos;
-070import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.MergeTableRegionsState;
-071
-072/**
-073 * The procedure to Merge a region in a 
table.
-074 * This procedure takes an exclusive 
table lock since it is working over multiple regions.
-075 * It holds the lock for the life of the 
procedure.
-076 * <p>Throws exception on
construction if determines context hostile to merge (cluster going
-077 * down or master is shutting down or 
table is disabled).</p>
-078 */
-079@InterfaceAudience.Private
-080public class MergeTableRegionsProcedure
-081extends 
AbstractStateMachineTableProcedure<MergeTableRegionsState> {
-082  private static final Logger LOG = 
LoggerFactory.getLogger(MergeTableRegionsProcedure.class);
-083  private Boolean traceEnabled;
-084  private volatile boolean lock = 
false;
-085  private ServerName regionLocation;
-086  private RegionInfo[] regionsToMerge;
-087  private RegionInfo mergedRegion;
-088  private boolean forcible;
-089
-090  public MergeTableRegionsProcedure() {
-091// Required by the Procedure 
framework to create the procedure on replay
-092  }
-093
-094  public MergeTableRegionsProcedure(final 
MasterProcedureEnv env,
-095  final RegionInfo regionToMergeA, 
final RegionInfo regionToMergeB) throws IOException {
-096this(env, regionToMergeA, 
regionToMergeB, false);
-097  }
-098
-099  public MergeTableRegionsProcedure(final 
MasterProcedureEnv env,
-100  final RegionInfo regionToMergeA, 
final RegionInfo regionToMergeB,
-101  final boolean forcible) throws 
IOException {
-102this(env, new RegionInfo[] 
{regionToMergeA, regionToMergeB}, forcible);
-103  }
-104
-105  public MergeTableRegionsProcedure(final 
MasterProcedureEnv env,
-106  final RegionInfo[] regionsToMerge, 
final boolean forcible)
-107  throws IOException {
-108super(env);
-109
-110// Check daughter regions and make 
sure that we have valid daughter regions
-111// before 
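
The constructors above telescope: the two-region form and the default-forcible form both funnel into the (env, RegionInfo[], forcible) constructor, so validation lives in exactly one place. A generic sketch of that constructor-chaining pattern; MergeRequest is an invented stand-in, not the procedure class:

    import java.io.IOException;
    import java.util.Objects;

    public class MergeRequest {
      private final String[] regions;
      private final boolean forcible;

      /** Convenience form for exactly two regions, defaulting forcible to false. */
      MergeRequest(String regionA, String regionB) throws IOException {
        this(new String[] { regionA, regionB }, false);
      }

      /** All other constructors funnel into this one, so checks happen once. */
      MergeRequest(String[] regions, boolean forcible) throws IOException {
        if (regions == null || regions.length < 2) {
          throw new IOException("need at least two regions to merge");
        }
        for (String r : regions) {
          Objects.requireNonNull(r, "region");
        }
        this.regions = regions.clone();
        this.forcible = forcible;
      }

      public static void main(String[] args) throws IOException {
        MergeRequest ok = new MergeRequest("regionA", "regionB");
        System.out.println(ok.regions.length + " regions, forcible=" + ok.forcible);
        try {
          new MergeRequest(new String[] { "only-one" }, true);
        } catch (IOException e) {
          System.out.println("rejected: " + e.getMessage());
        }
      }
    }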

[01/51] [partial] hbase-site git commit: Published site at 00095a2ef9442e3fd86c04876c9d91f2f8b23ad8.

2018-03-17 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site b5f95ca57 -> bd675fa38


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bd675fa3/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/ChunkCreator.ChunkType.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/ChunkCreator.ChunkType.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/ChunkCreator.ChunkType.html
index 93bb11a..6cd219e 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/ChunkCreator.ChunkType.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/ChunkCreator.ChunkType.html
@@ -106,13 +106,13 @@
 098   float 
poolSizePercentage, float indexChunkSizePercentage,
 099   float 
initialCountPercentage,
 100   
HeapMemoryManager heapMemoryManager) {
-101this.dataChunksPool = 
initializePool(globalMemStoreSize,
+101this.dataChunksPool = 
initializePool("data", globalMemStoreSize,
 102(1 - 
indexChunkSizePercentage) * poolSizePercentage,
 103initialCountPercentage, 
chunkSize, heapMemoryManager);
 104// The index chunks pool is needed 
only when the index type is CCM.
 105// Since the pools are not created at 
all when the index type isn't CCM,
 106// we don't need to check it here.
-107this.indexChunksPool = 
initializePool(globalMemStoreSize,
+107this.indexChunksPool = 
initializePool("index", globalMemStoreSize,
 108indexChunkSizePercentage * 
poolSizePercentage,
 109initialCountPercentage, (int) 
(indexChunkSizePercentage * chunkSize),
 110heapMemoryManager);
@@ -339,313 +339,315 @@
 331private static final int 
statThreadPeriod = 60 * 5;
 332private final AtomicLong chunkCount = 
new AtomicLong();
 333private final LongAdder 
reusedChunkCount = new LongAdder();
-334
-335MemStoreChunkPool(int chunkSize, int 
maxCount, int initialCount, float poolSizePercentage) {
-336  this.chunkSize = chunkSize;
-337  this.maxCount = maxCount;
-338  this.poolSizePercentage = 
poolSizePercentage;
-339  this.reclaimedChunks = new 
LinkedBlockingQueue<>();
-340  for (int i = 0; i < initialCount; i++) {
-341Chunk chunk = createChunk(true, 
CompactingMemStore.IndexType.ARRAY_MAP, chunkSize);
-342chunk.init();
-343reclaimedChunks.add(chunk);
-344  }
-345  chunkCount.set(initialCount);
-346  final String n = 
Thread.currentThread().getName();
-347  scheduleThreadPool = 
Executors.newScheduledThreadPool(1, new ThreadFactoryBuilder()
-348  .setNameFormat(n + 
"-MemStoreChunkPool Statistics").setDaemon(true).build());
-349  
this.scheduleThreadPool.scheduleAtFixedRate(new StatisticsThread(), 
statThreadPeriod,
-350  statThreadPeriod, 
TimeUnit.SECONDS);
-351}
-352
-353/**
-354 * Poll a chunk from the pool, reset 
it if not null, else create a new chunk to return if we have
-355 * not yet created max allowed chunks 
count. When we have already created max allowed chunks and
-356 * no free chunks as of now, return 
null. It is the responsibility of the caller to make a chunk
-357 * then.
-358 * Note: Chunks returned by this pool 
must be put back to the pool after its use.
-359 * @return a chunk
-360 * @see #putbackChunks(Chunk)
-361 */
-362Chunk getChunk() {
-363  return 
getChunk(CompactingMemStore.IndexType.ARRAY_MAP);
-364}
-365
-366Chunk 
getChunk(CompactingMemStore.IndexType chunkIndexType) {
-367  Chunk chunk = 
reclaimedChunks.poll();
-368  if (chunk != null) {
-369chunk.reset();
-370reusedChunkCount.increment();
-371  } else {
-372// Make a chunk iff we have not 
yet created the maxCount chunks
-373while (true) {
-374  long created = 
this.chunkCount.get();
-375  if (created < this.maxCount) {
-376if 
(this.chunkCount.compareAndSet(created, created + 1)) {
-377  chunk = 
createChunkForPool(chunkIndexType, chunkSize);
-378  break;
-379}
-380  } else {
-381break;
-382  }
-383}
-384  }
-385  return chunk;
-386}
-387
-388int getChunkSize() {
-389  return chunkSize;
-390}
-391
-392/**
-393 * Add the chunks to the pool, when 
the pool achieves the max size, it will skip the remaining
-394 * chunks
-395 * @param c
-396 */
-397private void putbackChunks(Chunk c) 
{
-398  int toAdd = this.maxCount - 
reclaimedChunks.size();
-399  if (c.isFromPool() && c.size == chunkSize && toAdd > 0) {
-400reclaimedChunks.add(c);
-401  } else {
-402// remove the chunk (that is not 
going to pool)
-403// though it is initially from 
the pool or not
-404
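
MemStoreChunkPool.getChunk above first polls the reclaimed-chunk queue and, only when that is empty, runs a compareAndSet loop on a counter so that new chunks are created only while the total stays under maxCount; a null return tells the caller to allocate outside the pool. A self-contained sketch of that bounded-pool idiom (BoundedPool is an invented name):

    import java.util.concurrent.ConcurrentLinkedQueue;
    import java.util.concurrent.atomic.AtomicLong;
    import java.util.function.Supplier;

    public class BoundedPool<T> {
      private final ConcurrentLinkedQueue<T> reclaimed = new ConcurrentLinkedQueue<>();
      private final AtomicLong created = new AtomicLong();
      private final long maxCount;
      private final Supplier<T> factory;

      BoundedPool(long maxCount, Supplier<T> factory) {
        this.maxCount = maxCount;
        this.factory = factory;
      }

      /** Reuse a returned item if possible, else create one iff still under the cap. */
      T get() {
        T item = reclaimed.poll();
        if (item != null) {
          return item; // reuse path: no new allocation
        }
        while (true) {
          long c = created.get();
          if (c >= maxCount) {
            return null; // pool exhausted; the caller allocates outside the pool
          }
          if (created.compareAndSet(c, c + 1)) {
            return factory.get(); // this thread won the race for slot c
          }
        }
      }

      void putBack(T item) {
        reclaimed.add(item);
      }

      public static void main(String[] args) {
        BoundedPool<byte[]> pool = new BoundedPool<>(2, () -> new byte[16]);
        byte[] a = pool.get();
        byte[] b = pool.get();
        System.out.println(a != null && b != null); // true: two creations allowed
        System.out.println(pool.get());             // null: cap of 2 reached
        pool.putBack(a);
        System.out.println(pool.get() == a);        // true: a reclaimed item is reused
      }
    }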

[23/51] [partial] hbase-site git commit: Published site at 00095a2ef9442e3fd86c04876c9d91f2f8b23ad8.

2018-03-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bd675fa3/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.Retry.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.Retry.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.Retry.html
index c27b109..4160a88 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.Retry.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.Retry.html
@@ -105,7 +105,7 @@
 097try {
 098  done = waitUntilDone(startTime 
* 1000L + asyncProcess.primaryCallTimeoutMicroseconds);
 099} catch (InterruptedException ex) 
{
-100  LOG.error("Replica thread was 
interrupted - no replica calls: " + ex.getMessage());
+100  LOG.error("Replica thread 
interrupted - no replica calls {}", ex.getMessage());
 101  return;
 102}
 103  }
@@ -149,7 +149,7 @@
 141  if (loc == null) return;
 142  HRegionLocation[] locs = 
loc.getRegionLocations();
 143  if (locs.length == 1) {
-144LOG.warn("No replicas found for " 
+ action.getAction());
+144LOG.warn("No replicas found for 
{}", action.getAction());
 145return;
 146  }
 147  synchronized (replicaResultLock) 
{
@@ -230,8 +230,8 @@
 222  return;
 223} catch (Throwable t) {
 224  // This should not happen. 
Let's log & retry anyway.
-225  LOG.error("#" + asyncProcess.id 
+ ", Caught throwable while calling. This is unexpected." +
-226  " Retrying. Server is " + 
server + ", tableName=" + tableName, t);
+225  LOG.error("id=" + 
asyncProcess.id + ", caught throwable. Unexpected." +
+226  " Retrying. Server=" + 
server + ", tableName=" + tableName, t);
 227  
receiveGlobalFailure(multiAction, server, numAttempt, t);
 228  return;
 229}
@@ -247,1036 +247,1035 @@
 239}
 240  } catch (Throwable t) {
 241// Something really bad happened. 
We are on the send thread that will now die.
-242LOG.error("Internal AsyncProcess 
#" + asyncProcess.id + " error for "
-243+ tableName + " processing 
for " + server, t);
-244throw new RuntimeException(t);
-245  } finally {
-246
asyncProcess.decTaskCounters(multiAction.getRegions(), server);
-247if (callsInProgress != null && callable != null && res != null) {
-248  
callsInProgress.remove(callable);
-249}
-250  }
-251}
-252  }
-253
-254  private final 
Batch.Callback<CResult> callback;
-255  private final BatchErrors errors;
-256  private final 
ConnectionImplementation.ServerErrorTracker errorsByServer;
-257  private final ExecutorService pool;
-258  private final 
Set<CancellableRegionServerCallable> callsInProgress;
+242LOG.error("id=" + asyncProcess.id 
+ " error for " + tableName + " processing " + server, t);
+243throw new RuntimeException(t);
+244  } finally {
+245
asyncProcess.decTaskCounters(multiAction.getRegions(), server);
+246if (callsInProgress != null && callable != null && res != null) {
+247  
callsInProgress.remove(callable);
+248}
+249  }
+250}
+251  }
+252
+253  private final 
Batch.Callback<CResult> callback;
+254  private final BatchErrors errors;
+255  private final 
ConnectionImplementation.ServerErrorTracker errorsByServer;
+256  private final ExecutorService pool;
+257  private final 
Set<CancellableRegionServerCallable> callsInProgress;
+258
 259
-260
-261  private final TableName tableName;
-262  private final AtomicLong 
actionsInProgress = new AtomicLong(-1);
-263  /**
-264   * The lock controls access to results. 
It is only held when populating results where
-265   * there might be several callers 
(eventual consistency gets). For other requests,
-266   * there's one unique call going on per 
result index.
-267   */
-268  private final Object replicaResultLock 
= new Object();
-269  /**
-270   * Result array.  Null if results are 
not needed. Otherwise, each index corresponds to
-271   * the action index in initial actions 
submitted. For most request types, has null-s for
-272   * requests that are not done, and 
result/exception for those that are done.
-273   * For eventual-consistency gets, 
initially the same applies; at some point, replica calls
-274   * might be started, and 
ReplicaResultState is put at the corresponding indices. The
-275   * returning calls check the type to 
detect when this is the case. After all calls are done,
-276   * ReplicaResultState-s are replaced 
with results for the user.
-277   */
-278  private final Object[] results;
-279  /**
-280   * Indices of replica gets in results. 
If null, all or no actions are replica-gets.
-281   */
-282  private final int[] 
replicaGetIndices;
-283  private final boolean 

[2/2] hbase git commit: Revert "Revert "HBASE-19665 Add table based replication peers/queues storage back""

2018-03-17 Thread zhangduo
Revert "Revert "HBASE-19665 Add table based replication peers/queues storage 
back""

This reverts commit 00095a2ef9442e3fd86c04876c9d91f2f8b23ad8.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/6d5f2937
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/6d5f2937
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/6d5f2937

Branch: refs/heads/HBASE-15867
Commit: 6d5f293784bf4cfd5ff74cf8dc9b16f6ff979798
Parents: 00095a2
Author: zhangduo 
Authored: Sat Mar 17 20:32:05 2018 +0800
Committer: zhangduo 
Committed: Sat Mar 17 20:32:05 2018 +0800

--
 .../replication/ReplicationPeerStorage.java |   3 +-
 .../replication/ReplicationStorageFactory.java  |  20 +-
 .../hbase/replication/ReplicationUtils.java |  13 +
 .../TableReplicationPeerStorage.java| 171 ++
 .../TableReplicationQueueStorage.java   | 522 +++
 .../TableReplicationStorageBase.java| 127 +
 .../replication/ZKReplicationPeerStorage.java   |  16 +-
 .../replication/ZKReplicationQueueStorage.java  |   6 +-
 .../replication/TestReplicationStateBasic.java  | 363 -
 .../replication/TestReplicationStateZKImpl.java |  95 
 .../TestZKReplicationPeerStorage.java   | 178 ---
 .../TestZKReplicationQueueStorage.java  | 252 -
 .../TestReplicationSourceManager.java   |   6 +-
 .../storage/TestReplicationStateBasic.java  | 370 +
 .../storage/TestReplicationStateTableImpl.java  | 129 +
 .../storage/TestReplicationStateZKImpl.java |  98 
 .../storage/TestZKReplicationPeerStorage.java   | 182 +++
 .../storage/TestZKReplicationQueueStorage.java  | 255 +
 18 files changed, 1899 insertions(+), 907 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/6d5f2937/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerStorage.java
--
diff --git 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerStorage.java
 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerStorage.java
index 1adda02..4684f08 100644
--- 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerStorage.java
+++ 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerStorage.java
@@ -42,7 +42,8 @@ public interface ReplicationPeerStorage {
 
   /**
* Set the state of peer, {@code true} to {@code ENABLED}, otherwise to 
{@code DISABLED}.
-   * @throws ReplicationException if there are errors accessing the storage 
service.
+   * @throws ReplicationException if there are errors accessing the storage 
service or peer does not
+   *   exist.
*/
   void setPeerState(String peerId, boolean enabled) throws 
ReplicationException;
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/6d5f2937/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationStorageFactory.java
--
diff --git 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationStorageFactory.java
 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationStorageFactory.java
index 462cfed..cbfec3b 100644
--- 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationStorageFactory.java
+++ 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationStorageFactory.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hbase.replication;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.util.ReflectionUtils;
 import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
 import org.apache.yetus.audience.InterfaceAudience;
 
@@ -29,6 +30,15 @@ import org.apache.yetus.audience.InterfaceAudience;
 @InterfaceAudience.Private
 public final class ReplicationStorageFactory {
 
+  public static final String REPLICATION_PEER_STORAGE_IMPL = 
"hbase.replication.peer.storage.impl";
+  public static final String DEFAULT_REPLICATION_PEER_STORAGE_IMPL =
+  ZKReplicationPeerStorage.class.getName();
+
+  public static final String REPLICATION_QUEUE_STORAGE_IMPL =
+  "hbase.replication.queue.storage.impl";
+  public static final String DEFAULT_REPLICATION_QUEUE_STORAGE_IMPL =
+  ZKReplicationQueueStorage.class.getName();
+
   private ReplicationStorageFactory() {
   }
 
@@ -36,7 +46,10 @@ public final class ReplicationStorageFactory {
* Create a new {@link ReplicationPeerStorage}.
*/
   public static ReplicationPeerStorage getReplicationPeerStorage(ZKWatcher zk, 
Configuration conf) {
-return new 
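
The hunk above makes the replication storage layer pluggable: the patch introduces the keys hbase.replication.peer.storage.impl and hbase.replication.queue.storage.impl with the ZooKeeper-backed classes as defaults, and the factory resolves the configured class through HBase's ReflectionUtils. A rough sketch of the resolve-and-instantiate pattern using plain Hadoop Configuration.getClass with a no-arg constructor (the real factory methods take a ZKWatcher and a Configuration; PeerStorage and ZkPeerStorage here are invented stand-ins):

    import org.apache.hadoop.conf.Configuration;

    public class PluggableStorage {
      interface PeerStorage { }                              // stand-in for ReplicationPeerStorage

      static class ZkPeerStorage implements PeerStorage { }  // stand-in for the ZK default

      /** Resolve the implementation class from a config key, then instantiate it. */
      static PeerStorage create(Configuration conf) throws Exception {
        Class<? extends PeerStorage> clazz = conf.getClass(
            "hbase.replication.peer.storage.impl",  // key added by the patch
            ZkPeerStorage.class,                     // ZK-backed default
            PeerStorage.class);
        return clazz.getDeclaredConstructor().newInstance();
      }

      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        System.out.println(create(conf).getClass().getSimpleName()); // ZkPeerStorage
        // A table-based implementation would be plugged in the same way:
        conf.setClass("hbase.replication.peer.storage.impl",
            ZkPeerStorage.class, PeerStorage.class);
        System.out.println(create(conf).getClass().getSimpleName());
      }
    }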

[1/2] hbase git commit: Revert "Revert "HBASE-19665 Add table based replication peers/queues storage back""

2018-03-17 Thread zhangduo
Repository: hbase
Updated Branches:
  refs/heads/HBASE-15867 [created] 6d5f29378


http://git-wip-us.apache.org/repos/asf/hbase/blob/6d5f2937/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java
index 6d75fec..4a36e13 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java
@@ -70,7 +70,7 @@ import org.apache.hadoop.hbase.replication.ReplicationPeers;
 import org.apache.hadoop.hbase.replication.ReplicationQueueStorage;
 import org.apache.hadoop.hbase.replication.ReplicationSourceDummy;
 import org.apache.hadoop.hbase.replication.ReplicationStorageFactory;
-import org.apache.hadoop.hbase.replication.ZKReplicationPeerStorage;
+import org.apache.hadoop.hbase.replication.ReplicationUtils;
 import 
org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceManager.NodeFailoverWorker;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.testclassification.ReplicationTests;
@@ -170,9 +170,9 @@ public abstract class TestReplicationSourceManager {
 + conf.get(HConstants.ZOOKEEPER_CLIENT_PORT) + ":/1"));
 ZKUtil.createWithParents(zkw, "/hbase/replication/peers/1/peer-state");
 ZKUtil.setData(zkw, "/hbase/replication/peers/1/peer-state",
-  ZKReplicationPeerStorage.ENABLED_ZNODE_BYTES);
+  ReplicationUtils.PEER_STATE_ENABLED_BYTES);
 ZKUtil.createWithParents(zkw, "/hbase/replication/state");
-ZKUtil.setData(zkw, "/hbase/replication/state", 
ZKReplicationPeerStorage.ENABLED_ZNODE_BYTES);
+ZKUtil.setData(zkw, "/hbase/replication/state", 
ReplicationUtils.PEER_STATE_ENABLED_BYTES);
 
 ZKClusterId.setClusterId(zkw, new ClusterId());
 FSUtils.setRootDir(utility.getConfiguration(), utility.getDataTestDir());

http://git-wip-us.apache.org/repos/asf/hbase/blob/6d5f2937/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/storage/TestReplicationStateBasic.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/storage/TestReplicationStateBasic.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/storage/TestReplicationStateBasic.java
new file mode 100644
index 000..461420e
--- /dev/null
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/storage/TestReplicationStateBasic.java
@@ -0,0 +1,370 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.replication.storage;
+
+import static org.hamcrest.CoreMatchers.hasItems;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertThat;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.replication.ReplicationException;
+import org.apache.hadoop.hbase.replication.ReplicationPeer.PeerState;
+import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
+import org.apache.hadoop.hbase.replication.ReplicationPeerImpl;
+import org.apache.hadoop.hbase.replication.ReplicationPeers;
+import org.apache.hadoop.hbase.replication.ReplicationQueueStorage;
+import org.apache.hadoop.hbase.replication.ReplicationUtils;
+import org.apache.hadoop.hbase.util.Pair;
+import org.apache.hadoop.hbase.zookeeper.ZKConfig;
+import org.apache.zookeeper.KeeperException;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableMap;
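
The test setup in this message seeds ZooKeeper by hand: ZKUtil.createWithParents builds the /hbase/replication/peers/1/peer-state path and ZKUtil.setData writes the enabled-state marker bytes. A rough equivalent with the raw ZooKeeper client for readers without the HBase ZKUtil helpers; setWithParents is an invented helper and error handling is minimal:

    import org.apache.zookeeper.CreateMode;
    import org.apache.zookeeper.KeeperException;
    import org.apache.zookeeper.ZooDefs;
    import org.apache.zookeeper.ZooKeeper;
    import org.apache.zookeeper.data.Stat;

    public class PeerStateZnode {
      /** Create each missing ancestor, then write data at the leaf znode. */
      static void setWithParents(ZooKeeper zk, String path, byte[] data) throws Exception {
        StringBuilder prefix = new StringBuilder();
        String[] parts = path.substring(1).split("/");
        for (int i = 0; i < parts.length; i++) {
          prefix.append('/').append(parts[i]);
          String p = prefix.toString();
          boolean leaf = (i == parts.length - 1);
          Stat stat = zk.exists(p, false);
          if (stat == null) {
            try {
              zk.create(p, leaf ? data : new byte[0],
                  ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
            } catch (KeeperException.NodeExistsException e) {
              // created concurrently; fall through
            }
          } else if (leaf) {
            zk.setData(p, data, stat.getVersion());
          }
        }
      }
      // usage, given a connected client:
      //   setWithParents(zk, "/hbase/replication/peers/1/peer-state", enabledBytes);
    }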

[1/2] hbase git commit: Revert "HBASE-19665 Add table based replication peers/queues storage back"

2018-03-17 Thread zhangduo
Repository: hbase
Updated Branches:
  refs/heads/master 104f58701 -> 00095a2ef


http://git-wip-us.apache.org/repos/asf/hbase/blob/00095a2e/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java
index 4a36e13..6d75fec 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java
@@ -70,7 +70,7 @@ import org.apache.hadoop.hbase.replication.ReplicationPeers;
 import org.apache.hadoop.hbase.replication.ReplicationQueueStorage;
 import org.apache.hadoop.hbase.replication.ReplicationSourceDummy;
 import org.apache.hadoop.hbase.replication.ReplicationStorageFactory;
-import org.apache.hadoop.hbase.replication.ReplicationUtils;
+import org.apache.hadoop.hbase.replication.ZKReplicationPeerStorage;
 import 
org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceManager.NodeFailoverWorker;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.testclassification.ReplicationTests;
@@ -170,9 +170,9 @@ public abstract class TestReplicationSourceManager {
 + conf.get(HConstants.ZOOKEEPER_CLIENT_PORT) + ":/1"));
 ZKUtil.createWithParents(zkw, "/hbase/replication/peers/1/peer-state");
 ZKUtil.setData(zkw, "/hbase/replication/peers/1/peer-state",
-  ReplicationUtils.PEER_STATE_ENABLED_BYTES);
+  ZKReplicationPeerStorage.ENABLED_ZNODE_BYTES);
 ZKUtil.createWithParents(zkw, "/hbase/replication/state");
-ZKUtil.setData(zkw, "/hbase/replication/state", 
ReplicationUtils.PEER_STATE_ENABLED_BYTES);
+ZKUtil.setData(zkw, "/hbase/replication/state", 
ZKReplicationPeerStorage.ENABLED_ZNODE_BYTES);
 
 ZKClusterId.setClusterId(zkw, new ClusterId());
 FSUtils.setRootDir(utility.getConfiguration(), utility.getDataTestDir());

http://git-wip-us.apache.org/repos/asf/hbase/blob/00095a2e/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/storage/TestReplicationStateBasic.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/storage/TestReplicationStateBasic.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/storage/TestReplicationStateBasic.java
deleted file mode 100644
index 461420e..000
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/storage/TestReplicationStateBasic.java
+++ /dev/null
@@ -1,370 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.replication.storage;
-
-import static org.hamcrest.CoreMatchers.hasItems;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertThat;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
-import java.util.ArrayList;
-import java.util.List;
-
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.replication.ReplicationException;
-import org.apache.hadoop.hbase.replication.ReplicationPeer.PeerState;
-import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-import org.apache.hadoop.hbase.replication.ReplicationPeerImpl;
-import org.apache.hadoop.hbase.replication.ReplicationPeers;
-import org.apache.hadoop.hbase.replication.ReplicationQueueStorage;
-import org.apache.hadoop.hbase.replication.ReplicationUtils;
-import org.apache.hadoop.hbase.util.Pair;
-import org.apache.hadoop.hbase.zookeeper.ZKConfig;
-import org.apache.zookeeper.KeeperException;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import 

[2/2] hbase git commit: Revert "HBASE-19665 Add table based replication peers/queues storage back"

2018-03-17 Thread zhangduo
Revert "HBASE-19665 Add table based replication peers/queues storage back"

This reverts commit 31978c31bbf363d98c50cc6b293105a085888471.

 Conflicts:

hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/TableReplicationStorageBase.java


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/00095a2e
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/00095a2e
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/00095a2e

Branch: refs/heads/master
Commit: 00095a2ef9442e3fd86c04876c9d91f2f8b23ad8
Parents: 104f587
Author: zhangduo 
Authored: Sat Mar 17 20:25:27 2018 +0800
Committer: zhangduo 
Committed: Sat Mar 17 20:25:27 2018 +0800

--
 .../replication/ReplicationPeerStorage.java |   3 +-
 .../replication/ReplicationStorageFactory.java  |  20 +-
 .../hbase/replication/ReplicationUtils.java |  13 -
 .../TableReplicationPeerStorage.java| 171 --
 .../TableReplicationQueueStorage.java   | 522 ---
 .../TableReplicationStorageBase.java| 127 -
 .../replication/ZKReplicationPeerStorage.java   |  16 +-
 .../replication/ZKReplicationQueueStorage.java  |   6 +-
 .../replication/TestReplicationStateBasic.java  | 363 +
 .../replication/TestReplicationStateZKImpl.java |  95 
 .../TestZKReplicationPeerStorage.java   | 178 +++
 .../TestZKReplicationQueueStorage.java  | 252 +
 .../TestReplicationSourceManager.java   |   6 +-
 .../storage/TestReplicationStateBasic.java  | 370 -
 .../storage/TestReplicationStateTableImpl.java  | 129 -
 .../storage/TestReplicationStateZKImpl.java |  98 
 .../storage/TestZKReplicationPeerStorage.java   | 182 ---
 .../storage/TestZKReplicationQueueStorage.java  | 255 -
 18 files changed, 907 insertions(+), 1899 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/00095a2e/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerStorage.java
--
diff --git 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerStorage.java
 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerStorage.java
index 4684f08..1adda02 100644
--- 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerStorage.java
+++ 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerStorage.java
@@ -42,8 +42,7 @@ public interface ReplicationPeerStorage {
 
   /**
* Set the state of peer, {@code true} to {@code ENABLED}, otherwise to 
{@code DISABLED}.
-   * @throws ReplicationException if there are errors accessing the storage 
service or peer does not
-   *   exist.
+   * @throws ReplicationException if there are errors accessing the storage 
service.
*/
   void setPeerState(String peerId, boolean enabled) throws 
ReplicationException;
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/00095a2e/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationStorageFactory.java
--
diff --git 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationStorageFactory.java
 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationStorageFactory.java
index cbfec3b..462cfed 100644
--- 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationStorageFactory.java
+++ 
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationStorageFactory.java
@@ -18,7 +18,6 @@
 package org.apache.hadoop.hbase.replication;
 
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.util.ReflectionUtils;
 import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
 import org.apache.yetus.audience.InterfaceAudience;
 
@@ -30,15 +29,6 @@ import org.apache.yetus.audience.InterfaceAudience;
 @InterfaceAudience.Private
 public final class ReplicationStorageFactory {
 
-  public static final String REPLICATION_PEER_STORAGE_IMPL = 
"hbase.replication.peer.storage.impl";
-  public static final String DEFAULT_REPLICATION_PEER_STORAGE_IMPL =
-  ZKReplicationPeerStorage.class.getName();
-
-  public static final String REPLICATION_QUEUE_STORAGE_IMPL =
-  "hbase.replication.queue.storage.impl";
-  public static final String DEFAULT_REPLICATION_QUEUE_STORAGE_IMPL =
-  ZKReplicationQueueStorage.class.getName();
-
   private ReplicationStorageFactory() {
   }
 
@@ -46,10 +36,7 @@ public final class ReplicationStorageFactory {
* Create a new {@link ReplicationPeerStorage}.
*/
   public static