[17/29] hbase-site git commit: Published site at 44dec60054d1c45880d591c74a023f7a534e6d73.

2018-12-25 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/70f4ddbc/testdevapidocs/org/apache/hadoop/hbase/IntegrationTestingUtility.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/IntegrationTestingUtility.html b/testdevapidocs/org/apache/hadoop/hbase/IntegrationTestingUtility.html
index 5374248..e307739 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/IntegrationTestingUtility.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/IntegrationTestingUtility.html
@@ -285,7 +285,7 @@ extends 
 
 Methods inherited from class org.apache.hadoop.hbase.HBaseTestingUtility
-assertKVListsEqual,
 assertRegionOnlyOnServer,
 assertRegionOnServer,
 assignRegion,
 available,
 checksumRows,
 cleanupDataTestDirOnTestFS,
 cleanupDataTestDirOnTestFS,
 closeRegionAndWAL,
 closeRegionAndWAL,
 compact,
 compact,
 countRows,
 countRows,
 countRows,
 countRows,
 countRows,
 countRows,
 countRows,
 createLocalHRegion,
 createLocalHRegion,
 createLocalHRegion,
 createLocalHRegion,
 createLocalHRegion, createLocalHRegionWithInMemoryFlags,
 createLocalHTU,
 createLocalHTU,
 createMockRegionServerService,
 createMockRegionServerService,
 createMockRegionServerService, createMultiRegionsInMeta,
 createMultiRegionsInMeta,
 createMultiRegionTable,
 createMultiRegionTable,
 createMultiRegionTable,
 createMultiRegionTable,
 createPreSplitLoadTestTable,
 createPreSplitLoadTestTable,
 createPreSplitLoadTestTable, createPreSplitLoadTestTable,
 createPreSplitLoadTestTable,
 createPreSplitLoadTestTable, createPreSplitLoadTestTable,
 createRandomTable,
 createRegionAndWAL,
 createRegionAndWAL,
 createRegionAndWAL,
 createRegionAndWAL,
 createRootDir,
 createRootDir,
 createTable,
 createTable,
 createTable,
 createTable,
 createTable,
 createTable, createTable,
 createTable,
 createTable,
 createTable,
 createTable,
 createTable, createTable,
 createTable,
 createTable,
 createTable,
 createTable,
 createTable,
 createTable,
 createTableDescriptor,
 createTableDescriptor,
 createTableDescriptor,
 createTableDescriptor,
 createTableDescriptor,
 createTableDescriptor,
 createTestRegion,
 createTestRegion,
 createWal,
 createWALRootDir,
 deleteNumericRows, deleteTable,
 deleteTableData,
 deleteTableIfAny,
 enableDebug,
 ensureSomeNonStoppedRegionServersAvailable,
 ensureSomeRegionServersAvailable,
 expireMasterSession, expireRegionServerSession,
 expireSession,
 expireSession,
 explainTableAvailability,
 explainTableState,
 findLastTableState,
 flush, flush,
 generateColumnDescriptors,
 generateColumnDescriptors,
 getAdmin,
 getAllOnlineRegions,
 getClosestRowBefore,
 getClusterKey,
 getConfiguration,
 getConnection,
 getDataTestDirOnTestFS,
 getDataTestDirOnTestFS,
 getDefaultRootDirPath,
 getDefaultRootDirPath,
 getDFSCluster,
 getDifferentUser,
 getFromStoreFile,
 getFromStoreFile,
 getHBaseAdmin,
 getHBaseCluster,
 getHBaseClusterInterface,
 getHbck,
 getMetaRSPort,
 getMetaTableDescriptor, getMetaTableDescriptorBuilder,
 getMetaTableRows,
 getMetaTableRows,
 getMiniHBaseCluster,
 getNumHFiles,
 getNumHFilesForRS,
 getOtherRegionServer,
 getRegionSplitStartKeys,
 getRSForFirstRegionInTable,
 getSplittableRegion,
 getSupportedCompressionAlgorithms,
 getTestFileSystem,
 isReadShortCircuitOn,
 killMiniHBaseCluster,
 loadNumericRows,
 loadRandomRows,
 loadRegion,
 loadRegion,
 loadRegion,
 loadTable, loadTable,
 loadTable,
 loadTable,
 loadTable,
 memStoreTSTagsAndOffheapCombination,
 modifyTableSync,
 moveRegionAndWait,
 predicateNoRegionsInTransition,
 predicateTableAvailable,
 predicateTableDisabled,
 predicateTableEnabled,
 randomFreePort,
 randomMultiCastAddress,
 restartHBaseCluster,
 restartHBaseCluster,
 safeGetAsStr,
 setDFSCluster,
 setDFSCluster,
 setFileSystemURI,
 setHBaseCluster,
 setMaxRecoveryErrorCount,
 setReplicas,
 setupDataTestDir,
 setupMiniKdc,
 shutdownMiniCluster,
 shutdownMiniDFSCluster,
 shutdownMiniHBaseCluster,
 shutdownMiniMapReduceCluster, startMiniCluster,
 startMiniCluster,
 startMiniCluster,
 startMiniCluster,
 startMiniCluster,
 startMiniCluster,
 startMiniCluster,
 startMiniCluster,
 startMiniCluster,
 startMiniCluster,
 startMiniCluster,
 startMiniCluster,
 startMiniCluster,
 startMiniCluster,
 startMiniDFSCluster, startMiniDFSCluster,
 startMiniDFSCluster,
 startMiniDFSCluster,
 startMiniDFSClusterForTestWAL,
 startMiniHBaseCluster,
 startMiniHBaseCluster,
 startMiniHBaseCluster, startMiniHBaseCluster,
 startMiniHBaseCluster,
 startMiniMapReduceCluster,
 truncateTable,
 truncateTable,
 unassignRegion,
 unassignRegion, unassignRegionByRow,
 unassignRegionByRow,
 verifyNumericRows,
 verifyNumericRows,
 verifyNumericRows,
 verifyNumericRows,
 verifyNumericRows,
 verifyTableDescriptorIgnoreTableName,
 waitForHostPort,
 waitLabelAvailable,
 waitTableAvailable,
 waitTableAvailable,
 waitTableAvailable,
 waitTableDisabled,
 waitTableDisabled,
 waitTableDisabled,
 waitTableEnabled,
 waitTableEnabled,
 waitTableEnabled, 

[17/29] hbase-site git commit: Published site at 12786f80c14c6f2c3c111a55bbf431fb2e81e828.

2018-12-21 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/13ae5225/devapidocs/src-html/org/apache/hadoop/hbase/KeyValue.Type.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/KeyValue.Type.html b/devapidocs/src-html/org/apache/hadoop/hbase/KeyValue.Type.html
index 6428b67..65c197f 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/KeyValue.Type.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/KeyValue.Type.html
@@ -260,2307 +260,2317 @@
 252}
 253
 254/**
-255 * Cannot rely on enum ordinals. They change if item is removed or moved.
-256 * Do our own codes.
-257 * @param b
-258 * @return Type associated with passed code.
-259 */
-260public static Type codeToType(final byte b) {
-261  Type t = codeArray[b & 0xff];
-262  if (t != null) {
-263return t;
-264  }
-265  throw new RuntimeException("Unknown code " + b);
-266}
-267  }
-268
-269  /**
-270   * Lowest possible key.
-271   * Makes a Key with highest possible Timestamp, empty row and column.  No
-272   * key can be equal or lower than this one in memstore or in store file.
-273   */
-274  public static final KeyValue LOWESTKEY =
-275new KeyValue(HConstants.EMPTY_BYTE_ARRAY, HConstants.LATEST_TIMESTAMP);
-276
-277
-278  // KeyValue core instance fields.
-279  protected byte [] bytes = null;  // an immutable byte array that contains the KV
-280  protected int offset = 0;  // offset into bytes buffer KV starts at
-281  protected int length = 0;  // length of the KV starting from offset.
-282
-283  /** Here be dragons **/
-284
-285  /**
-286   * used to achieve atomic operations in the memstore.
-287   */
-288  @Override
-289  public long getSequenceId() {
-290return seqId;
-291  }
-292
-293  @Override
-294  public void setSequenceId(long seqId) {
-295this.seqId = seqId;
-296  }
-297
-298  // multi-version concurrency control version.  default value is 0, aka do not care.
-299  private long seqId = 0;
-300
-301  /** Dragon time over, return to normal business */
-302
-303
-304  /** Writable Constructor -- DO NOT USE */
-305  public KeyValue() {}
+255 * True to indicate that the byte b is a valid type.
+256 * @param b byte to check
+257 * @return true or false
+258 */
+259static boolean isValidType(byte b) {
+260  return codeArray[b & 0xff] != null;
+261}
+262
+263/**
+264 * Cannot rely on enum ordinals. They change if item is removed or moved.
+265 * Do our own codes.
+266 * @param b
+267 * @return Type associated with passed code.
+268 */
+269public static Type codeToType(final byte b) {
+270  Type t = codeArray[b & 0xff];
+271  if (t != null) {
+272return t;
+273  }
+274  throw new RuntimeException("Unknown code " + b);
+275}
+276  }
+277
+278  /**
+279   * Lowest possible key.
+280   * Makes a Key with highest possible Timestamp, empty row and column.  No
+281   * key can be equal or lower than this one in memstore or in store file.
+282   */
+283  public static final KeyValue LOWESTKEY =
+284new KeyValue(HConstants.EMPTY_BYTE_ARRAY, HConstants.LATEST_TIMESTAMP);
+285
+286
+287  // KeyValue core instance fields.
+288  protected byte [] bytes = null;  // an immutable byte array that contains the KV
+289  protected int offset = 0;  // offset into bytes buffer KV starts at
+290  protected int length = 0;  // length of the KV starting from offset.
+291
+292  /** Here be dragons **/
+293
+294  /**
+295   * used to achieve atomic operations in the memstore.
+296   */
+297  @Override
+298  public long getSequenceId() {
+299return seqId;
+300  }
+301
+302  @Override
+303  public void setSequenceId(long seqId) {
+304this.seqId = seqId;
+305  }
 306
-307  /**
-308   * Creates a KeyValue from the start of the specified byte array.
-309   * Presumes <code>bytes</code> content is formatted as a KeyValue blob.
-310   * @param bytes byte array
-311   */
-312  public KeyValue(final byte [] bytes) {
-313this(bytes, 0);
-314  }
+307  // multi-version concurrency control version.  default value is 0, aka do not care.
+308  private long seqId = 0;
+309
+310  /** Dragon time over, return to normal business */
+311
+312
+313  /** Writable Constructor -- DO NOT USE */
+314  public KeyValue() {}
 315
 316  /**
-317   * Creates a KeyValue from the specified byte array and offset.
-318   * Presumes <code>bytes</code> content starting at <code>offset</code> is
-319   * formatted as a KeyValue blob.
-320   * @param bytes byte array
-321   * @param offset offset to start of KeyValue
-322   */
-323  public KeyValue(final byte [] bytes, final int offset) {
-324this(bytes, offset, getLength(bytes, offset));
-325  }
-326
-327  /**
-328   * Creates a KeyValue from the specified byte array, starting at offset, and
-329   * for length <code>length</code>.
-330   * @param bytes byte 
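
The hunk above keeps the KeyValue.Type pattern of mapping stable byte codes to enum constants through a 256-slot lookup array instead of trusting Enum.ordinal(). A self-contained sketch of the same technique, using a hypothetical Code enum rather than the real KeyValue.Type:

public enum Code {
  // Explicit wire codes; reordering or inserting constants cannot change them.
  MINIMUM((byte) 0), PUT((byte) 4), DELETE((byte) 8), MAXIMUM((byte) 255);

  private static final Code[] CODE_ARRAY = new Code[256];
  static {
    for (Code c : values()) {
      CODE_ARRAY[c.code & 0xff] = c; // index by the unsigned byte value
    }
  }

  private final byte code;

  Code(byte code) {
    this.code = code;
  }

  public byte getCode() {
    return code;
  }

  public static boolean isValidCode(byte b) {
    return CODE_ARRAY[b & 0xff] != null;
  }

  public static Code codeToCode(byte b) {
    Code c = CODE_ARRAY[b & 0xff];
    if (c != null) {
      return c;
    }
    throw new IllegalArgumentException("Unknown code " + b);
  }
}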

[17/29] hbase-site git commit: Published site at 79d90c87b5bc6d4aa50e6edc52a3f20da708ee29.

2018-12-08 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3defc75b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html
index 7cdc8d2..fd8ff3b 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html
@@ -2639,1308 +2639,1307 @@
 2631.submitProcedure(new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) {
 2632  @Override
 2633  protected void run() throws IOException {
-2634TableDescriptor newDescriptor = newDescriptorGetter.get();
-2635sanityCheckTableDescriptor(newDescriptor);
-2636TableDescriptor oldDescriptor = getMaster().getTableDescriptors().get(tableName);
-2637getMaster().getMasterCoprocessorHost().preModifyTable(tableName, oldDescriptor,
-2638  newDescriptor);
-2639
-2640LOG.info(getClientIdAuditPrefix() + " modify " + tableName);
-2641
-2642// Execute the operation synchronously - wait for the operation completes before
-2643// continuing.
-2644//
-2645// We need to wait for the procedure to potentially fail due to "prepare" sanity
-2646// checks. This will block only the beginning of the procedure. See HBASE-19953.
-2647ProcedurePrepareLatch latch = ProcedurePrepareLatch.createBlockingLatch();
-2648submitProcedure(
-2649  new ModifyTableProcedure(procedureExecutor.getEnvironment(), newDescriptor, latch));
-2650latch.await();
-2651
-2652getMaster().getMasterCoprocessorHost().postModifyTable(tableName, oldDescriptor,
-2653  newDescriptor);
-2654  }
-2655
-2656  @Override
-2657  protected String getDescription() {
-2658return "ModifyTableProcedure";
-2659  }
-2660});
-2661
-2662  }
-2663
-2664  @Override
-2665  public long modifyTable(final TableName tableName, final TableDescriptor newDescriptor,
-2666  final long nonceGroup, final long nonce) throws IOException {
-2667checkInitialized();
-2668return modifyTable(tableName, new TableDescriptorGetter() {
-2669  @Override
-2670  public TableDescriptor get() throws IOException {
-2671return newDescriptor;
-2672  }
-2673}, nonceGroup, nonce);
-2674
-2675  }
-2676
-2677  public long restoreSnapshot(final SnapshotDescription snapshotDesc,
-2678  final long nonceGroup, final long nonce, final boolean restoreAcl) throws IOException {
-2679checkInitialized();
-2680getSnapshotManager().checkSnapshotSupport();
-2681
-2682// Ensure namespace exists. Will throw exception if non-known NS.
-2683final TableName dstTable = TableName.valueOf(snapshotDesc.getTable());
-2684getClusterSchema().getNamespace(dstTable.getNamespaceAsString());
-2685
-2686return MasterProcedureUtil.submitProcedure(
-2687new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) {
-2688  @Override
-2689  protected void run() throws IOException {
-2690  setProcId(
-2691getSnapshotManager().restoreOrCloneSnapshot(snapshotDesc, getNonceKey(), restoreAcl));
-2692  }
-2693
-2694  @Override
-2695  protected String getDescription() {
-2696return "RestoreSnapshotProcedure";
-2697  }
-2698});
-2699  }
-2700
-2701  private void checkTableExists(final TableName tableName)
-2702  throws IOException, TableNotFoundException {
-2703if (!MetaTableAccessor.tableExists(getConnection(), tableName)) {
-2704  throw new TableNotFoundException(tableName);
-2705}
-2706  }
-2707
-2708  @Override
-2709  public void checkTableModifiable(final TableName tableName)
-2710  throws IOException, TableNotFoundException, TableNotDisabledException {
-2711if (isCatalogTable(tableName)) {
-2712  throw new IOException("Can't modify catalog tables");
-2713}
-2714checkTableExists(tableName);
-2715TableState ts = getTableStateManager().getTableState(tableName);
-2716if (!ts.isDisabled()) {
-2717  throw new TableNotDisabledException("Not DISABLED; " + ts);
-2718}
-2719  }
-2720
-2721  public ClusterMetrics getClusterMetricsWithoutCoprocessor() throws InterruptedIOException {
-2722return getClusterMetricsWithoutCoprocessor(EnumSet.allOf(Option.class));
-2723  }
-2724
-2725  public ClusterMetrics getClusterMetricsWithoutCoprocessor(EnumSet<Option> options)
-2726  throws InterruptedIOException {
-2727ClusterMetricsBuilder builder = ClusterMetricsBuilder.newBuilder();
-2728// given that hbase1 can't submit the request with Option,
-2729// we return all information to client if the 
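
The removed run() above shows the pattern HBASE-19953 introduced: submit the procedure asynchronously but block on a prepare latch so that prepare-phase sanity failures surface to the caller synchronously. A minimal sketch of that latch idea, with hypothetical names (this is not the HBase ProcedurePrepareLatch API):

import java.io.IOException;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicReference;

final class PrepareLatch {
  private final CountDownLatch latch = new CountDownLatch(1);
  private final AtomicReference<IOException> error = new AtomicReference<>();

  void complete(IOException failure) { // called by the procedure once prepare ran
    error.set(failure);
    latch.countDown();
  }

  void await() throws IOException, InterruptedException {
    latch.await();                     // block only through the prepare phase
    IOException e = error.get();
    if (e != null) {
      throw e;                         // re-throw a prepare failure to the caller
    }
  }
}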

[17/29] hbase-site git commit: Published site at 640a5e390b525e1c42f3c46bcc5acc59786900f0.

2018-11-14 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2bd8387f/testdevapidocs/src-html/org/apache/hadoop/hbase/backup/TestBackupBase.FullTableBackupClientForTest.html
--
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/backup/TestBackupBase.FullTableBackupClientForTest.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/backup/TestBackupBase.FullTableBackupClientForTest.html
index ca790a5..9f82430 100644
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/backup/TestBackupBase.FullTableBackupClientForTest.html
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/backup/TestBackupBase.FullTableBackupClientForTest.html
@@ -72,444 +72,449 @@
 064import org.apache.hadoop.hbase.security.access.SecureTestUtil;
 065import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils;
 066import org.apache.hadoop.hbase.util.Bytes;
-067import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-068import org.apache.hadoop.hbase.wal.WALFactory;
-069import org.junit.AfterClass;
-070import org.junit.Before;
-071import org.slf4j.Logger;
-072import org.slf4j.LoggerFactory;
-073
-074/**
-075 * This class is only a base for other integration-level backup tests. Do not add tests here.
-076 * TestBackupSmallTests is where tests that don't require bring machines up/down should go All other
-077 * tests should have their own classes and extend this one
-078 */
-079public class TestBackupBase {
-080  private static final Logger LOG = LoggerFactory.getLogger(TestBackupBase.class);
-081
-082  protected static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
-083  protected static HBaseTestingUtility TEST_UTIL2;
-084  protected static Configuration conf1 = TEST_UTIL.getConfiguration();
-085  protected static Configuration conf2;
-086
-087  protected static TableName table1 = TableName.valueOf("table1");
-088  protected static HTableDescriptor table1Desc;
-089  protected static TableName table2 = TableName.valueOf("table2");
-090  protected static TableName table3 = TableName.valueOf("table3");
-091  protected static TableName table4 = TableName.valueOf("table4");
-092
-093  protected static TableName table1_restore = TableName.valueOf("default:table1");
-094  protected static TableName table2_restore = TableName.valueOf("ns2:table2");
-095  protected static TableName table3_restore = TableName.valueOf("ns3:table3_restore");
-096  protected static TableName table4_restore = TableName.valueOf("ns4:table4_restore");
-097
-098  protected static final int NB_ROWS_IN_BATCH = 99;
-099  protected static final byte[] qualName = Bytes.toBytes("q1");
-100  protected static final byte[] famName = Bytes.toBytes("f");
-101
-102  protected static String BACKUP_ROOT_DIR = Path.SEPARATOR + "backupUT";
-103  protected static String BACKUP_REMOTE_ROOT_DIR = Path.SEPARATOR + "backupUT";
-104  protected static String provider = "defaultProvider";
-105  protected static boolean secure = false;
-106
-107  protected static boolean autoRestoreOnFailure = true;
-108  protected static boolean setupIsDone = false;
-109  protected static boolean useSecondCluster = false;
-110
-111  static class IncrementalTableBackupClientForTest extends IncrementalTableBackupClient {
-112public IncrementalTableBackupClientForTest() {
-113}
-114
-115public IncrementalTableBackupClientForTest(Connection conn,
-116String backupId, BackupRequest request) throws IOException {
-117  super(conn, backupId, request);
-118}
-119
-120@Override
-121public void execute() throws IOException {
-122  // case INCREMENTAL_COPY:
-123  try {
-124// case PREPARE_INCREMENTAL:
-125failStageIf(Stage.stage_0);
-126beginBackup(backupManager, backupInfo);
-127
-128failStageIf(Stage.stage_1);
-129backupInfo.setPhase(BackupPhase.PREPARE_INCREMENTAL);
-130LOG.debug("For incremental backup, current table set is "
-131+ backupManager.getIncrementalBackupTableSet());
-132newTimestamps = ((IncrementalBackupManager) backupManager).getIncrBackupLogFileMap();
-133// copy out the table and region info files for each table
-134BackupUtils.copyTableRegionInfo(conn, backupInfo, conf);
-135// convert WAL to HFiles and copy them to .tmp under BACKUP_ROOT
-136convertWALsToHFiles();
-137incrementalCopyHFiles(new String[] {getBulkOutputDir().toString()},
-138  backupInfo.getBackupRootDir());
-139failStageIf(Stage.stage_2);
-140// Save list of WAL files copied
-141backupManager.recordWALFiles(backupInfo.getIncrBackupFileList());
-142
-143// case INCR_BACKUP_COMPLETE:
-144// set overall backup status: complete. Here we make sure to complete the backup.
-145// After this checkpoint, even if entering cancel process, will let the backup finished
-146// Set the 
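
The try block above is instrumented with failStageIf(...) checkpoints so a test can force the backup client to fail at any chosen stage and then exercise the rollback path. A minimal sketch of that fault-injection pattern, with hypothetical names (not the TestBackupBase API):

import java.io.IOException;

class StagedWorkflow {
  enum Stage { STAGE_0, STAGE_1, STAGE_2 }

  // Stage to fail at, or null to run clean; a test sets this before execute().
  static volatile Stage failurePoint = null;

  private static void failStageIf(Stage stage) throws IOException {
    if (stage == failurePoint) {
      throw new IOException("Injected failure at " + stage);
    }
  }

  void execute() throws IOException {
    failStageIf(Stage.STAGE_0);
    // ... prepare phase ...
    failStageIf(Stage.STAGE_1);
    // ... copy phase ...
    failStageIf(Stage.STAGE_2);
    // ... completion phase ...
  }
}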

[17/29] hbase-site git commit: Published site at 5e84997f2ffdbcf5f849d70c30ddbe2db4039ca4.

2018-11-10 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/27f5bfb5/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.ProcedureExecutorListener.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.ProcedureExecutorListener.html b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.ProcedureExecutorListener.html
index dd0dd0f..9fd7e0b 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.ProcedureExecutorListener.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.ProcedureExecutorListener.html
@@ -975,1128 +975,1136 @@
 967store.update(procedure);
 968  }
 969
-970  // If we don't have the lock, we can't re-submit the queue,
-971  // since it is already executing. To get rid of the stuck situation, we
-972  // need to restart the master. With the procedure set to bypass, the procedureExecutor
-973  // will bypass it and won't get stuck again.
-974  if (lockEntry != null) {
-975// add the procedure to run queue,
-976scheduler.addFront(procedure);
-977LOG.debug("Bypassing {} and its ancestors successfully, adding to queue", procedure);
-978  } else {
-979LOG.debug("Bypassing {} and its ancestors successfully, but since it is already running, "
-980+ "skipping add to queue", procedure);
-981  }
-982  return true;
-983
-984} finally {
-985  if (lockEntry != null) {
-986procExecutionLock.releaseLockEntry(lockEntry);
-987  }
-988}
-989  }
-990
-991  /**
-992   * Add a new root-procedure to the executor.
-993   * @param proc the new procedure to execute.
-994   * @param nonceKey the registered unique identifier for this operation from the client or process.
-995   * @return the procedure id, that can be used to monitor the operation
-996   */
-997  @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="NP_NULL_ON_SOME_PATH",
-998  justification = "FindBugs is blind to the check-for-null")
-999  public long submitProcedure(Procedure<TEnvironment> proc, NonceKey nonceKey) {
-1000Preconditions.checkArgument(lastProcId.get() >= 0);
-1001
-1002prepareProcedure(proc);
-1003
-1004final Long currentProcId;
-1005if (nonceKey != null) {
-1006  currentProcId = nonceKeysToProcIdsMap.get(nonceKey);
-1007  Preconditions.checkArgument(currentProcId != null,
-1008"Expected nonceKey=" + nonceKey + " to be reserved, use registerNonce(); proc=" + proc);
-1009} else {
-1010  currentProcId = nextProcId();
-1011}
-1012
-1013// Initialize the procedure
-1014proc.setNonceKey(nonceKey);
-1015proc.setProcId(currentProcId.longValue());
-1016
-1017// Commit the transaction
-1018store.insert(proc, null);
-1019LOG.debug("Stored {}", proc);
+970  // If state of procedure is WAITING_TIMEOUT, we can directly submit it to the scheduler.
+971  // Instead we should remove it from timeout Executor queue and transfer its state to RUNNABLE
+972  if (procedure.getState() == ProcedureState.WAITING_TIMEOUT) {
+973LOG.debug("transform procedure {} from WAITING_TIMEOUT to RUNNABLE", procedure);
+974if (timeoutExecutor.remove(procedure)) {
+975  LOG.debug("removed procedure {} from timeoutExecutor", procedure);
+976  timeoutExecutor.executeTimedoutProcedure(procedure);
+977}
+978  } else if (lockEntry != null) {
+979scheduler.addFront(procedure);
+980LOG.debug("Bypassing {} and its ancestors successfully, adding to queue", procedure);
+981  } else {
+982// If we don't have the lock, we can't re-submit the queue,
+983// since it is already executing. To get rid of the stuck situation, we
+984// need to restart the master. With the procedure set to bypass, the procedureExecutor
+985// will bypass it and won't get stuck again.
+986LOG.debug("Bypassing {} and its ancestors successfully, but since it is already running, "
+987+ "skipping add to queue",
+988  procedure);
+989  }
+990  return true;
+991
+992} finally {
+993  if (lockEntry != null) {
+994procExecutionLock.releaseLockEntry(lockEntry);
+995  }
+996}
+997  }
+998
+999  /**
+1000   * Add a new root-procedure to the executor.
+1001   * @param proc the new procedure to execute.
+1002   * @param nonceKey the registered unique identifier for this operation from the client or process.
+1003   * @return the procedure id, that can be used to monitor the operation
+1004   */
+1005  @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="NP_NULL_ON_SOME_PATH",
+1006  justification = "FindBugs is blind to the check-for-null")
+1007  public long submitProcedure(Procedure<TEnvironment> proc, NonceKey nonceKey) {
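
submitProcedure() above resolves the procedure id from nonceKeysToProcIdsMap when the client supplied a nonce, so a retried call gets back the id that was already reserved instead of creating a duplicate procedure. A minimal sketch of that nonce-reservation idea, with hypothetical names (not the HBase ProcedureExecutor API):

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicLong;

final class NonceRegistry {
  private final AtomicLong lastProcId = new AtomicLong(0);
  private final ConcurrentMap<Long, Long> nonceToProcId = new ConcurrentHashMap<>();

  /** Returns true if this call reserved the nonce, false if it was a retry. */
  boolean registerNonce(long nonce) {
    long candidate = lastProcId.incrementAndGet();
    return nonceToProcId.putIfAbsent(nonce, candidate) == null;
  }

  /** Id reserved for this nonce; the caller must have registered it first. */
  long procIdForNonce(long nonce) {
    Long id = nonceToProcId.get(nonce);
    if (id == null) {
      throw new IllegalArgumentException("nonce " + nonce + " not reserved");
    }
    return id;
  }
}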

[17/29] hbase-site git commit: Published site at 2997b6d0714d5542784baf830e7c16a9ef6b62d6.

2018-07-14 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/19303896/devapidocs/org/apache/hadoop/hbase/rest/model/ScannerModel.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/rest/model/ScannerModel.html b/devapidocs/org/apache/hadoop/hbase/rest/model/ScannerModel.html
index 6123e57..b066537 100644
--- a/devapidocs/org/apache/hadoop/hbase/rest/model/ScannerModel.html
+++ b/devapidocs/org/apache/hadoop/hbase/rest/model/ScannerModel.html
@@ -564,7 +564,7 @@ implements 
 
 COLUMN_DIVIDER
-private static final byte[] COLUMN_DIVIDER
+private static final byte[] COLUMN_DIVIDER
 
 
 
@@ -581,7 +581,7 @@ implements 
 
 ScannerModel
-public ScannerModel()
+public ScannerModel()
 Default constructor
 
 
@@ -591,7 +591,7 @@ implements 
 
 ScannerModel
-public ScannerModel(byte[] startRow,
+public ScannerModel(byte[] startRow,
                     byte[] endRow,
                     List<byte[]> columns,
                     int batch,
@@ -620,7 +620,7 @@ implements 
 
 ScannerModel
-public ScannerModel(byte[] startRow,
+public ScannerModel(byte[] startRow,
                     byte[] endRow,
                     List<byte[]> columns,
                     int batch,
@@ -658,7 +658,7 @@ implements 
 
 getJasonProvider
-private static com.fasterxml.jackson.jaxrs.json.JacksonJaxbJsonProvider getJasonProvider()
+private static com.fasterxml.jackson.jaxrs.json.JacksonJaxbJsonProvider getJasonProvider()
 Get the JacksonJaxbJsonProvider instance;
 
 Returns:
@@ -672,7 +672,7 @@ implements 
 
 buildFilter
-public static Filter buildFilter(String s)
+public static Filter buildFilter(String s)
                           throws Exception
 
 Parameters:
@@ -690,7 +690,7 @@ implements 
 
 stringifyFilter
-public static String stringifyFilter(Filter filter)
+public static String stringifyFilter(Filter filter)
                           throws Exception
 
 Parameters:
@@ -708,7 +708,7 @@ implements 
 
 fromScan
-public static ScannerModel fromScan(Scan scan)
+public static ScannerModel fromScan(Scan scan)
                  throws Exception
 
 Parameters:
@@ -724,7 +724,7 @@ implements 
 
 addColumn
-public void addColumn(byte[] column)
+public void addColumn(byte[] column)
 Add a column to the column set
 
 Parameters:
@@ -738,7 +738,7 @@ implements 
 
 addLabel
-public void addLabel(String label)
+public void addLabel(String label)
 Add a visibility label to the scan
 
 
@@ -748,7 +748,7 @@ implements 
 
 hasStartRow
-public boolean hasStartRow()
+public boolean hasStartRow()
 
 Returns:
 true if a start row was specified
@@ -761,7 +761,7 @@ implements 
 
 getStartRow
-public byte[] getStartRow()
+public byte[] getStartRow()
 
 Returns:
 start row
@@ -774,7 +774,7 @@ implements 
 
 hasEndRow
-public boolean hasEndRow()
+public boolean hasEndRow()
 
 Returns:
 true if an end row was specified
@@ -787,7 +787,7 @@ implements 
 
 getEndRow
-public byte[] getEndRow()
+public byte[] getEndRow()
 
 Returns:
 end row
@@ -800,7 +800,7 @@ implements 
 
 getColumns
-public List<byte[]> getColumns()
+public List<byte[]> getColumns()
 
 Returns:
 list of columns of interest in column:qualifier format, or empty for all
@@ -813,7 +813,7 @@ implements 
 
 getLabels
-public List
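
A small usage sketch built only from the signatures shown above (fromScan, stringifyFilter, buildFilter, getColumns); the row prefix is made up for illustration:

import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.Filter;
import org.apache.hadoop.hbase.filter.PrefixFilter;
import org.apache.hadoop.hbase.rest.model.ScannerModel;
import org.apache.hadoop.hbase.util.Bytes;

public class ScannerModelExample {
  public static void main(String[] args) throws Exception {
    Scan scan = new Scan();
    scan.setFilter(new PrefixFilter(Bytes.toBytes("row-")));

    // Convert the client-side Scan into the REST ScannerModel representation.
    ScannerModel model = ScannerModel.fromScan(scan);
    System.out.println("columns carried by the model: " + model.getColumns().size());

    // Filters travel as JSON strings in the REST API; round-trip one.
    String json = ScannerModel.stringifyFilter(scan.getFilter());
    Filter roundTripped = ScannerModel.buildFilter(json);
    System.out.println(json + " -> " + roundTripped.getClass().getSimpleName());
  }
}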

[17/29] hbase-site git commit: Published site at d7561cee50acf2e3a52b8a38c71259d60b653ed3.

2018-07-10 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dbe3a233/devapidocs/org/apache/hadoop/hbase/util/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/util/package-tree.html b/devapidocs/org/apache/hadoop/hbase/util/package-tree.html
index 3eda6cc..b511f93 100644
--- a/devapidocs/org/apache/hadoop/hbase/util/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/util/package-tree.html
@@ -533,14 +533,14 @@
 
 java.lang.Enum<E> (implements java.lang.Comparable<T>, java.io.Serializable)
 
-org.apache.hadoop.hbase.util.Bytes.LexicographicalComparerHolder.PureJavaComparer (implements org.apache.hadoop.hbase.util.Bytes.Comparer<T>)
 org.apache.hadoop.hbase.util.PoolMap.PoolType
-org.apache.hadoop.hbase.util.IdReadWriteLock.ReferenceType
-org.apache.hadoop.hbase.util.ChecksumType
-org.apache.hadoop.hbase.util.Bytes.LexicographicalComparerHolder.UnsafeComparer (implements org.apache.hadoop.hbase.util.Bytes.Comparer<T>)
 org.apache.hadoop.hbase.util.Order
 org.apache.hadoop.hbase.util.HBaseFsck.ErrorReporter.ERROR_CODE
+org.apache.hadoop.hbase.util.ChecksumType
+org.apache.hadoop.hbase.util.Bytes.LexicographicalComparerHolder.PureJavaComparer (implements org.apache.hadoop.hbase.util.Bytes.Comparer<T>)
+org.apache.hadoop.hbase.util.Bytes.LexicographicalComparerHolder.UnsafeComparer (implements org.apache.hadoop.hbase.util.Bytes.Comparer<T>)
 org.apache.hadoop.hbase.util.PrettyPrinter.Unit
+org.apache.hadoop.hbase.util.IdReadWriteLock.ReferenceType
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dbe3a233/devapidocs/org/apache/hadoop/hbase/wal/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/wal/package-tree.html b/devapidocs/org/apache/hadoop/hbase/wal/package-tree.html
index bcb1724..c6caa9b 100644
--- a/devapidocs/org/apache/hadoop/hbase/wal/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/wal/package-tree.html
@@ -191,8 +191,8 @@
 
 java.lang.Enum<E> (implements java.lang.Comparable<T>, java.io.Serializable)
 
-org.apache.hadoop.hbase.wal.RegionGroupingProvider.Strategies
 org.apache.hadoop.hbase.wal.WALFactory.Providers
+org.apache.hadoop.hbase.wal.RegionGroupingProvider.Strategies
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dbe3a233/devapidocs/src-html/org/apache/hadoop/hbase/ServerMetrics.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/ServerMetrics.html b/devapidocs/src-html/org/apache/hadoop/hbase/ServerMetrics.html
index bf6f859..1f6ce4c 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/ServerMetrics.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/ServerMetrics.html
@@ -49,62 +49,69 @@
 041  }
 042
 043  /**
-044   * @return the number of requests per second.
+044   * @return the string type version of a regionserver.
 045   */
-046  long getRequestCountPerSecond();
-047
-048  /**
-049   * @return total Number of requests from the start of the region server.
-050   */
-051  long getRequestCount();
-052
-053  /**
-054   * @return the amount of used heap
-055   */
-056  Size getUsedHeapSize();
-057
-058  /**
-059   * @return the maximum allowable size of the heap
-060   */
-061  Size getMaxHeapSize();
-062
-063  int getInfoServerPort();
+046  default String getVersion() {
+047return "0.0.0";
+048  }
+049
+050  /**
+051   * @return the number of requests per second.
+052   */
+053  long getRequestCountPerSecond();
+054
+055  /**
+056   * @return total Number of requests from the start of the region server.
+057   */
+058  long getRequestCount();
+059
+060  /**
+061   * @return the amount of used heap
+062   */
+063  Size getUsedHeapSize();
 064
 065  /**
-066   * Call directly from client such as hbase shell
-067   * @return the list of ReplicationLoadSource
-068   */
-069  List<ReplicationLoadSource> getReplicationLoadSourceList();
-070
-071  /**
-072   * Call directly from client such as hbase shell
-073   * @return ReplicationLoadSink
-074   */
-075  @Nullable
-076  ReplicationLoadSink getReplicationLoadSink();
+066   * @return the maximum allowable size 
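
The +046..+048 lines add getVersion() to the ServerMetrics interface as a default method, so implementations compiled against the old interface keep working and simply report "0.0.0". A minimal sketch of that interface-evolution technique, with hypothetical names:

interface Metrics {
  long getRequestCount();

  // New accessor; defaulted so existing implementors are not broken.
  default String getVersion() {
    return "0.0.0";
  }
}

class LegacyMetrics implements Metrics { // predates getVersion()
  @Override
  public long getRequestCount() {
    return 42;
  }
  // Inherits getVersion() -> "0.0.0" without any code change.
}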

[17/29] hbase-site git commit: Published site at 40a73a5ca73c9e9e2ff9be1bf823056b108686af.

2018-05-31 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/46d8bc28/devapidocs/org/apache/hadoop/hbase/security/access/AccessController.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/security/access/AccessController.html b/devapidocs/org/apache/hadoop/hbase/security/access/AccessController.html
index e1bac44..cc27250 100644
--- a/devapidocs/org/apache/hadoop/hbase/security/access/AccessController.html
+++ b/devapidocs/org/apache/hadoop/hbase/security/access/AccessController.html
@@ -1371,7 +1371,7 @@ implements MasterObserver
-postAddReplicationPeer,
 postAddRSGroup,
 postAssign,
 postBalance,
 postBalanceRSGroup, postBalanceSwitch,
 postClearDeadServers,
 postCloneSnapshot,
 postCompletedDeleteTableAction,
 postCompletedDisableTableAction,
 postCompletedEnableTableAction,
 postCompletedMergeRegionsAction, postCompletedModifyTableAction, postCompletedModifyTableAction,
 postCompletedSplitRegionAction,
 postCompletedTruncateTableAction,
 postCreateNamespace,
 postCreateTable,
 postDecommissionRegionServers,
 postDeleteSnapshot,
 postDisableReplicationPeer, postDisableTable,
 postEnableReplicationPeer,
 postEnableTable,
 postGetClusterMetrics,
 postGetLocks,
 postGetNamespaceDescriptor,
 postGetProcedures,
 postGetReplicationPeerConfig,
 postListDecommissionedRegionServers,
 postListReplicationPeers,
 postListSnapshot,
 postLockHeartbeat,
 postMergeRegions,
 postMergeRegionsCommitAction, postModifyNamespace,
 postModifyNamespace,
 postModifyTable,
 postMove,
 postMoveServers,
 postMoveServersAndTables,
 postMoveTables,
 postRecommissionRegionServer,
 postRegionOffline,
 postRemoveReplicationPeer,
 postRemoveRSGroup,
 postRemoveServers,
 postRequestLock,
 postRestoreSnapshot,
 postRollBackMergeRegionsAction,
 postRollBackSplitRegionAction,
 postSetNamespaceQuota,
 postSetSplitOrMergeEnabled,
 postSetTableQuota,
 postSetUserQuota,
 postSetUserQuota,
 postSetUserQuota,
 postSnapshot,
 postTableFlush,
 postUnassign, postUpdateReplicationPeerConfig,
 preAddRSGroup,
 preBalanceRSGroup,
 preCreateTableAction, preDeleteTableAction,
 preDisableTableAction,
 preEnableTableAction,
 preGetClusterMetrics,
 preGetTableNames, preListNamespaceDescriptors,
 preMasterInitialization,
 preMergeRegionsAction,
 preMergeRegionsCommitAction,
 preModifyNamespace,
 preModifyTable,
 preModifyTableAction,
 preModifyTableAction, preMoveServers,
 preMoveServersAndTables,
 preMoveTables,
 preRemoveRSGroup,
 preRemoveServers, preSplitRegionAction,
 preSplitRegionAfterMETAAction,
 preSplitRegionBeforeMETAAction,
 preTruncateTableAction
+postAddReplicationPeer,
 postAddRSGroup,
 postAssign,
 postBalance,
 postBalanceRSGroup, postBalanceSwitch,
 postClearDeadServers,
 postCloneSnapshot,
 postCompletedDeleteTableAction,
 postCompletedDisableTableAction,
 postCompletedEnableTableAction,
 postCompletedMergeRegionsAction, postCompletedModifyTableAction, postCompletedModifyTableAction,
 postCompletedSplitRegionAction,
 postCompletedTruncateTableAction,
 postCreateNamespace,
 postCreateTable,
 postDecommissionRegionServers,
 postDeleteSnapshot,
 postDisableReplicationPeer, postDisableTable,
 postEnableReplicationPeer,
 postEnableTable,
 postGetClusterMetrics,
 postGetLocks,
 postGetNamespaceDescriptor,
 postGetProcedures,
 postGetReplicationPeerConfig,
 postGetRSGroupInfo,
 postGetRSGroupInfoOfServer,
 postGetRSGroupInfoOfTable,
 postListDecommissionedRegionServers,
 postListReplicationPeers,
 postListRSGroups,
 postListSnapshot,
 postLockHeartbeat,
 postMergeRegions,
 postMergeRegionsCommitAction,
 postModifyNamespace,
 postModifyNamespace,
 postModifyTable,
 postMove,
 postMoveServers,
 postMoveServersAndTables,
 postMoveTables,
 postRecommissionRegionServer,
 postRegionOffline, postRemoveReplicationPeer,
 postRemoveRSGroup,
 postRemoveServers,
 postRequestLock,
 postRestoreSnapshot,
 postRollBackMergeRegionsAction,
 postRollBackSplitRegionAction,
 postSetNamespaceQuota,
 postSetSplitOrMergeEnabled,
 postSetTableQuota,
 postSetUserQuota,
 postSetUserQuota,
 postSetUserQuota,
 postSnapshot,
 postTableFlush,
 postUnassign,
 postUpdateReplicationPeerConfig,
 preAddRSGroup,
 preBalanceRSGroup,
 preCreateTableAction,
 preDeleteTableAction, preDisableTableAction,
 preEnableTableAction,
 preGetClusterMetrics,
 preGetRSGroupInfo,
 preGetRSGroupInfoOfServer,
 preGetRSGroupInfoOfTable,
 preGetTableNames,
 preListNamespaceDescriptors,
 preListRSGroups,
 preMasterInitialization,
 preMergeRegionsAction,
 preMergeRegionsCommitAction,
 preModifyNamespace,
 preModifyTable,
 preModifyTableAction,
 preModifyTableAction,
 preMoveServers,
 preMoveServersAndTables,
 preMoveTables,
 preRemoveRSGroup,
 preRemoveServers,
 preSplitRegionAction,
 preSplitRegionAfterMETAAction,
 preSplitRegionBeforeMETAAction,
 preTruncateTableAction
 
 
 



[17/29] hbase-site git commit: Published site at 477f9fdb32873387231c5fbbff130ba8bf7b5d68.

2018-05-25 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/621479e1/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html
index 2b1b6c6..adaa381 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html
@@ -1011,2651 +1011,2662 @@
 1003}
 1004
 1005zombieDetector.interrupt();
-1006  }
-1007
-1008  /**
-1009   * Adds the {@code MasterSpaceQuotaObserver} to the list of configured Master observers to
-1010   * automatically remove space quotas for a table when that table is deleted.
-1011   */
-1012  @VisibleForTesting
-1013  public void updateConfigurationForSpaceQuotaObserver(Configuration conf) {
-1014// We're configured to not delete quotas on table deletion, so we don't need to add the obs.
-1015if (!conf.getBoolean(
-1016  MasterSpaceQuotaObserver.REMOVE_QUOTA_ON_TABLE_DELETE,
-1017  MasterSpaceQuotaObserver.REMOVE_QUOTA_ON_TABLE_DELETE_DEFAULT)) {
-1018  return;
-1019}
-1020String[] masterCoprocs = conf.getStrings(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY);
-1021final int length = null == masterCoprocs ? 0 : masterCoprocs.length;
-1022String[] updatedCoprocs = new String[length + 1];
-1023if (length > 0) {
-1024  System.arraycopy(masterCoprocs, 0, updatedCoprocs, 0, masterCoprocs.length);
-1025}
-1026updatedCoprocs[length] = MasterSpaceQuotaObserver.class.getName();
-1027conf.setStrings(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, updatedCoprocs);
-1028  }
-1029
-1030  private void initMobCleaner() {
-1031this.expiredMobFileCleanerChore = new ExpiredMobFileCleanerChore(this);
-1032getChoreService().scheduleChore(expiredMobFileCleanerChore);
-1033
-1034int mobCompactionPeriod = conf.getInt(MobConstants.MOB_COMPACTION_CHORE_PERIOD,
-1035MobConstants.DEFAULT_MOB_COMPACTION_CHORE_PERIOD);
-1036if (mobCompactionPeriod > 0) {
-1037  this.mobCompactChore = new MobCompactionChore(this, mobCompactionPeriod);
-1038  getChoreService().scheduleChore(mobCompactChore);
-1039} else {
-1040  LOG
-1041.info("The period is " + mobCompactionPeriod + " seconds, MobCompactionChore is disabled");
-1042}
-1043this.mobCompactThread = new MasterMobCompactionThread(this);
-1044  }
-1045
-1046  /**
-1047   * Create a {@link MasterMetaBootstrap} instance.
-1048   */
-1049  MasterMetaBootstrap createMetaBootstrap(final HMaster master, final MonitoredTask status) {
-1050// We put this out here in a method so can do a Mockito.spy and stub it out
-1051// w/ a mocked up MasterMetaBootstrap.
-1052return new MasterMetaBootstrap(master, status);
-1053  }
-1054
-1055  /**
-1056   * Create a {@link ServerManager} instance.
-1057   */
-1058  ServerManager createServerManager(final MasterServices master) throws IOException {
-1059// We put this out here in a method so can do a Mockito.spy and stub it out
-1060// w/ a mocked up ServerManager.
-1061setupClusterConnection();
-1062return new ServerManager(master);
-1063  }
-1064
-1065  private void waitForRegionServers(final MonitoredTask status)
-1066  throws IOException, InterruptedException {
-1067this.serverManager.waitForRegionServers(status);
-1068// Check zk for region servers that are up but didn't register
-1069for (ServerName sn: this.regionServerTracker.getOnlineServers()) {
-1070  // The isServerOnline check is opportunistic, correctness is handled inside
-1071  if (!this.serverManager.isServerOnline(sn) &&
-1072      serverManager.checkAndRecordNewServer(sn, ServerMetricsBuilder.of(sn))) {
-1073LOG.info("Registered server found up in zk but who has not yet reported in: " + sn);
-1074  }
-1075}
-1076  }
-1077
-1078  void initClusterSchemaService() throws IOException, InterruptedException {
-1079this.clusterSchemaService = new ClusterSchemaServiceImpl(this);
-1080this.clusterSchemaService.startAsync();
-1081try {
-1082  this.clusterSchemaService.awaitRunning(getConfiguration().getInt(
-1083HBASE_MASTER_WAIT_ON_SERVICE_IN_SECONDS,
-1084DEFAULT_HBASE_MASTER_WAIT_ON_SERVICE_IN_SECONDS), TimeUnit.SECONDS);
-1085} catch (TimeoutException toe) {
-1086  throw new IOException("Timedout starting ClusterSchemaService", toe);
-1087}
-1088  }
-1089
-1090  void initQuotaManager() throws IOException {
-1091MasterQuotaManager quotaManager = new MasterQuotaManager(this);
-1092this.assignmentManager.setRegionStateListener(quotaManager);
-1093quotaManager.start();
-1094this.quotaManager = quotaManager;
-1095  }
-1096
-1097  SpaceQuotaSnapshotNotifier createQuotaSnapshotNotifier() {
-1098
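
updateConfigurationForSpaceQuotaObserver() above appends one coprocessor class to the comma-separated master coprocessor list. The same copy-and-append move in isolation, as a sketch (the helper name is made up; the key normally comes from CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY):

import org.apache.hadoop.conf.Configuration;

public class AppendCoprocessor {
  static void appendObserver(Configuration conf, String key, String className) {
    String[] existing = conf.getStrings(key);       // null when the key is unset
    int length = existing == null ? 0 : existing.length;
    String[] updated = new String[length + 1];
    if (length > 0) {
      System.arraycopy(existing, 0, updated, 0, length);
    }
    updated[length] = className;                    // new observer goes last
    conf.setStrings(key, updated);
  }
}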

[17/29] hbase-site git commit: Published site at c9f8c3436f6e38b5c7807677c5c3e7fc3e19e071.

2018-05-18 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ead846d7/devapidocs/src-html/org/apache/hadoop/hbase/snapshot/ExportSnapshot.ExportSnapshotInputFormat.ExportSnapshotRecordReader.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/snapshot/ExportSnapshot.ExportSnapshotInputFormat.ExportSnapshotRecordReader.html b/devapidocs/src-html/org/apache/hadoop/hbase/snapshot/ExportSnapshot.ExportSnapshotInputFormat.ExportSnapshotRecordReader.html
index 6ab40ed..b77fb8a 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/snapshot/ExportSnapshot.ExportSnapshotInputFormat.ExportSnapshotRecordReader.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/snapshot/ExportSnapshot.ExportSnapshotInputFormat.ExportSnapshotRecordReader.html
@@ -37,1086 +37,1114 @@
 029import java.util.Comparator;
 030import java.util.LinkedList;
 031import java.util.List;
-032
-033import org.apache.hadoop.conf.Configuration;
-034import org.apache.hadoop.fs.FSDataInputStream;
-035import org.apache.hadoop.fs.FSDataOutputStream;
-036import org.apache.hadoop.fs.FileChecksum;
-037import org.apache.hadoop.fs.FileStatus;
-038import org.apache.hadoop.fs.FileSystem;
-039import org.apache.hadoop.fs.FileUtil;
-040import org.apache.hadoop.fs.Path;
-041import org.apache.hadoop.fs.permission.FsPermission;
-042import org.apache.hadoop.hbase.HBaseConfiguration;
-043import org.apache.hadoop.hbase.HConstants;
-044import org.apache.hadoop.hbase.TableName;
-045import org.apache.hadoop.hbase.client.RegionInfo;
-046import org.apache.hadoop.hbase.io.FileLink;
-047import org.apache.hadoop.hbase.io.HFileLink;
-048import org.apache.hadoop.hbase.io.WALLink;
-049import org.apache.hadoop.hbase.io.hadoopbackport.ThrottledInputStream;
-050import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
-051import org.apache.hadoop.hbase.mob.MobUtils;
-052import org.apache.hadoop.hbase.util.AbstractHBaseTool;
-053import org.apache.hadoop.hbase.util.FSUtils;
-054import org.apache.hadoop.hbase.util.HFileArchiveUtil;
-055import org.apache.hadoop.hbase.util.Pair;
-056import org.apache.hadoop.io.BytesWritable;
-057import org.apache.hadoop.io.IOUtils;
-058import org.apache.hadoop.io.NullWritable;
-059import org.apache.hadoop.io.Writable;
-060import org.apache.hadoop.mapreduce.InputFormat;
-061import org.apache.hadoop.mapreduce.InputSplit;
-062import org.apache.hadoop.mapreduce.Job;
-063import org.apache.hadoop.mapreduce.JobContext;
-064import org.apache.hadoop.mapreduce.Mapper;
-065import org.apache.hadoop.mapreduce.RecordReader;
-066import org.apache.hadoop.mapreduce.TaskAttemptContext;
-067import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat;
-068import org.apache.hadoop.mapreduce.security.TokenCache;
-069import org.apache.hadoop.util.StringUtils;
-070import org.apache.hadoop.util.Tool;
-071import org.apache.yetus.audience.InterfaceAudience;
-072import org.slf4j.Logger;
-073import org.slf4j.LoggerFactory;
-074import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLine;
-075import org.apache.hbase.thirdparty.org.apache.commons.cli.Option;
-076import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;
-077import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotFileInfo;
-078import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotRegionManifest;
-079
-080/**
-081 * Export the specified snapshot to a given FileSystem.
-082 *
-083 * The .snapshot/name folder is copied to the destination cluster
-084 * and then all the hfiles/wals are copied using a Map-Reduce Job in the .archive/ location.
-085 * When everything is done, the second cluster can restore the snapshot.
-086 */
-087@InterfaceAudience.Public
-088public class ExportSnapshot extends AbstractHBaseTool implements Tool {
-089  public static final String NAME = "exportsnapshot";
-090  /** Configuration prefix for overrides for the source filesystem */
-091  public static final String CONF_SOURCE_PREFIX = NAME + ".from.";
-092  /** Configuration prefix for overrides for the destination filesystem */
-093  public static final String CONF_DEST_PREFIX = NAME + ".to.";
-094
-095  private static final Logger LOG = LoggerFactory.getLogger(ExportSnapshot.class);
-096
-097  private static final String MR_NUM_MAPS = "mapreduce.job.maps";
-098  private static final String CONF_NUM_SPLITS = "snapshot.export.format.splits";
-099  private static final String CONF_SNAPSHOT_NAME = "snapshot.export.format.snapshot.name";
-100  private static final String CONF_SNAPSHOT_DIR = "snapshot.export.format.snapshot.dir";
-101  private static final String CONF_FILES_USER = "snapshot.export.files.attributes.user";
-102  private static final String CONF_FILES_GROUP = "snapshot.export.files.attributes.group";
-103  private static final String CONF_FILES_MODE = "snapshot.export.files.attributes.mode";
-104  

[17/29] hbase-site git commit: Published site at .

2017-10-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e6ae7c3e/hbase-annotations/index.html
--
diff --git a/hbase-annotations/index.html b/hbase-annotations/index.html
index 3138d8a..097ae06 100644
--- a/hbase-annotations/index.html
+++ b/hbase-annotations/index.html
@@ -7,7 +7,7 @@
 
-
+
 
 Apache HBase - Annotations – About
 
@@ -119,7 +119,7 @@
 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
 
-  Last Published: 2017-10-08
+  Last Published: 2017-10-09
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e6ae7c3e/hbase-annotations/integration.html
--
diff --git a/hbase-annotations/integration.html b/hbase-annotations/integration.html
index 656ce4a..30111fb 100644
--- a/hbase-annotations/integration.html
+++ b/hbase-annotations/integration.html
@@ -7,7 +7,7 @@
 
-
+
 
 Apache HBase - Annotations – CI Management
 
@@ -126,7 +126,7 @@
 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
 
-  Last Published: 2017-10-08
+  Last Published: 2017-10-09
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e6ae7c3e/hbase-annotations/issue-tracking.html
--
diff --git a/hbase-annotations/issue-tracking.html b/hbase-annotations/issue-tracking.html
index b2c90f4..c2aee3f 100644
--- a/hbase-annotations/issue-tracking.html
+++ b/hbase-annotations/issue-tracking.html
@@ -7,7 +7,7 @@
 
-
+
 
 Apache HBase - Annotations – Issue Management
 
@@ -123,7 +123,7 @@
 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
 
-  Last Published: 2017-10-08
+  Last Published: 2017-10-09
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e6ae7c3e/hbase-annotations/license.html
--
diff --git a/hbase-annotations/license.html b/hbase-annotations/license.html
index 2b20221..bb4e654 100644
--- a/hbase-annotations/license.html
+++ b/hbase-annotations/license.html
@@ -7,7 +7,7 @@
 
-
+
 
 Apache HBase - Annotations – Project Licenses
 
@@ -326,7 +326,7 @@
 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
 
-  Last Published: 2017-10-08
+  Last Published: 2017-10-09
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e6ae7c3e/hbase-annotations/mail-lists.html
--
diff --git a/hbase-annotations/mail-lists.html b/hbase-annotations/mail-lists.html
index 3afb186..7a9a30e 100644
--- a/hbase-annotations/mail-lists.html
+++ b/hbase-annotations/mail-lists.html
@@ -7,7 +7,7 @@
 
-
+
 
 Apache HBase - Annotations – Project Mailing Lists
 
@@ -176,7 +176,7 @@
 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
 
-  Last Published: 2017-10-08
+  Last Published: 2017-10-09
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e6ae7c3e/hbase-annotations/plugin-management.html
--
diff --git a/hbase-annotations/plugin-management.html b/hbase-annotations/plugin-management.html
index 94c6e46..11562d3 100644
--- a/hbase-annotations/plugin-management.html
+++ b/hbase-annotations/plugin-management.html
@@ -7,7 +7,7 @@
 
-
+
 
 Apache HBase - Annotations – Project Plugin Management
 
@@ -271,7 +271,7 @@
 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
 
-  Last Published: 2017-10-08
+  Last Published: 2017-10-09
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e6ae7c3e/hbase-annotations/plugins.html
--
diff --git a/hbase-annotations/plugins.html b/hbase-annotations/plugins.html
index d3b2c5e..fc1075d 100644
--- a/hbase-annotations/plugins.html
+++ b/hbase-annotations/plugins.html
@@ -7,7 +7,7 @@
 
-