[01/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-08-11 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site c8e4f1987 -> 346adc371


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/346adc37/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ObservedExceptionsInBatch.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ObservedExceptionsInBatch.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ObservedExceptionsInBatch.html
index a701ece..26ee60a 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ObservedExceptionsInBatch.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ObservedExceptionsInBatch.html
@@ -896,7376 +896,7383 @@
 888}
 889
 890// Write HRI to a file in case we 
need to recover hbase:meta
-891status.setStatus("Writing region info 
on filesystem");
-892fs.checkRegionInfoOnFilesystem();
-893
-894// Initialize all the HStores
-895status.setStatus("Initializing all 
the Stores");
-896long maxSeqId = 
initializeStores(reporter, status);
-897this.mvcc.advanceTo(maxSeqId);
-898if 
(ServerRegionReplicaUtil.shouldReplayRecoveredEdits(this)) {
-899  List<Store> stores = this.getStores();  // update the stores that we are replaying
-900  try {
-901for (Store store : stores) {
-902  ((HStore) 
store).startReplayingFromWAL();
-903}
-904// Recover any edits if 
available.
-905maxSeqId = Math.max(maxSeqId,
-906
replayRecoveredEditsIfAny(this.fs.getRegionDir(), maxSeqIdInStores, reporter, 
status));
-907// Make sure mvcc is up to max.
-908this.mvcc.advanceTo(maxSeqId);
-909  } finally {
-910for (Store store : stores) {  
  // update the stores that we are done replaying
-911  
((HStore)store).stopReplayingFromWAL();
-912}
-913  }
-914
-915}
-916this.lastReplayedOpenRegionSeqId = 
maxSeqId;
-917
-918
this.writestate.setReadOnly(ServerRegionReplicaUtil.isReadOnly(this));
-919this.writestate.flushRequested = 
false;
-920this.writestate.compacting.set(0);
+891// Only the primary replica should 
write .regioninfo
+892if 
(this.getRegionInfo().getReplicaId() == HRegionInfo.DEFAULT_REPLICA_ID) {
+893  status.setStatus("Writing region 
info on filesystem");
+894  fs.checkRegionInfoOnFilesystem();
+895} else {
+896  if (LOG.isDebugEnabled()) {
+897LOG.debug("Skipping creation of 
.regioninfo file for " + this.getRegionInfo());
+898  }
+899}
+900
+901// Initialize all the HStores
+902status.setStatus("Initializing all 
the Stores");
+903long maxSeqId = 
initializeStores(reporter, status);
+904this.mvcc.advanceTo(maxSeqId);
+905if 
(ServerRegionReplicaUtil.shouldReplayRecoveredEdits(this)) {
+906  List<Store> stores = this.getStores();  // update the stores that we are replaying
+907  try {
+908for (Store store : stores) {
+909  ((HStore) 
store).startReplayingFromWAL();
+910}
+911// Recover any edits if 
available.
+912maxSeqId = Math.max(maxSeqId,
+913
replayRecoveredEditsIfAny(this.fs.getRegionDir(), maxSeqIdInStores, reporter, 
status));
+914// Make sure mvcc is up to max.
+915this.mvcc.advanceTo(maxSeqId);
+916  } finally {
+917for (Store store : stores) {  
  // update the stores that we are done replaying
+918  
((HStore)store).stopReplayingFromWAL();
+919}
+920  }
 921
-922if (this.writestate.writesEnabled) 
{
-923  // Remove temporary data left over 
from old regions
-924  status.setStatus("Cleaning up 
temporary data from old regions");
-925  fs.cleanupTempDir();
-926}
-927
-928if (this.writestate.writesEnabled) 
{
-929  status.setStatus("Cleaning up 
detritus from prior splits");
-930  // Get rid of any splits or merges 
that were lost in-progress.  Clean out
-931  // these directories here on open.  
We may be opening a region that was
-932  // being split but we crashed in 
the middle of it all.
-933  fs.cleanupAnySplitDetritus();
-934  fs.cleanupMergesDir();
-935}
-936
-937// Initialize split policy
-938this.splitPolicy = 
RegionSplitPolicy.create(this, conf);
-939
-940// Initialize flush policy
-941this.flushPolicy = 
FlushPolicyFactory.create(this, conf);
-942
-943long lastFlushTime = 
EnvironmentEdgeManager.currentTime();
-944for (Store store: stores.values()) 
{
-945  
this.lastStoreFlushTimeMap.put(store, lastFlushTime);
-946}
-947
-948// Use maximum of log sequenceid or 
that which was found in stores
-949// (particularly if no recovered 
edits, seqid will be -1).
-950long nextSeqid = maxSeqId;
-951
-952// In distributedLogReplay mode, we 
don't know the last change sequence number because region
-953// 
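The hunk above changes HRegion initialization so that only the default (primary) replica writes the .regioninfo file; secondary replicas skip the write and just log at debug level. A minimal standalone sketch of that guard is below; RegionInfoLike and RegionFs are hypothetical stand-ins for illustration, not HBase classes.

// Hedged sketch of the "only the primary replica writes .regioninfo" guard
// from the hunk above. RegionInfoLike and RegionFs are hypothetical stand-ins.
public class RegionInfoWriteGuard {

  static final int DEFAULT_REPLICA_ID = 0; // 0 is the primary replica id in HBase

  interface RegionInfoLike {
    int getReplicaId();
  }

  interface RegionFs {
    void checkRegionInfoOnFilesystem(); // would persist .regioninfo if missing
  }

  static void maybeWriteRegionInfo(RegionInfoLike info, RegionFs fs) {
    if (info.getReplicaId() == DEFAULT_REPLICA_ID) {
      fs.checkRegionInfoOnFilesystem();
    } else {
      System.out.println("Skipping creation of .regioninfo file for replica "
          + info.getReplicaId());
    }
  }

  public static void main(String[] args) {
    maybeWriteRegionInfo(() -> 0, () -> System.out.println("wrote .regioninfo"));
    maybeWriteRegionInfo(() -> 1, () -> System.out.println("wrote .regioninfo"));
  }
}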

[01/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-08-09 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 709036d95 -> 2aec596ed


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2aec596e/devapidocs/org/apache/hadoop/hbase/client/class-use/TableState.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/class-use/TableState.html 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/TableState.html
index e9520ce..8580b83 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/class-use/TableState.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/TableState.html
@@ -202,13 +202,13 @@
 
 
 TableState
-ClusterConnection.getTableState(TableName tableName)
-Retrieve TableState, represent current table state.
-
+ConnectionImplementation.getTableState(TableName tableName)
 
 
 TableState
-ConnectionImplementation.getTableState(TableName tableName)
+ClusterConnection.getTableState(TableName tableName)
+Retrieve TableState, represent current table state.
+
 
 
 static TableState



[01/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-31 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 0fb66c20d -> 1837997e5


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1837997e/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AppendTest.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AppendTest.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AppendTest.html
index b506688..7e708b5 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AppendTest.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AppendTest.html
@@ -1474,841 +1474,847 @@
 1466  }
 1467  put.setDurability(opts.writeToWAL 
? Durability.SYNC_WAL : Durability.SKIP_WAL);
 1468  mutator.mutate(put);
-1469}
-1470  }
-1471
-1472  static class ScanTest extends 
TableTest {
-1473private ResultScanner testScanner;
+1469  if (opts.autoFlush) {
+1470mutator.flush();
+1471  }
+1472}
+1473  }
 1474
-1475ScanTest(Connection con, TestOptions 
options, Status status) {
-1476  super(con, options, status);
-1477}
-1478
-1479@Override
-1480void testTakedown() throws 
IOException {
-1481  if (this.testScanner != null) {
-1482this.testScanner.close();
-1483  }
-1484  super.testTakedown();
-1485}
-1486
-1487
-1488@Override
-1489void testRow(final int i) throws 
IOException {
-1490  if (this.testScanner == null) {
-1491Scan scan = new 
Scan().withStartRow(format(opts.startRow)).setCaching(opts.caching)
-1492
.setCacheBlocks(opts.cacheBlocks).setAsyncPrefetch(opts.asyncPrefetch)
-1493
.setReadType(opts.scanReadType);
-1494if (opts.addColumns) {
-1495  scan.addColumn(FAMILY_NAME, 
QUALIFIER_NAME);
-1496} else {
-1497  scan.addFamily(FAMILY_NAME);
-1498}
-1499if (opts.filterAll) {
-1500  scan.setFilter(new 
FilterAllFilter());
+1475  static class ScanTest extends 
TableTest {
+1476private ResultScanner testScanner;
+1477
+1478ScanTest(Connection con, TestOptions 
options, Status status) {
+1479  super(con, options, status);
+1480}
+1481
+1482@Override
+1483void testTakedown() throws 
IOException {
+1484  if (this.testScanner != null) {
+1485this.testScanner.close();
+1486  }
+1487  super.testTakedown();
+1488}
+1489
+1490
+1491@Override
+1492void testRow(final int i) throws 
IOException {
+1493  if (this.testScanner == null) {
+1494Scan scan = new 
Scan().withStartRow(format(opts.startRow)).setCaching(opts.caching)
+1495
.setCacheBlocks(opts.cacheBlocks).setAsyncPrefetch(opts.asyncPrefetch)
+1496
.setReadType(opts.scanReadType);
+1497if (opts.addColumns) {
+1498  scan.addColumn(FAMILY_NAME, 
QUALIFIER_NAME);
+1499} else {
+1500  scan.addFamily(FAMILY_NAME);
 1501}
-1502this.testScanner = 
table.getScanner(scan);
-1503  }
-1504  Result r = testScanner.next();
-1505  updateValueSize(r);
-1506}
-1507  }
-1508
-1509  /**
-1510   * Base class for operations that are CAS-like; that read a value and then set it based off what
-1511   * they read. In this category is increment, append, checkAndPut, etc.
-1512   *
-1513   * <p>These operations also want some concurrency going on. Usually when these tests run, they
-1514   * operate in their own part of the key range. In CASTest, we will have them all overlap on the
-1515   * same key space. We do this with our getStartRow and getLastRow overrides.
-1516   */
-1517  static abstract class CASTableTest 
extends TableTest {
-1518private final byte [] qualifier;
-1519CASTableTest(Connection con, 
TestOptions options, Status status) {
-1520  super(con, options, status);
-1521  qualifier = 
Bytes.toBytes(this.getClass().getSimpleName());
-1522}
-1523
-1524byte [] getQualifier() {
-1525  return this.qualifier;
-1526}
-1527
-1528@Override
-1529int getStartRow() {
-1530  return 0;
-1531}
-1532
-1533@Override
-1534int getLastRow() {
-1535  return opts.perClientRunRows;
-1536}
-1537  }
-1538
-1539  static class IncrementTest extends 
CASTableTest {
-1540IncrementTest(Connection con, 
TestOptions options, Status status) {
-1541  super(con, options, status);
-1542}
-1543
-1544@Override
-1545void testRow(final int i) throws 
IOException {
-1546  Increment increment = new 
Increment(format(i));
-1547  increment.addColumn(FAMILY_NAME, 
getQualifier(), 1l);
-1548  
updateValueSize(this.table.increment(increment));
-1549}
-1550  }
-1551
-1552  static class AppendTest extends 
CASTableTest {
-1553AppendTest(Connection con, 
TestOptions options, Status status) {
-1554  super(con, options, status);
-1555}
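The CASTableTest javadoc in the hunk above explains that CAS-style tests (increment, append, checkAndPut) deliberately overlap on one key space via the getStartRow/getLastRow overrides, so all clients contend on the same rows. A hedged sketch of that idea follows, with illustrative names rather than PerformanceEvaluation's real API.

// Hedged sketch: ordinary tests give each client a disjoint row range; the
// CAS variants override the range so every client hits [0, perClientRunRows)
// and the operations actually contend. Names here are illustrative only.
abstract class KeyRangeTest {
  int getStartRow() { return clientId() * perClientRunRows(); } // disjoint per client
  int getLastRow()  { return getStartRow() + perClientRunRows(); }
  abstract int clientId();
  abstract int perClientRunRows();
}

abstract class CasKeyRangeTest extends KeyRangeTest {
  @Override int getStartRow() { return 0; }                     // everyone overlaps
  @Override int getLastRow()  { return perClientRunRows(); }
}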

[01/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-28 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site ee3302912 -> a2b2dd19e


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a2b2dd19/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsRegionInfo.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsRegionInfo.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsRegionInfo.html
index c9a18a3..c80f6d8 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsRegionInfo.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsRegionInfo.html
@@ -2492,2617 +2492,2627 @@
 2484  return;
 2485}
 2486  }
-2487  
errors.reportError(ERROR_CODE.LINGERING_SPLIT_PARENT, "Region "
-2488  + descriptiveName + " is a 
split parent in META, in HDFS, "
-2489  + "and not deployed on any 
region server. This could be transient, "
-2490  + "consider to run the catalog 
janitor first!");
-2491  if (shouldFixSplitParents()) {
-2492setShouldRerun();
-2493resetSplitParent(hbi);
-2494  }
-2495} else if (inMeta && !inHdfs && !isDeployed) {
-2496  
errors.reportError(ERROR_CODE.NOT_IN_HDFS_OR_DEPLOYED, "Region "
-2497  + descriptiveName + " found in 
META, but not in HDFS "
-2498  + "or deployed on any region 
server.");
-2499  if (shouldFixMeta()) {
-2500deleteMetaRegion(hbi);
-2501  }
-2502} else if (inMeta && !inHdfs && isDeployed) {
-2503  
errors.reportError(ERROR_CODE.NOT_IN_HDFS, "Region " + descriptiveName
-2504  + " found in META, but not in 
HDFS, " +
-2505  "and deployed on " + 
Joiner.on(", ").join(hbi.deployedOn));
-2506  // We treat HDFS as ground truth.  
Any information in meta is transient
-2507  // and equivalent data can be 
regenerated.  So, lets unassign and remove
-2508  // these problems from META.
-2509  if (shouldFixAssignments()) {
-2510errors.print("Trying to fix 
unassigned region...");
-2511undeployRegions(hbi);
-2512  }
-2513  if (shouldFixMeta()) {
-2514// wait for it to complete
-2515deleteMetaRegion(hbi);
-2516  }
-2517} else if (inMeta && inHdfs && !isDeployed && shouldBeDeployed) {
-2518  
errors.reportError(ERROR_CODE.NOT_DEPLOYED, "Region " + descriptiveName
-2519  + " not deployed on any region 
server.");
-2520  tryAssignmentRepair(hbi, "Trying 
to fix unassigned region...");
-2521} else if (inMeta && inHdfs && isDeployed && !shouldBeDeployed) {
-2522  
errors.reportError(ERROR_CODE.SHOULD_NOT_BE_DEPLOYED,
-2523  "Region " + descriptiveName + 
" should not be deployed according " +
-2524  "to META, but is deployed on " 
+ Joiner.on(", ").join(hbi.deployedOn));
-2525  if (shouldFixAssignments()) {
-2526errors.print("Trying to close 
the region " + descriptiveName);
-2527setShouldRerun();
-2528
HBaseFsckRepair.fixMultiAssignment(connection, hbi.metaEntry, 
hbi.deployedOn);
-2529  }
-2530} else if (inMeta && inHdfs && isMultiplyDeployed) {
-2531  
errors.reportError(ERROR_CODE.MULTI_DEPLOYED, "Region " + descriptiveName
-2532  + " is listed in hbase:meta on 
region server " + hbi.metaEntry.regionServer
-2533  + " but is multiply assigned 
to region servers " +
-2534  Joiner.on(", 
").join(hbi.deployedOn));
-2535  // If we are trying to fix the 
errors
-2536  if (shouldFixAssignments()) {
-2537errors.print("Trying to fix 
assignment error...");
-2538setShouldRerun();
-2539
HBaseFsckRepair.fixMultiAssignment(connection, hbi.metaEntry, 
hbi.deployedOn);
-2540  }
-2541} else if (inMeta && inHdfs && isDeployed && !deploymentMatchesMeta) {
-2542  
errors.reportError(ERROR_CODE.SERVER_DOES_NOT_MATCH_META, "Region "
-2543  + descriptiveName + " listed 
in hbase:meta on region server " +
-2544  hbi.metaEntry.regionServer + " 
but found on region server " +
-2545  hbi.deployedOn.get(0));
-2546  // If we are trying to fix the 
errors
-2547  if (shouldFixAssignments()) {
-2548errors.print("Trying to fix 
assignment error...");
-2549setShouldRerun();
-2550
HBaseFsckRepair.fixMultiAssignment(connection, hbi.metaEntry, 
hbi.deployedOn);
-2551
HBaseFsckRepair.waitUntilAssigned(admin, hbi.getHdfsHRI());
-2552  }
-2553} else {
-2554  
errors.reportError(ERROR_CODE.UNKNOWN, "Region " + descriptiveName +
-2555  " is in an unforeseen state:" 
+
-2556  " inMeta=" + inMeta +
-2557  " inHdfs=" + inHdfs +
-2558  " isDeployed=" + isDeployed 
+
-2559  " isMultiplyDeployed=" + 
isMultiplyDeployed +
-2560  " deploymentMatchesMeta=" + 
deploymentMatchesMeta +
-2561  " shouldBeDeployed=" + 
shouldBeDeployed);
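The branches above classify a region by the combination of "listed in hbase:meta", "present in HDFS", and its deployment flags, then report the matching error code and optionally repair. A hedged, standalone sketch of that classification as a plain decision function; the enum labels are illustrative, and the split-parent, server-mismatch, and repair paths are omitted.

// Hedged sketch of the region-consistency classification used above.
public class RegionConsistency {
  enum Problem { NOT_IN_HDFS_OR_DEPLOYED, NOT_IN_HDFS, NOT_DEPLOYED,
                 SHOULD_NOT_BE_DEPLOYED, MULTI_DEPLOYED, OK, UNKNOWN }

  static Problem classify(boolean inMeta, boolean inHdfs, boolean isDeployed,
                          boolean shouldBeDeployed, boolean multiplyDeployed) {
    if (inMeta && !inHdfs && !isDeployed) return Problem.NOT_IN_HDFS_OR_DEPLOYED;
    if (inMeta && !inHdfs && isDeployed)  return Problem.NOT_IN_HDFS;
    if (inMeta && inHdfs && !isDeployed && shouldBeDeployed) return Problem.NOT_DEPLOYED;
    if (inMeta && inHdfs && isDeployed && !shouldBeDeployed) return Problem.SHOULD_NOT_BE_DEPLOYED;
    if (inMeta && inHdfs && multiplyDeployed) return Problem.MULTI_DEPLOYED;
    if (inMeta && inHdfs && isDeployed) return Problem.OK;
    return Problem.UNKNOWN;
  }

  public static void main(String[] args) {
    // region in META and deployed, but its HDFS directory is gone
    System.out.println(classify(true, false, true, true, false)); // NOT_IN_HDFS
  }
}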

[01/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-26 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 61500d59b -> 21766f4a4


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/21766f4a/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.CorruptedLogFileException.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.CorruptedLogFileException.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.CorruptedLogFileException.html
index 512f519..c36843d 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.CorruptedLogFileException.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.CorruptedLogFileException.html
@@ -719,1696 +719,1697 @@
 711for (FileStatus status : files) 
{
 712  String fileName = 
status.getPath().getName();
 713  try {
-714Long tmpSeqId = 
Long.parseLong(fileName.substring(0, fileName.length()
-715- 
SEQUENCE_ID_FILE_SUFFIX_LENGTH));
-716maxSeqId = Math.max(tmpSeqId, 
maxSeqId);
-717  } catch (NumberFormatException 
ex) {
-718LOG.warn("Invalid SeqId File 
Name=" + fileName);
-719  }
-720}
-721  }
-722}
-723if (maxSeqId > newSeqId) {
-724  newSeqId = maxSeqId;
-725}
-726newSeqId += safetyBumper; // bump up 
SeqId
-727
-728// write a new seqId file
-729Path newSeqIdFile = new 
Path(editsdir, newSeqId + SEQUENCE_ID_FILE_SUFFIX);
-730if (newSeqId != maxSeqId) {
-731  try {
-732if (!fs.createNewFile(newSeqIdFile) && !fs.exists(newSeqIdFile)) {
-733  throw new IOException("Failed 
to create SeqId file:" + newSeqIdFile);
-734}
-735if (LOG.isDebugEnabled()) {
-736  LOG.debug("Wrote file=" + 
newSeqIdFile + ", newSeqId=" + newSeqId
-737  + ", maxSeqId=" + 
maxSeqId);
-738}
-739  } catch (FileAlreadyExistsException 
ignored) {
-740// latest hdfs throws this 
exception. it's all right if newSeqIdFile already exists
-741  }
-742}
-743// remove old ones
-744if (files != null) {
-745  for (FileStatus status : files) {
-746if 
(newSeqIdFile.equals(status.getPath())) {
-747  continue;
-748}
-749fs.delete(status.getPath(), 
false);
-750  }
-751}
-752return newSeqId;
-753  }
-754
-755  /**
-756   * Create a new {@link Reader} for 
reading logs to split.
-757   *
-758   * @param file
-759   * @return A new Reader instance, 
caller should close
-760   * @throws IOException
-761   * @throws CorruptedLogFileException
-762   */
-763  protected Reader getReader(FileStatus 
file, boolean skipErrors, CancelableProgressable reporter)
-764  throws IOException, 
CorruptedLogFileException {
-765Path path = file.getPath();
-766long length = file.getLen();
-767Reader in;
-768
-769// Check for possibly empty file. 
With appends, currently Hadoop reports a
-770// zero length even if the file has 
been sync'd. Revisit if HDFS-376 or
-771// HDFS-878 is committed.
-772if (length <= 0) {
-773  LOG.warn("File " + path + " might 
be still open, length is 0");
-774}
-775
-776try {
-777  FSUtils.getInstance(fs, 
conf).recoverFileLease(fs, path, conf, reporter);
-778  try {
-779in = getReader(path, reporter);
-780  } catch (EOFException e) {
-781if (length <= 0) {
-782  // TODO should we ignore an 
empty, not-last log file if skip.errors
-783  // is false? Either way, the 
caller should decide what to do. E.g.
-784  // ignore if this is the last 
log in sequence.
-785  // TODO is this scenario still 
possible if the log has been
-786  // recovered (i.e. closed)
-787  LOG.warn("Could not open " + 
path + " for reading. File is empty", e);
-788  return null;
-789} else {
-790  // EOFException being ignored
-791  return null;
-792}
-793  }
-794} catch (IOException e) {
-795  if (e instanceof 
FileNotFoundException) {
-796// A wal file may not exist 
anymore. Nothing can be recovered so move on
-797LOG.warn("File " + path + " 
doesn't exist anymore.", e);
-798return null;
-799  }
-800  if (!skipErrors || e instanceof 
InterruptedIOException) {
-801throw e; // Don't mark the file 
corrupted if interrupted, or not skipErrors
-802  }
-803  CorruptedLogFileException t =
-804new 
CorruptedLogFileException("skipErrors=true Could not open wal " +
-805path + " ignoring");
-806  t.initCause(e);
-807  throw t;
-808}
-809return in;
-810  }
-811
-812  static private Entry 
getNextLogLine(Reader in, Path path, boolean skipErrors)
-813  throws CorruptedLogFileException, 
IOException {
-814try {
-815  return in.next();
-816} catch (EOFException eof) {
-817  // truncated files are expected if 
a 
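The hunk above maintains a "<seqid>_seqid" marker file in the recovered-edits directory: scan for existing markers, take the maximum, bump it by a safety margin, write a new marker, and delete the stale ones. Below is a hedged sketch of the same bookkeeping written against plain java.nio rather than Hadoop's FileSystem API; the class name and the temp-directory demo are illustrative only.

// Hedged sketch of the "max seqid marker file" bookkeeping shown above.
import java.io.IOException;
import java.nio.file.DirectoryStream;
import java.nio.file.FileAlreadyExistsException;
import java.nio.file.Files;
import java.nio.file.Path;

public class SeqIdFiles {
  static final String SUFFIX = "_seqid";

  static long writeMaxSequenceId(Path editsDir, long newSeqId, long safetyBump)
      throws IOException {
    long maxSeqId = -1;
    // scan for existing "<seqid>_seqid" markers and remember the largest
    try (DirectoryStream<Path> files = Files.newDirectoryStream(editsDir, "*" + SUFFIX)) {
      for (Path p : files) {
        String name = p.getFileName().toString();
        try {
          maxSeqId = Math.max(maxSeqId,
              Long.parseLong(name.substring(0, name.length() - SUFFIX.length())));
        } catch (NumberFormatException ex) {
          System.err.println("Invalid SeqId file name=" + name);
        }
      }
    }
    newSeqId = Math.max(newSeqId, maxSeqId) + safetyBump;      // bump up SeqId
    Path newFile = editsDir.resolve(newSeqId + SUFFIX);
    try {
      Files.createFile(newFile);                               // write the new marker
    } catch (FileAlreadyExistsException ignored) {
      // fine if the marker already exists
    }
    try (DirectoryStream<Path> files = Files.newDirectoryStream(editsDir, "*" + SUFFIX)) {
      for (Path p : files) {                                   // remove old markers
        if (!p.equals(newFile)) {
          Files.delete(p);
        }
      }
    }
    return newSeqId;
  }

  public static void main(String[] args) throws IOException {
    Path dir = Files.createTempDirectory("recovered.edits");
    Files.createFile(dir.resolve("41" + SUFFIX));
    System.out.println(writeMaxSequenceId(dir, 30, 1000));     // prints 1041
  }
}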

[01/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-24 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 98e5da7db -> 2d5075d77


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2d5075d7/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFormatReader.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFormatReader.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFormatReader.html
index 0b1b520..44308b5 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFormatReader.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFormatReader.html
@@ -33,854 +33,840 @@
 025import org.apache.commons.logging.Log;
 026import 
org.apache.commons.logging.LogFactory;
 027import 
org.apache.hadoop.fs.FSDataInputStream;
-028import 
org.apache.hadoop.hbase.ProcedureInfo;
-029import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
-030import 
org.apache.hadoop.hbase.classification.InterfaceStability;
-031import 
org.apache.hadoop.hbase.procedure2.Procedure;
-032import 
org.apache.hadoop.hbase.procedure2.ProcedureUtil;
-033import 
org.apache.hadoop.hbase.procedure2.store.ProcedureStore.ProcedureIterator;
-034import 
org.apache.hadoop.hbase.procedure2.store.ProcedureStoreTracker;
-035import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos;
-036import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.ProcedureWALEntry;
-037
-038/**
-039 * Helper class that loads the procedures 
stored in a WAL
-040 */
-041@InterfaceAudience.Private
-042@InterfaceStability.Evolving
-043public class ProcedureWALFormatReader {
-044  private static final Log LOG = 
LogFactory.getLog(ProcedureWALFormatReader.class);
-045
-046  // 
==
-047  //  We read the WALs in reverse order 
from the newest to the oldest.
-048  //  We have different entry types:
-049  //   - INIT: Procedure submitted by the 
user (also known as 'root procedure')
-050  //   - INSERT: Children added to the 
procedure parentId:[childId, ...]
-051  //   - UPDATE: The specified procedure 
was updated
-052  //   - DELETE: The procedure was 
removed (finished/rolledback and result TTL expired)
-053  //
-054  // In the WAL we can find multiple 
times the same procedure as UPDATE or INSERT.
-055  // We read the WAL from top to bottom, 
so every time we find an entry of the
-056  // same procedure, that will be the 
"latest" update (Caveat: with multiple threads writing
-057  // the store, this assumption does not 
hold).
-058  //
-059  // We keep two in-memory maps:
-060  //  - localProcedureMap: is the map 
containing the entries in the WAL we are processing
-061  //  - procedureMap: is the map 
containing all the procedures we found up to the WAL in process.
-062  // localProcedureMap is merged with the 
procedureMap once we reach the WAL EOF.
-063  //
-064  // Since we are reading the WALs in 
reverse order (newest to oldest),
-065  // if we find an entry related to a 
procedure we already have in 'procedureMap' we can discard it.
-066  //
-067  // The WAL is append-only so the last 
procedure in the WAL is the one that
-068  // was in execution at the time we 
crashed/closed the server.
-069  // Given that, the procedure replay 
order can be inferred by the WAL order.
-070  //
-071  // Example:
-072  //WAL-2: [A, B, A, C, D]
-073  //WAL-1: [F, G, A, F, B]
-074  //Replay-Order: [D, C, A, B, F, 
G]
-075  //
-076  // The "localProcedureMap" keeps a 
"replayOrder" list. Every time we add the
-077  // record to the map that record is 
moved to the head of the "replayOrder" list.
-078  // Using the example above:
-079  //WAL-2 
localProcedureMap.replayOrder is [D, C, A, B]
-080  //WAL-1 
localProcedureMap.replayOrder is [F, G]
-081  //
-082  // Each time we reach the WAL-EOF, the 
"replayOrder" list is merged/appended in 'procedureMap'
-083  // so using the example above we end up 
with: [D, C, A, B] + [F, G] as replay order.
-084  //
-085  //  Fast Start: INIT/INSERT record and 
StackIDs
-086  // 
-
-087  // We have two special records, INIT 
and INSERT, that track the first time
-088  // the procedure was added to the WAL. 
We can use this information to be able
-089  // to start procedures before reaching 
the end of the WAL, or before reading all WALs.
-090  // But in some cases, the WAL with that 
record can be already gone.
-091  // As an alternative, we can use the 
stackIds on each procedure,
-092  // to identify when a procedure is 
ready to start.
-093  // If there are gaps in the sum of the 
stackIds we need to read more WALs.
-094  //
-095  // Example (all procs child of A):
-096  //   WAL-2: [A, B]   A 
stackIds = [0, 4], B stackIds = [1, 5]
-097  //   
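The comment block above describes how WALs are read newest to oldest, how entries already seen in a newer WAL are discarded, and how each WAL's replayOrder list is merged into the global order at EOF. The following is a hedged toy model over strings, not the real reader; it reproduces the worked example from the comments (WAL-2: [A, B, A, C, D], WAL-1: [F, G, A, F, B] gives replay order [D, C, A, B, F, G]).

// Hedged sketch of the replay-order bookkeeping described in the comments above.
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Set;

public class ReplayOrderSketch {
  public static List<String> replayOrder(List<List<String>> walsNewestFirst) {
    List<String> globalOrder = new ArrayList<>();
    Set<String> seen = new HashSet<>();
    for (List<String> wal : walsNewestFirst) {
      LinkedList<String> localOrder = new LinkedList<>();
      for (String proc : wal) {
        if (seen.contains(proc)) continue;     // a newer WAL already has this procedure
        localOrder.remove(proc);               // re-update moves it to the head
        localOrder.addFirst(proc);
      }
      seen.addAll(localOrder);
      globalOrder.addAll(localOrder);          // merge localProcedureMap at WAL EOF
    }
    return globalOrder;
  }

  public static void main(String[] args) {
    List<String> wal2 = Arrays.asList("A", "B", "A", "C", "D"); // newest
    List<String> wal1 = Arrays.asList("F", "G", "A", "F", "B"); // oldest
    System.out.println(replayOrder(Arrays.asList(wal2, wal1))); // [D, C, A, B, F, G]
  }
}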

[01/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-23 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site dd9ff6c2d -> 0383a9c24


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0383a9c2/testdevapidocs/org/apache/hadoop/hbase/chaos/actions/ChangeSplitPolicyAction.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/chaos/actions/ChangeSplitPolicyAction.html
 
b/testdevapidocs/org/apache/hadoop/hbase/chaos/actions/ChangeSplitPolicyAction.html
index 825ba57..4a4162f 100644
--- 
a/testdevapidocs/org/apache/hadoop/hbase/chaos/actions/ChangeSplitPolicyAction.html
+++ 
b/testdevapidocs/org/apache/hadoop/hbase/chaos/actions/ChangeSplitPolicyAction.html
@@ -210,7 +210,7 @@ extends Action
-forceBalancer, getConf, getCurrentServers, init, killDataNode, killMaster, killRs, killZKNode, startDataNode, startMaster, startRs, startZKNode, unbalanceRegions
+forceBalancer, getConf, getCurrentServers, init, killDataNode, killMaster, killRs, killZKNode, modifyAllTableColumns, modifyAllTableColumns, startDataNode, startMaster, startRs, startZKNode, unbalanceRegions
 
 
 



[01/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-22 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site b86866c7b -> f391bcef9


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f391bcef/devapidocs/src-html/org/apache/hadoop/hbase/client/NoncedRegionServerCallable.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/NoncedRegionServerCallable.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/NoncedRegionServerCallable.html
index 0c1ad73..f9f7379 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/NoncedRegionServerCallable.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/NoncedRegionServerCallable.html
@@ -55,8 +55,8 @@
 047   * @param row The row we want in <code>tableName</code>.
 048   */
 049  public 
NoncedRegionServerCallable(Connection connection, TableName tableName, byte [] 
row,
-050  HBaseRpcController rpcController) 
{
-051super(connection, tableName, row, 
rpcController);
+050  HBaseRpcController rpcController, 
int priority) {
+051super(connection, tableName, row, 
rpcController, priority);
 052this.nonce = 
getConnection().getNonceGenerator().newNonce();
 053  }
 054



[01/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-21 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site be6740763 -> ca5b02753


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ca5b0275/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputSaslHelper.EncryptHandler.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputSaslHelper.EncryptHandler.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputSaslHelper.EncryptHandler.html
index 42b6f60..37a733a 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputSaslHelper.EncryptHandler.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputSaslHelper.EncryptHandler.html
@@ -28,10 +28,10 @@
 020import static 
io.netty.handler.timeout.IdleState.READER_IDLE;
 021import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_CIPHER_SUITES_KEY;
 022
-023import com.google.common.base.Charsets;
-024import 
com.google.common.base.Throwables;
-025import 
com.google.common.collect.ImmutableSet;
-026import com.google.common.collect.Maps;
+023import 
org.apache.hadoop.hbase.shaded.com.google.common.base.Charsets;
+024import 
org.apache.hadoop.hbase.shaded.com.google.common.base.Throwables;
+025import 
org.apache.hadoop.hbase.shaded.com.google.common.collect.ImmutableSet;
+026import 
org.apache.hadoop.hbase.shaded.com.google.common.collect.Maps;
 027import 
com.google.protobuf.CodedOutputStream;
 028
 029import io.netty.buffer.ByteBuf;
@@ -93,7 +93,7 @@
 085import 
org.apache.hadoop.crypto.key.KeyProvider.KeyVersion;
 086import 
org.apache.hadoop.fs.FileEncryptionInfo;
 087import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
-088import 
org.apache.hadoop.hbase.util.ByteStringer;
+088import com.google.protobuf.ByteString;
 089import 
org.apache.hadoop.hdfs.DFSClient;
 090import 
org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 091import 
org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
@@ -373,416 +373,418 @@
 365  
DataTransferEncryptorMessageProto.newBuilder();
 366  
builder.setStatus(DataTransferEncryptorStatus.SUCCESS);
 367  if (payload != null) {
-368
builder.setPayload(ByteStringer.wrap(payload));
-369  }
-370  if (options != null) {
-371
builder.addAllCipherOption(PB_HELPER.convertCipherOptions(options));
-372  }
-373  DataTransferEncryptorMessageProto 
proto = builder.build();
-374  int size = 
proto.getSerializedSize();
-375  size += 
CodedOutputStream.computeRawVarint32Size(size);
-376  ByteBuf buf = 
ctx.alloc().buffer(size);
-377  proto.writeDelimitedTo(new 
ByteBufOutputStream(buf));
-378  ctx.write(buf);
-379}
-380
-381@Override
-382public void 
handlerAdded(ChannelHandlerContext ctx) throws Exception {
-383  
ctx.write(ctx.alloc().buffer(4).writeInt(SASL_TRANSFER_MAGIC_NUMBER));
-384  sendSaslMessage(ctx, new 
byte[0]);
-385  ctx.flush();
-386  step++;
-387}
-388
-389@Override
-390public void 
channelInactive(ChannelHandlerContext ctx) throws Exception {
-391  saslClient.dispose();
-392}
-393
-394private void 
check(DataTransferEncryptorMessageProto proto) throws IOException {
-395  if (proto.getStatus() == 
DataTransferEncryptorStatus.ERROR_UNKNOWN_KEY) {
-396throw new 
InvalidEncryptionKeyException(proto.getMessage());
-397  } else if (proto.getStatus() == 
DataTransferEncryptorStatus.ERROR) {
-398throw new 
IOException(proto.getMessage());
-399  }
-400}
-401
-402private String getNegotiatedQop() {
-403  return (String) 
saslClient.getNegotiatedProperty(Sasl.QOP);
-404}
-405
-406private boolean 
isNegotiatedQopPrivacy() {
-407  String qop = getNegotiatedQop();
-408  return qop != null && "auth-conf".equalsIgnoreCase(qop);
-409}
-410
-411private boolean 
requestedQopContainsPrivacy() {
-412  Set<String> requestedQop =
-413  ImmutableSet.copyOf(Arrays.asList(saslProps.get(Sasl.QOP).split(",")));
-414  return 
requestedQop.contains("auth-conf");
-415}
-416
-417private void checkSaslComplete() 
throws IOException {
-418  if (!saslClient.isComplete()) {
-419throw new IOException("Failed to 
complete SASL handshake");
-420  }
-421  Set<String> requestedQop =
-422  ImmutableSet.copyOf(Arrays.asList(saslProps.get(Sasl.QOP).split(",")));
-423  String negotiatedQop = 
getNegotiatedQop();
-424  LOG.debug(
-425"Verifying QOP, requested QOP = " 
+ requestedQop + ", negotiated QOP = " + negotiatedQop);
-426  if 
(!requestedQop.contains(negotiatedQop)) {
-427throw new 
IOException(String.format("SASL handshake completed, but "
-428+ "channel does not have 
acceptable quality of protection, "
-429+ "requested = %s, negotiated 
= %s",
-430  
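The handler code above verifies the negotiated SASL quality of protection: the requested QOP list comes from a comma-separated SASL property, the negotiated value must be one of the requested ones, and "auth-conf" implies privacy (encryption on the wire). A hedged, standalone sketch of that check, not the real Netty handler:

// Hedged sketch of the QOP verification pattern in the hunk above.
import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

public class QopCheck {
  static void checkQop(String requestedQopProp, String negotiatedQop) {
    Set<String> requested = new HashSet<>(Arrays.asList(requestedQopProp.split(",")));
    if (!requested.contains(negotiatedQop)) {
      throw new IllegalStateException(String.format(
          "SASL handshake completed, but channel does not have acceptable "
              + "quality of protection, requested = %s, negotiated = %s",
          requested, negotiatedQop));
    }
    boolean privacy = "auth-conf".equalsIgnoreCase(negotiatedQop); // privacy = encryption
    System.out.println("negotiated QOP = " + negotiatedQop + ", privacy = " + privacy);
  }

  public static void main(String[] args) {
    checkQop("auth,auth-int,auth-conf", "auth-conf"); // accepted, privacy enabled
  }
}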

[01/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-19 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 419fe0731 -> 9eba7fcf3


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9eba7fcf/devapidocs/src-html/org/apache/hadoop/hbase/util/ByteBufferArray.BufferCreatorCallable.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/util/ByteBufferArray.BufferCreatorCallable.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/util/ByteBufferArray.BufferCreatorCallable.html
new file mode 100644
index 000..7a442f0
--- /dev/null
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/util/ByteBufferArray.BufferCreatorCallable.html
@@ -0,0 +1,390 @@
+http://www.w3.org/TR/html4/loose.dtd;>
+
+
+Source code
+
+
+
+
+001/**
+002 * Copyright The Apache Software 
Foundation
+003 *
+004 * Licensed to the Apache Software 
Foundation (ASF) under one or more
+005 * contributor license agreements. See 
the NOTICE file distributed with this
+006 * work for additional information 
regarding copyright ownership. The ASF
+007 * licenses this file to you under the 
Apache License, Version 2.0 (the
+008 * "License"); you may not use this file 
except in compliance with the License.
+009 * You may obtain a copy of the License 
at
+010 *
+011 * 
http://www.apache.org/licenses/LICENSE-2.0
+012 *
+013 * Unless required by applicable law or 
agreed to in writing, software
+014 * distributed under the License is 
distributed on an "AS IS" BASIS, WITHOUT
+015 * WARRANTIES OR CONDITIONS OF ANY KIND, 
either express or implied. See the
+016 * License for the specific language 
governing permissions and limitations
+017 * under the License.
+018 */
+019package org.apache.hadoop.hbase.util;
+020
+021import java.io.IOException;
+022import java.nio.ByteBuffer;
+023import java.util.concurrent.Callable;
+024import 
java.util.concurrent.ExecutionException;
+025import 
java.util.concurrent.ExecutorService;
+026import java.util.concurrent.Future;
+027import 
java.util.concurrent.LinkedBlockingQueue;
+028import 
java.util.concurrent.ThreadPoolExecutor;
+029import java.util.concurrent.TimeUnit;
+030
+031import org.apache.commons.logging.Log;
+032import 
org.apache.commons.logging.LogFactory;
+033import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
+034import 
org.apache.hadoop.hbase.nio.ByteBuff;
+035import 
org.apache.hadoop.hbase.nio.MultiByteBuff;
+036import 
org.apache.hadoop.hbase.nio.SingleByteBuff;
+037import 
org.apache.hadoop.util.StringUtils;
+038
+039import 
com.google.common.annotations.VisibleForTesting;
+040
+041/**
+042 * This class manages an array of 
ByteBuffers with a default size 4MB. These
+043 * buffers are sequential and could be 
considered as a large buffer.It supports
+044 * reading/writing data from this large 
buffer with a position and offset
+045 */
+046@InterfaceAudience.Private
+047public final class ByteBufferArray {
+048  private static final Log LOG = 
LogFactory.getLog(ByteBufferArray.class);
+049
+050  public static final int 
DEFAULT_BUFFER_SIZE = 4 * 1024 * 1024;
+051  @VisibleForTesting
+052  ByteBuffer buffers[];
+053  private int bufferSize;
+054  private int bufferCount;
+055
+056  /**
+057   * We allocate a number of byte buffers 
as the capacity. In order not to out
+058   * of the array bounds for the last 
byte(see {@link ByteBufferArray#multiple}),
+059   * we will allocate one additional 
buffer with capacity 0;
+060   * @param capacity total size of the 
byte buffer array
+061   * @param directByteBuffer true if we 
allocate direct buffer
+062   * @param allocator the 
ByteBufferAllocator that will create the buffers
+063   * @throws IOException throws 
IOException if there is an exception thrown by the allocator
+064   */
+065  public ByteBufferArray(long capacity, 
boolean directByteBuffer, ByteBufferAllocator allocator)
+066  throws IOException {
+067this.bufferSize = 
DEFAULT_BUFFER_SIZE;
+068if (this.bufferSize > (capacity / 16))
+069  this.bufferSize = (int) roundUp(capacity / 16, 32768);
+070this.bufferCount = (int) 
(roundUp(capacity, bufferSize) / bufferSize);
+071LOG.info("Allocating buffers total=" 
+ StringUtils.byteDesc(capacity)
+072+ ", sizePerBuffer=" + 
StringUtils.byteDesc(bufferSize) + ", count="
+073+ bufferCount + ", direct=" + 
directByteBuffer);
+074buffers = new ByteBuffer[bufferCount 
+ 1];
+075createBuffers(directByteBuffer, 
allocator);
+076  }
+077
+078  private void createBuffers(boolean 
directByteBuffer, ByteBufferAllocator allocator)
+079  throws IOException {
+080int threadCount = 
Runtime.getRuntime().availableProcessors();
+081ExecutorService service = new 
ThreadPoolExecutor(threadCount, threadCount, 0L,
+082TimeUnit.MILLISECONDS, new LinkedBlockingQueue<Runnable>());
+083int perThreadCount = 
Math.round((float) (bufferCount) / threadCount);
+084int lastThreadCount = bufferCount - 
(perThreadCount * (threadCount - 1));
+085
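The ByteBufferArray constructor above starts with a 4 MB per-buffer size, shrinks it to capacity/16 rounded up to 32 KB for small capacities, derives the buffer count from the rounded-up capacity, and allocates one extra zero-capacity guard buffer. A hedged sketch of that arithmetic follows, plus the usual mapping from a global offset to a (buffer index, offset) pair that such an array implies; the mapping helper is illustrative, not ByteBufferArray's actual API.

// Hedged sketch of the sizing arithmetic visible in the constructor above.
import java.nio.ByteBuffer;

public class ByteBufferArraySketch {
  static final int DEFAULT_BUFFER_SIZE = 4 * 1024 * 1024;

  static long roundUp(long n, long to) {
    return ((n + to - 1) / to) * to;
  }

  public static void main(String[] args) {
    long capacity = 100L * 1024 * 1024;                       // e.g. a 100 MB cache
    int bufferSize = DEFAULT_BUFFER_SIZE;
    if (bufferSize > capacity / 16) {
      bufferSize = (int) roundUp(capacity / 16, 32768);       // shrink for small capacities
    }
    int bufferCount = (int) (roundUp(capacity, bufferSize) / bufferSize);
    // the real class fills these via an allocator; left null in this sketch
    ByteBuffer[] buffers = new ByteBuffer[bufferCount + 1];   // +1 zero-capacity guard
    System.out.println("sizePerBuffer=" + bufferSize + ", count=" + bufferCount
        + ", slots=" + buffers.length);

    long globalOffset = 10L * 1024 * 1024 + 123;              // some position in the array
    int bufferIndex = (int) (globalOffset / bufferSize);
    int offsetInBuffer = (int) (globalOffset % bufferSize);
    System.out.println("offset " + globalOffset + " -> buffer " + bufferIndex
        + " @ " + offsetInBuffer);
  }
}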

[01/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-15 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site fd04f4ec9 -> 17128d27c


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/17128d27/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.DeleteColumnFamilyProcedureBiConsumer.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.DeleteColumnFamilyProcedureBiConsumer.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.DeleteColumnFamilyProcedureBiConsumer.html
index eb9099e..35d5549 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.DeleteColumnFamilyProcedureBiConsumer.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.DeleteColumnFamilyProcedureBiConsumer.html
@@ -232,2671 +232,2699 @@
 224import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ShutdownResponse;
 225import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SnapshotRequest;
 226import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SnapshotResponse;
-227import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.StopMasterRequest;
-228import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.StopMasterResponse;
-229import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableRequest;
-230import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableResponse;
-231import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionRequest;
-232import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionResponse;
-233import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest;
-234import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse;
-235import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerRequest;
-236import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerResponse;
-237import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerRequest;
-238import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerResponse;
-239import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigRequest;
-240import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse;
-241import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersRequest;
-242import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersResponse;
-243import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest;
-244import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse;
-245import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigRequest;
-246import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigResponse;
-247import 
org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos;
-248import 
org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
-249import 
org.apache.hadoop.hbase.snapshot.RestoreSnapshotException;
-250import 
org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
-251import 
org.apache.hadoop.hbase.util.Bytes;
-252import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-253import 
org.apache.hadoop.hbase.util.ForeignExceptionUtil;
-254import 
org.apache.hadoop.hbase.util.Pair;
-255
-256/**
-257 * The implementation of AsyncAdmin.
-258 */
-259@InterfaceAudience.Private
-260public class RawAsyncHBaseAdmin 
implements AsyncAdmin {
-261  public static final String 
FLUSH_TABLE_PROCEDURE_SIGNATURE = "flush-table-proc";
-262
-263  private static final Log LOG = 
LogFactory.getLog(AsyncHBaseAdmin.class);
+227import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SplitTableRegionRequest;
+228import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SplitTableRegionResponse;
+229import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.StopMasterRequest;
+230import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.StopMasterResponse;
+231import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableRequest;
+232import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableResponse;
+233import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionRequest;
+234import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionResponse;
+235import 

[01/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-12 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 2f4156a78 -> 2777c6936


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2777c693/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestStore.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestStore.html 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestStore.html
index 4196a6c..6c65fd1 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestStore.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestStore.html
@@ -114,1476 +114,1636 @@
 106import org.mockito.Mockito;
 107
 108import com.google.common.collect.Lists;
-109
-110/**
-111 * Test class for the Store
-112 */
-113@Category({RegionServerTests.class, 
MediumTests.class})
-114public class TestStore {
-115  private static final Log LOG = 
LogFactory.getLog(TestStore.class);
-116  @Rule public TestName name = new 
TestName();
-117
-118  HStore store;
-119  byte [] table = 
Bytes.toBytes("table");
-120  byte [] family = 
Bytes.toBytes("family");
+109import 
org.apache.hadoop.hbase.filter.Filter;
+110import 
org.apache.hadoop.hbase.filter.FilterBase;
+111import static 
org.junit.Assert.assertEquals;
+112import static 
org.junit.Assert.assertTrue;
+113
+114/**
+115 * Test class for the Store
+116 */
+117@Category({RegionServerTests.class, 
MediumTests.class})
+118public class TestStore {
+119  private static final Log LOG = 
LogFactory.getLog(TestStore.class);
+120  @Rule public TestName name = new 
TestName();
 121
-122  byte [] row = Bytes.toBytes("row");
-123  byte [] row2 = Bytes.toBytes("row2");
-124  byte [] qf1 = Bytes.toBytes("qf1");
-125  byte [] qf2 = Bytes.toBytes("qf2");
-126  byte [] qf3 = Bytes.toBytes("qf3");
-127  byte [] qf4 = Bytes.toBytes("qf4");
-128  byte [] qf5 = Bytes.toBytes("qf5");
-129  byte [] qf6 = Bytes.toBytes("qf6");
-130
-131  NavigableSet<byte[]> qualifiers = new ConcurrentSkipListSet<>(Bytes.BYTES_COMPARATOR);
-132
-133  List<Cell> expected = new ArrayList<>();
-134  List<Cell> result = new ArrayList<>();
-135
-136  long id = System.currentTimeMillis();
-137  Get get = new Get(row);
-138
-139  private HBaseTestingUtility TEST_UTIL = 
new HBaseTestingUtility();
-140  private final String DIR = 
TEST_UTIL.getDataTestDir("TestStore").toString();
-141
+122  HStore store;
+123  byte [] table = 
Bytes.toBytes("table");
+124  byte [] family = 
Bytes.toBytes("family");
+125
+126  byte [] row = Bytes.toBytes("row");
+127  byte [] row2 = Bytes.toBytes("row2");
+128  byte [] qf1 = Bytes.toBytes("qf1");
+129  byte [] qf2 = Bytes.toBytes("qf2");
+130  byte [] qf3 = Bytes.toBytes("qf3");
+131  byte [] qf4 = Bytes.toBytes("qf4");
+132  byte [] qf5 = Bytes.toBytes("qf5");
+133  byte [] qf6 = Bytes.toBytes("qf6");
+134
+135  NavigableSet<byte[]> qualifiers = new ConcurrentSkipListSet<>(Bytes.BYTES_COMPARATOR);
+136
+137  List<Cell> expected = new ArrayList<>();
+138  List<Cell> result = new ArrayList<>();
+139
+140  long id = System.currentTimeMillis();
+141  Get get = new Get(row);
 142
-143  /**
-144   * Setup
-145   * @throws IOException
-146   */
-147  @Before
-148  public void setUp() throws IOException 
{
-149qualifiers.add(qf1);
-150qualifiers.add(qf3);
-151qualifiers.add(qf5);
-152
-153Iterator<byte[]> iter = qualifiers.iterator();
-154while(iter.hasNext()){
-155  byte [] next = iter.next();
-156  expected.add(new KeyValue(row, 
family, next, 1, (byte[])null));
-157  get.addColumn(family, next);
-158}
-159  }
-160
-161  private void init(String methodName) 
throws IOException {
-162init(methodName, 
TEST_UTIL.getConfiguration());
+143  private HBaseTestingUtility TEST_UTIL = 
new HBaseTestingUtility();
+144  private final String DIR = 
TEST_UTIL.getDataTestDir("TestStore").toString();
+145
+146
+147  /**
+148   * Setup
+149   * @throws IOException
+150   */
+151  @Before
+152  public void setUp() throws IOException 
{
+153qualifiers.add(qf1);
+154qualifiers.add(qf3);
+155qualifiers.add(qf5);
+156
+157Iterator<byte[]> iter = qualifiers.iterator();
+158while(iter.hasNext()){
+159  byte [] next = iter.next();
+160  expected.add(new KeyValue(row, 
family, next, 1, (byte[])null));
+161  get.addColumn(family, next);
+162}
 163  }
 164
-165  private Store init(String methodName, 
Configuration conf)
-166  throws IOException {
-167HColumnDescriptor hcd = new 
HColumnDescriptor(family);
-168// some of the tests write 4 versions 
and then flush
-169// (with HBASE-4241, lower versions 
are collected on flush)
-170hcd.setMaxVersions(4);
-171return init(methodName, conf, hcd);
-172  }
-173
-174  private Store init(String methodName, 
Configuration conf,
-175  HColumnDescriptor hcd) throws 
IOException {
-176HTableDescriptor htd = new 
HTableDescriptor(TableName.valueOf(table));
-177return init(methodName, conf, 

[01/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-11 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 5a4910e7b -> 90c7dfe41


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/90c7dfe4/hbase-archetypes/hbase-client-project/source-repository.html
--
diff --git a/hbase-archetypes/hbase-client-project/source-repository.html 
b/hbase-archetypes/hbase-client-project/source-repository.html
index 68b0a44..7ff8082 100644
--- a/hbase-archetypes/hbase-client-project/source-repository.html
+++ b/hbase-archetypes/hbase-client-project/source-repository.html
@@ -1,5 +1,5 @@
 http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd;>
-
+
 http://www.w3.org/1999/xhtml; xml:lang="en" lang="en">
   
 
@@ -10,7 +10,7 @@
   @import url("./css/site.css");
 
 
-
+
 
 
 
@@ -27,7 +27,7 @@
 
 
 
-Last Published: 2017-07-10
+Last Published: 2017-07-11
   | Version: 
3.0.0-SNAPSHOT
   
 Apache HBase - Exemplar for 
hbase-client archetype



[01/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-10 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site f2b6c7226 -> 0821e51a0


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0821e51a/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.StorageTypeSetter.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.StorageTypeSetter.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.StorageTypeSetter.html
index f729c99..0a32350 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.StorageTypeSetter.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.StorageTypeSetter.html
@@ -54,757 +54,756 @@
 046import 
io.netty.channel.ChannelPipeline;
 047import io.netty.channel.EventLoop;
 048import 
io.netty.channel.SimpleChannelInboundHandler;
-049import 
io.netty.channel.socket.nio.NioSocketChannel;
-050import 
io.netty.handler.codec.protobuf.ProtobufDecoder;
-051import 
io.netty.handler.codec.protobuf.ProtobufVarint32FrameDecoder;
-052import 
io.netty.handler.timeout.IdleStateEvent;
-053import 
io.netty.handler.timeout.IdleStateHandler;
-054import io.netty.util.concurrent.Future;
-055import 
io.netty.util.concurrent.FutureListener;
-056import 
io.netty.util.concurrent.Promise;
-057
-058import java.io.IOException;
-059import 
java.lang.reflect.InvocationTargetException;
-060import java.lang.reflect.Method;
-061import java.util.ArrayList;
-062import java.util.EnumSet;
-063import java.util.List;
-064import java.util.concurrent.TimeUnit;
-065
-066import org.apache.commons.logging.Log;
-067import 
org.apache.commons.logging.LogFactory;
-068import 
org.apache.hadoop.conf.Configuration;
-069import 
org.apache.hadoop.crypto.CryptoProtocolVersion;
-070import 
org.apache.hadoop.crypto.Encryptor;
-071import org.apache.hadoop.fs.CreateFlag;
-072import org.apache.hadoop.fs.FileSystem;
-073import 
org.apache.hadoop.fs.FileSystemLinkResolver;
-074import org.apache.hadoop.fs.Path;
-075import 
org.apache.hadoop.fs.UnresolvedLinkException;
-076import 
org.apache.hadoop.fs.permission.FsPermission;
-077import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
-078import 
org.apache.hadoop.hbase.client.ConnectionUtils;
-079import 
org.apache.hadoop.hbase.util.CancelableProgressable;
-080import 
org.apache.hadoop.hbase.util.FSUtils;
-081import 
org.apache.hadoop.hdfs.DFSClient;
-082import 
org.apache.hadoop.hdfs.DFSOutputStream;
-083import 
org.apache.hadoop.hdfs.DistributedFileSystem;
-084import 
org.apache.hadoop.hdfs.protocol.ClientProtocol;
-085import 
org.apache.hadoop.hdfs.protocol.DatanodeInfo;
-086import 
org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-087import 
org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
-088import 
org.apache.hadoop.hdfs.protocol.LocatedBlock;
-089import 
org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage;
-090import 
org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil;
-091import 
org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtocol;
-092import 
org.apache.hadoop.hdfs.protocol.datatransfer.Op;
-093import 
org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck;
-094import 
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto;
-095import 
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
-096import 
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto;
-097import 
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto;
-098import 
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto;
-099import 
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto;
-100import 
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto;
-101import 
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
-102import 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto;
-103import 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto;
-104import 
org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
-105import 
org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException;
-106import 
org.apache.hadoop.hdfs.server.namenode.LeaseExpiredException;
-107import 
org.apache.hadoop.io.EnumSetWritable;
-108import 
org.apache.hadoop.ipc.RemoteException;
-109import org.apache.hadoop.net.NetUtils;
-110import 
org.apache.hadoop.security.proto.SecurityProtos.TokenProto;
-111import 
org.apache.hadoop.security.token.Token;
-112import 
org.apache.hadoop.util.DataChecksum;
-113
-114/**
-115 * Helper class for implementing {@link 
FanOutOneBlockAsyncDFSOutput}.
-116 */
-117@InterfaceAudience.Private
-118public final class 
FanOutOneBlockAsyncDFSOutputHelper {
+049import 

[01/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-09 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site dc577176c -> 2d27954ab


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2d27954a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.TableProcedureBiConsumer.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.TableProcedureBiConsumer.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.TableProcedureBiConsumer.html
index 16c0042..71844ce 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.TableProcedureBiConsumer.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.TableProcedureBiConsumer.html
@@ -126,2499 +126,2543 @@
 118import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableResponse;
 119import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest;
 120import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableCatalogJanitorResponse;
-121import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableRequest;
-122import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableResponse;
-123import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnRequest;
-124import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnResponse;
-125import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureRequest;
-126import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureResponse;
-127import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterStatusRequest;
-128import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterStatusResponse;
-129import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest;
-130import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse;
-131import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest;
-132import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse;
-133import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultRequest;
-134import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultResponse;
-135import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetSchemaAlterStatusRequest;
-136import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetSchemaAlterStatusResponse;
-137import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableDescriptorsRequest;
-138import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableDescriptorsResponse;
-139import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableNamesRequest;
-140import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableNamesResponse;
-141import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableRequest;
-142import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableResponse;
-143import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableRequest;
-144import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableResponse;
-145import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsBalancerEnabledRequest;
-146import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsBalancerEnabledResponse;
-147import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledRequest;
-148import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledResponse;
-149import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsCleanerChoreEnabledRequest;
-150import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsCleanerChoreEnabledResponse;
-151import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest;
-152import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse;
-153import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsNormalizerEnabledRequest;
-154import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsNormalizerEnabledResponse;
-155import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsProcedureDoneRequest;
-156import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsProcedureDoneResponse;
-157import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneRequest;
-158import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneResponse;
-159import 

[01/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-08 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 225052054 -> 9fb0764ba


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9fb0764b/devapidocs/org/apache/hadoop/hbase/io/crypto/class-use/Encryption.Context.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/io/crypto/class-use/Encryption.Context.html
 
b/devapidocs/org/apache/hadoop/hbase/io/crypto/class-use/Encryption.Context.html
index 9842663..4c3213b 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/io/crypto/class-use/Encryption.Context.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/io/crypto/class-use/Encryption.Context.html
@@ -332,33 +332,33 @@
 
 
 static StoreFileWriter
-MobUtils.createWriter(org.apache.hadoop.conf.Configuration conf,
+MobUtils.createWriter(org.apache.hadoop.conf.Configuration conf,
 org.apache.hadoop.fs.FileSystem fs,
-HColumnDescriptor family,
-MobFileName mobFileName,
-org.apache.hadoop.fs.Path basePath,
+ColumnFamilyDescriptor family,
+org.apache.hadoop.fs.Path path,
 long maxKeyCount,
 Compression.Algorithm compression,
 CacheConfig cacheConfig,
 Encryption.Context cryptoContext,
+ChecksumType checksumType,
+int bytesPerChecksum,
+int blocksize,
+BloomType bloomType,
 boolean isCompaction)
 Creates a writer for the mob file in temp directory.
 
 
 
 static StoreFileWriter
-MobUtils.createWriter(org.apache.hadoop.conf.Configuration conf,
+MobUtils.createWriter(org.apache.hadoop.conf.Configuration conf,
 org.apache.hadoop.fs.FileSystem fs,
 HColumnDescriptor family,
-org.apache.hadoop.fs.Path path,
+MobFileName mobFileName,
+org.apache.hadoop.fs.Path basePath,
 long maxKeyCount,
 Compression.Algorithm compression,
 CacheConfig cacheConfig,
 Encryption.Context cryptoContext,
-ChecksumType checksumType,
-int bytesPerChecksum,
-int blocksize,
-BloomType bloomType,
 boolean isCompaction)
 Creates a writer for the mob file in temp directory.
 
@@ -463,8 +463,8 @@
 
 
 static Encryption.Context
-EncryptionUtil.createEncryptionContext(org.apache.hadoop.conf.Configuration conf,
-   HColumnDescriptor family)
+EncryptionUtil.createEncryptionContext(org.apache.hadoop.conf.Configuration conf,
+   ColumnFamilyDescriptor family)
 Helper to create an encryption context.
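
A minimal usage sketch for the updated createEncryptionContext signature shown above. The column family definition, key provisioning, and the class/method names used in the sketch are illustrative assumptions, not part of the change itself.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.io.crypto.Encryption;
import org.apache.hadoop.hbase.security.EncryptionUtil;
import org.apache.hadoop.hbase.util.Bytes;

public class EncryptionContextSketch {
  // Builds an Encryption.Context for one column family, assuming key material for the
  // family is already provisioned through the cluster's configured KeyProvider.
  public static Encryption.Context contextFor(Configuration conf, byte[] familyName)
      throws IOException {
    ColumnFamilyDescriptor family = ColumnFamilyDescriptorBuilder.newBuilder(familyName)
        .setEncryptionType("AES") // enable transparent encryption for this family
        .build();
    // Post-change signature from the table above: (Configuration, ColumnFamilyDescriptor).
    return EncryptionUtil.createEncryptionContext(conf, family);
  }

  public static void main(String[] args) throws IOException {
    Encryption.Context ctx = contextFor(HBaseConfiguration.create(), Bytes.toBytes("d"));
    System.out.println("Created crypto context: " + ctx);
  }
}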
 
 



[01/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-07 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 1772a08a9 -> b3b50f225


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b3b50f22/testdevapidocs/src-html/org/apache/hadoop/hbase/master/cleaner/TestHFileCleaner.DummyServer.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/master/cleaner/TestHFileCleaner.DummyServer.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/master/cleaner/TestHFileCleaner.DummyServer.html
index f34435c..b0a6a6f 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/master/cleaner/TestHFileCleaner.DummyServer.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/master/cleaner/TestHFileCleaner.DummyServer.html
@@ -30,399 +30,410 @@
 022import static 
org.junit.Assert.assertTrue;
 023
 024import java.io.IOException;
-025import java.util.Random;
-026
-027import org.apache.commons.logging.Log;
-028import 
org.apache.commons.logging.LogFactory;
-029import 
org.apache.hadoop.conf.Configuration;
-030import 
org.apache.hadoop.fs.FSDataOutputStream;
-031import org.apache.hadoop.fs.FileStatus;
-032import org.apache.hadoop.fs.FileSystem;
-033import org.apache.hadoop.fs.Path;
-034import 
org.apache.hadoop.hbase.ChoreService;
-035import 
org.apache.hadoop.hbase.CoordinatedStateManager;
-036import 
org.apache.hadoop.hbase.HBaseTestingUtility;
-037import 
org.apache.hadoop.hbase.HConstants;
-038import org.apache.hadoop.hbase.Server;
-039import 
org.apache.hadoop.hbase.ServerName;
-040import 
org.apache.hadoop.hbase.client.ClusterConnection;
-041import 
org.apache.hadoop.hbase.testclassification.MasterTests;
-042import 
org.apache.hadoop.hbase.testclassification.MediumTests;
-043import 
org.apache.hadoop.hbase.util.EnvironmentEdge;
-044import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-045import 
org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
-046import 
org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
-047import org.junit.AfterClass;
-048import org.junit.Assert;
-049import org.junit.BeforeClass;
-050import org.junit.Test;
-051import 
org.junit.experimental.categories.Category;
-052
-053@Category({MasterTests.class, 
MediumTests.class})
-054public class TestHFileCleaner {
-055  private static final Log LOG = 
LogFactory.getLog(TestHFileCleaner.class);
-056
-057  private final static 
HBaseTestingUtility UTIL = new HBaseTestingUtility();
-058
-059  @BeforeClass
-060  public static void setupCluster() 
throws Exception {
-061// have to use a minidfs cluster 
because the localfs doesn't modify file times correctly
-062UTIL.startMiniDFSCluster(1);
-063  }
-064
-065  @AfterClass
-066  public static void shutdownCluster() 
throws IOException {
-067UTIL.shutdownMiniDFSCluster();
-068  }
-069
-070  @Test
-071  public void testTTLCleaner() throws 
IOException, InterruptedException {
-072FileSystem fs = 
UTIL.getDFSCluster().getFileSystem();
-073Path root = 
UTIL.getDataTestDirOnTestFS();
-074Path file = new Path(root, "file");
-075fs.createNewFile(file);
-076long createTime = 
System.currentTimeMillis();
-077assertTrue("Test file not created!", 
fs.exists(file));
-078TimeToLiveHFileCleaner cleaner = new 
TimeToLiveHFileCleaner();
-079// update the time info for the file, 
so the cleaner removes it
-080fs.setTimes(file, createTime - 100, 
-1);
-081Configuration conf = 
UTIL.getConfiguration();
-082
conf.setLong(TimeToLiveHFileCleaner.TTL_CONF_KEY, 100);
-083cleaner.setConf(conf);
-084assertTrue("File not set deletable - 
check mod time:" + getFileStats(file, fs)
-085+ " with create time:" + 
createTime, cleaner.isFileDeletable(fs.getFileStatus(file)));
-086  }
-087
-088  /**
-089   * @param file to check
-090   * @return loggable information about 
the file
-091   */
-092  private String getFileStats(Path file, 
FileSystem fs) throws IOException {
-093FileStatus status = 
fs.getFileStatus(file);
-094return "File" + file + ", mtime:" + 
status.getModificationTime() + ", atime:"
-095+ status.getAccessTime();
-096  }
-097
-098  @Test(timeout = 60 *1000)
-099  public void testHFileCleaning() throws 
Exception {
-100final EnvironmentEdge originalEdge = 
EnvironmentEdgeManager.getDelegate();
-101String prefix = 
"someHFileThatWouldBeAUUID";
-102Configuration conf = 
UTIL.getConfiguration();
-103// set TTL
-104long ttl = 2000;
-105
conf.set(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS,
-106  
"org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner");
-107
conf.setLong(TimeToLiveHFileCleaner.TTL_CONF_KEY, ttl);
-108Server server = new DummyServer();
-109Path archivedHfileDir = new 
Path(UTIL.getDataTestDirOnTestFS(), HConstants.HFILE_ARCHIVE_DIRECTORY);
-110FileSystem fs = 
FileSystem.get(conf);
-111HFileCleaner cleaner = new 
HFileCleaner(1000, server, conf, fs, archivedHfileDir);
-112
-113// Create 2 invalid files, 1 "recent" 
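
The TTL test earlier in this listing drives TimeToLiveHFileCleaner directly. A minimal configuration sketch of the same setup follows; the five-minute TTL is an illustrative value, and the constants are assumed to be the same ones the test references.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.master.cleaner.HFileCleaner;
import org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner;

public class HFileCleanerConfigSketch {
  public static Configuration withTtlCleaner() {
    Configuration conf = HBaseConfiguration.create();
    // Register the TTL delegate with the master's HFileCleaner chore,
    // as the test above does programmatically.
    conf.set(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS,
        "org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner");
    // Archived HFiles become deletable once older than this TTL (milliseconds).
    conf.setLong(TimeToLiveHFileCleaner.TTL_CONF_KEY, 5 * 60 * 1000L);
    return conf;
  }
}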

[01/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-05 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 4d31ce6df -> ca9f69253


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ca9f6925/devapidocs/org/apache/hadoop/hbase/regionserver/package-use.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/package-use.html 
b/devapidocs/org/apache/hadoop/hbase/regionserver/package-use.html
index 73d6999..40ec19a 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/package-use.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/package-use.html
@@ -949,39 +949,51 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 
 
+CompactingMemStore.IndexType
+Types of indexes (part of immutable segments) to be used 
after flattening,
+ compaction, or merge are applied.
+
+
+
 CompactionPipeline
 The compaction pipeline of a CompactingMemStore, is a 
FIFO queue of segments.
 
 
-
+
 CompactionRequestor
 
-
+
 CompactionTool.CompactionWorker
 Class responsible to execute the Compaction on the 
specified path.
 
 
-
+
 CompactSplit
 Compact region on request and then run split if 
appropriate
 
 
-
+
 CompactSplit.CompactionRunner
 
-
+
 CompositeImmutableSegment
 The CompositeImmutableSegments is created as a collection 
of ImmutableSegments and supports
  the interface of a single ImmutableSegments.
 
 
-
+
 ConstantSizeRegionSplitPolicy
 A RegionSplitPolicy 
implementation which splits a region
  as soon as any of its store files exceeds a maximum configurable
  size.
 
 
+
+CSLMImmutableSegment
+CSLMImmutableSegment is an abstract class that extends the 
API supported by a Segment,
+ and ImmutableSegment.
+
+
 
 DefaultHeapMemoryTuner.StepDirection
 
@@ -1120,53 +1132,48 @@ Input/OutputFormats, a table indexing MapReduce job, 
and utility methods.
 
 
 
-ImmutableSegment.Type
-Types of ImmutableSegment
-
-
-
 IncreasingToUpperBoundRegionSplitPolicy
 Split size is the number of regions that are on this server 
that all are
  of the same table, cubed, times 2x the region flush size OR the maximum
  region split size, whichever is smaller.
 
 
-
+
 InternalScanner
 Internal scanners differ from client-side scanners in that 
they operate on
  HStoreKeys and byte[] instead of RowResults.
 
 
-
+
 KeyValueHeap
 Implements a heap merge across any number of 
KeyValueScanners.
 
 
-
+
 KeyValueHeap.KVScannerComparator
 
-
+
 KeyValueScanner
 Scanner that returns the next KeyValue.
 
 
-
+
 LastSequenceId
 Last flushed sequence Ids for the regions and their stores 
on region server
 
 
-
+
 LeaseException
 Reports a problem with a lease
 
 
-
+
 LeaseListener
 LeaseListener is an interface meant to be implemented by 
users of the Leases
  class.
 
 
-
+
 Leases
 Leases
 
@@ -1174,39 +1181,39 @@ Input/OutputFormats, a table indexing MapReduce job, 
and utility methods.
  clients that occasionally send heartbeats.
 
 
-
+
 Leases.Lease
 This class tracks a single Lease.
 
 
-
+
 Leases.LeaseStillHeldException
 Thrown if we are asked to create a lease but lease on 
passed name already
  exists.
 
 
-
+
 LogRoller
 Runs periodically to determine if the WAL should be 
rolled.
 
 
-
+
 MemStore
 The MemStore holds in-memory modifications to the 
Store.
 
 
-
+
 MemStoreCompactor
 The ongoing MemStore Compaction manager, dispatches a solo 
running compaction and interrupts
  the compaction if requested.
 
 
-
+
 MemStoreCompactor.Action
 Types of actions to be done on the pipeline upon 
MemStoreCompaction invocation.
 
 
-
+
 MemStoreFlusher
 Thread that flushes cache on request
 
@@ -1215,23 +1222,23 @@ Input/OutputFormats, a table indexing MapReduce job, 
and utility methods.
  sleep time which is invariant.
 
 
-
+
 MemStoreFlusher.FlushHandler
 
-
+
 MemStoreFlusher.FlushQueueEntry
 
-
+
 MemStoreFlusher.FlushRegionEntry
 Datastructure used in the flush queue.
 
 
-
+
 MemStoreLAB
 A memstore-local allocation buffer.
 
 
-
+
 MemStoreSegmentsIterator
 The MemStoreSegmentsIterator is designed to perform one 
iteration over given list of segments
  For another iteration new instance of MemStoreSegmentsIterator needs to be 
created
@@ -1239,537 +1246,537 @@ Input/OutputFormats, a table indexing MapReduce job, 
and utility methods.
  in each period of time
 
 
-
+
 MemstoreSize
 Wraps the data size part and total heap space occupied by 
the memstore.
 
 
-
+
 MemStoreSnapshot
 Holds details of the snapshot taken on a MemStore.
 
 
-
+
 MetricsHeapMemoryManager
 This class is for maintaining the various regionserver's 
heap memory manager statistics and
  publishing them through the metrics interfaces.
 
 
-
+
 MetricsHeapMemoryManagerSource
 This interface will be implemented by a MetricsSource that 
will export metrics from
  HeapMemoryManager in RegionServer into the hadoop metrics system.
 
 
-
+
 MetricsHeapMemoryManagerSourceImpl
 Hadoop2 implementation of 
MetricsHeapMemoryManagerSource.
 
 
-
+
 MetricsRegion
 This is the glue between the 

[01/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-27 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 0a610a81d -> 8e3b63ca5


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8e3b63ca/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RegionReplicaRackCostFunction.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RegionReplicaRackCostFunction.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RegionReplicaRackCostFunction.html
index c895448..545d4da 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RegionReplicaRackCostFunction.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RegionReplicaRackCostFunction.html
@@ -1294,425 +1294,426 @@
 1286  }
 1287
 1288  // We normalize locality to be a 
score between 0 and 1.0 representing how good it
-1289  // is compared to how good it 
could be
-1290  locality /= bestLocality;
-1291}
-1292
-1293@Override
-1294protected void regionMoved(int 
region, int oldServer, int newServer) {
-1295  int oldEntity = type == 
LocalityType.SERVER ? oldServer : cluster.serverIndexToRackIndex[oldServer];
-1296  int newEntity = type == 
LocalityType.SERVER ? newServer : cluster.serverIndexToRackIndex[newServer];
-1297  if (this.services == null) {
-1298return;
-1299  }
-1300  double localityDelta = 
getWeightedLocality(region, newEntity) - getWeightedLocality(region, 
oldEntity);
-1301  double normalizedDelta = 
localityDelta / bestLocality;
-1302  locality += normalizedDelta;
-1303}
-1304
-1305@Override
-1306double cost() {
-1307  return 1 - locality;
-1308}
-1309
-1310private int 
getMostLocalEntityForRegion(int region) {
-1311  return 
cluster.getOrComputeRegionsToMostLocalEntities(type)[region];
-1312}
-1313
-1314private double 
getWeightedLocality(int region, int entity) {
-1315  return 
cluster.getOrComputeWeightedLocality(region, entity, type);
-1316}
-1317
-1318  }
-1319
-1320  static class 
ServerLocalityCostFunction extends LocalityBasedCostFunction {
-1321
-1322private static final String 
LOCALITY_COST_KEY = "hbase.master.balancer.stochastic.localityCost";
-1323private static final float 
DEFAULT_LOCALITY_COST = 25;
-1324
-1325
ServerLocalityCostFunction(Configuration conf, MasterServices srv) {
-1326  super(
-1327  conf,
-1328  srv,
-1329  LocalityType.SERVER,
-1330  LOCALITY_COST_KEY,
-1331  DEFAULT_LOCALITY_COST
-1332  );
-1333}
-1334
-1335@Override
-1336int regionIndexToEntityIndex(int 
region) {
-1337  return 
cluster.regionIndexToServerIndex[region];
-1338}
-1339  }
-1340
-1341  static class RackLocalityCostFunction 
extends LocalityBasedCostFunction {
-1342
-1343private static final String 
RACK_LOCALITY_COST_KEY = "hbase.master.balancer.stochastic.rackLocalityCost";
-1344private static final float 
DEFAULT_RACK_LOCALITY_COST = 15;
-1345
-1346public 
RackLocalityCostFunction(Configuration conf, MasterServices services) {
-1347  super(
-1348  conf,
-1349  services,
-1350  LocalityType.RACK,
-1351  RACK_LOCALITY_COST_KEY,
-1352  DEFAULT_RACK_LOCALITY_COST
-1353  );
-1354}
-1355
-1356@Override
-1357int regionIndexToEntityIndex(int 
region) {
-1358  return 
cluster.getRackForRegion(region);
-1359}
-1360  }
-1361
-1362  /**
-1363   * Base class that allows writing cost 
functions from rolling average of some
-1364   * number from RegionLoad.
-1365   */
-1366  abstract static class 
CostFromRegionLoadFunction extends CostFunction {
-1367
-1368private ClusterStatus clusterStatus 
= null;
-1369private Map<String, 
Deque<BalancerRegionLoad>> loads = null;
-1370private double[] stats = null;
-1371
CostFromRegionLoadFunction(Configuration conf) {
-1372  super(conf);
-1373}
-1374
-1375void setClusterStatus(ClusterStatus 
status) {
-1376  this.clusterStatus = status;
-1377}
-1378
-1379void setLoads(Map<String, 
Deque<BalancerRegionLoad>> l) {
-1380  this.loads = l;
-1381}
-1382
-1383@Override
-1384double cost() {
-1385  if (clusterStatus == null || loads 
== null) {
-1386return 0;
-1387  }
-1388
-1389  if (stats == null || stats.length 
!= cluster.numServers) {
-1390stats = new 
double[cluster.numServers];
-1391  }
-1392
-1393  for (int i =0; i < 
stats.length; i++) {
-1394//Cost this server has from 
RegionLoad
-1395long cost = 0;
-1396
-1397// for every region on this 
server get the rl
-1398for(int 
regionIndex:cluster.regionsPerServer[i]) {
-1399  
Collection<BalancerRegionLoad> regionLoadList =  
cluster.regionLoads[regionIndex];
-1400
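
Restated as a worked equation, using the names from the LocalityBasedCostFunction listing above:

    locality = ( sum over regions r of getWeightedLocality(r, entity(r)) ) / bestLocality
    cost     = 1 - locality

A layout in which every region already sits on its most-local server (or rack) therefore costs 0, and the cost approaches 1 as placements drift away from the best locality the cluster could achieve. regionMoved() keeps the sum incremental by adding (newLocality - oldLocality) / bestLocality for the moved region.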

[01/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-26 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 1a694056b -> aecb12861


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/aecb1286/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.ProcedureBiConsumer.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.ProcedureBiConsumer.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.ProcedureBiConsumer.html
index 01496d6..dc12c09 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.ProcedureBiConsumer.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.ProcedureBiConsumer.html
@@ -48,2406 +48,2267 @@
 040
 041import io.netty.util.Timeout;
 042import io.netty.util.TimerTask;
-043import java.util.stream.Stream;
-044import org.apache.commons.io.IOUtils;
-045import org.apache.commons.logging.Log;
-046import 
org.apache.commons.logging.LogFactory;
-047import 
org.apache.hadoop.hbase.HRegionInfo;
-048import 
org.apache.hadoop.hbase.HRegionLocation;
-049import 
org.apache.hadoop.hbase.MetaTableAccessor;
-050import 
org.apache.hadoop.hbase.MetaTableAccessor.QueryType;
-051import 
org.apache.hadoop.hbase.NotServingRegionException;
-052import 
org.apache.hadoop.hbase.ProcedureInfo;
-053import 
org.apache.hadoop.hbase.RegionLocations;
-054import 
org.apache.hadoop.hbase.ServerName;
-055import 
org.apache.hadoop.hbase.NamespaceDescriptor;
-056import 
org.apache.hadoop.hbase.HConstants;
-057import 
org.apache.hadoop.hbase.TableExistsException;
-058import 
org.apache.hadoop.hbase.TableName;
-059import 
org.apache.hadoop.hbase.AsyncMetaTableAccessor;
-060import 
org.apache.hadoop.hbase.TableNotDisabledException;
-061import 
org.apache.hadoop.hbase.TableNotEnabledException;
-062import 
org.apache.hadoop.hbase.TableNotFoundException;
-063import 
org.apache.hadoop.hbase.UnknownRegionException;
-064import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
-065import 
org.apache.hadoop.hbase.classification.InterfaceStability;
-066import 
org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.AdminRequestCallerBuilder;
-067import 
org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.MasterRequestCallerBuilder;
-068import 
org.apache.hadoop.hbase.client.Scan.ReadType;
-069import 
org.apache.hadoop.hbase.client.replication.ReplicationSerDeHelper;
-070import 
org.apache.hadoop.hbase.client.replication.TableCFs;
-071import 
org.apache.hadoop.hbase.exceptions.DeserializationException;
-072import 
org.apache.hadoop.hbase.ipc.HBaseRpcController;
-073import 
org.apache.hadoop.hbase.quotas.QuotaFilter;
-074import 
org.apache.hadoop.hbase.quotas.QuotaSettings;
-075import 
org.apache.hadoop.hbase.quotas.QuotaTableUtil;
-076import 
org.apache.hadoop.hbase.replication.ReplicationException;
-077import 
org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-078import 
org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
-079import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback;
-080import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-081import 
org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
-082import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
-083import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest;
-084import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse;
-085import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
-086import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse;
-087import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest;
-088import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse;
-089import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionRequest;
-090import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionResponse;
-091import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionRequest;
-092import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionResponse;
-093import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription;
-094import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema;
-095import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest;
-096import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse;
-097import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest;
-098import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse;
-099import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest;
-100import 

[01/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-20 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site da32ec44a -> a719cd003


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a719cd00/hbase-archetypes/dependency-management.html
--
diff --git a/hbase-archetypes/dependency-management.html 
b/hbase-archetypes/dependency-management.html
index cdcd3f9..a2cb465 100644
--- a/hbase-archetypes/dependency-management.html
+++ b/hbase-archetypes/dependency-management.html
@@ -1,5 +1,5 @@
 http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd;>
-
+
 http://www.w3.org/1999/xhtml; xml:lang="en" lang="en">
   
 
@@ -10,7 +10,7 @@
   @import url("./css/site.css");
 
 
-
+
 
 
 
@@ -27,7 +27,7 @@
 
 
 
-Last Published: 2017-06-19
+Last Published: 2017-06-20
   | Version: 
3.0.0-SNAPSHOT
   
 Apache HBase - Archetypes
@@ -234,360 +234,366 @@
 jar
 http://www.apache.org/licenses/LICENSE-2.0.txt;>The Apache Software 
License, Version 2.0
 
+org.apache.avro
+http://avro.apache.org;>avro
+1.7.7
+jar
+http://www.apache.org/licenses/LICENSE-2.0.txt;>The Apache Software 
License, Version 2.0
+
 org.apache.commons
 http://commons.apache.org/proper/commons-crypto/;>commons-crypto
 1.0.0
 jar
 http://www.apache.org/licenses/LICENSE-2.0.txt;>Apache License, Version 
2.0
-
+
 org.apache.commons
 http://commons.apache.org/math/;>commons-math
 2.2
 jar
 http://www.apache.org/licenses/LICENSE-2.0.txt;>The Apache Software 
License, Version 2.0
-
+
 org.apache.curator
 http://curator.apache.org/curator-client;>curator-client
 2.12.0
 jar
 http://www.apache.org/licenses/LICENSE-2.0.txt;>The Apache Software 
License, Version 2.0
-
+
 org.apache.curator
 http://curator.apache.org/curator-framework;>curator-framework
 2.12.0
 jar
 http://www.apache.org/licenses/LICENSE-2.0.txt;>The Apache Software 
License, Version 2.0
-
+
 org.apache.curator
 http://curator.apache.org/curator-recipes;>curator-recipes
 2.12.0
 jar
 http://www.apache.org/licenses/LICENSE-2.0.txt;>The Apache Software 
License, Version 2.0
-
+
 org.apache.hadoop
 hadoop-auth
 2.7.1
 jar
 http://www.apache.org/licenses/LICENSE-2.0.txt;>Apache License, Version 
2.0
-
+
 org.apache.hadoop
 hadoop-client
 2.7.1
 jar
 http://www.apache.org/licenses/LICENSE-2.0.txt;>Apache License, Version 
2.0
-
+
 org.apache.hadoop
 hadoop-common
 2.7.1
 jar
 http://www.apache.org/licenses/LICENSE-2.0.txt;>Apache License, Version 
2.0
-
+
 org.apache.hadoop
 hadoop-hdfs
 2.7.1
 jar
 http://www.apache.org/licenses/LICENSE-2.0.txt;>Apache License, Version 
2.0
-
+
 org.apache.hadoop
 hadoop-mapreduce-client-core
 2.7.1
 jar
 http://www.apache.org/licenses/LICENSE-2.0.txt;>Apache License, Version 
2.0
-
+
 org.apache.hadoop
 hadoop-mapreduce-client-jobclient
 2.7.1
 jar
 http://www.apache.org/licenses/LICENSE-2.0.txt;>Apache License, Version 
2.0
-
+
 org.apache.hadoop
 hadoop-minicluster
 2.7.1
 jar
 http://www.apache.org/licenses/LICENSE-2.0.txt;>Apache License, Version 
2.0
-
+
 org.apache.hbase
 http://hbase.apache.org/hbase-annotations;>hbase-annotations
 3.0.0-SNAPSHOT
 jar
 https://www.apache.org/licenses/LICENSE-2.0.txt;>Apache License, Version 
2.0
-
+
 org.apache.hbase
 http://hbase.apache.org/hbase-annotations;>hbase-annotations
 3.0.0-SNAPSHOT
 test-jar
 https://www.apache.org/licenses/LICENSE-2.0.txt;>Apache License, Version 
2.0
-
+
 org.apache.hbase
 http://hbase.apache.org/hbase-client;>hbase-client
 3.0.0-SNAPSHOT
 jar
 https://www.apache.org/licenses/LICENSE-2.0.txt;>Apache License, Version 
2.0
-
+
 org.apache.hbase
 http://hbase.apache.org/hbase-common;>hbase-common
 3.0.0-SNAPSHOT
 jar
 https://www.apache.org/licenses/LICENSE-2.0.txt;>Apache License, Version 
2.0
-
+
 org.apache.hbase
 http://hbase.apache.org/hbase-endpoint;>hbase-endpoint
 3.0.0-SNAPSHOT
 jar
 https://www.apache.org/licenses/LICENSE-2.0.txt;>Apache License, Version 
2.0
-
+
 org.apache.hbase
 http://hbase.apache.org/hbase-examples;>hbase-examples
 3.0.0-SNAPSHOT
 jar
 https://www.apache.org/licenses/LICENSE-2.0.txt;>Apache License, Version 
2.0
-
+
 org.apache.hbase
 http://hbase.apache.org/hbase-external-blockcache;>hbase-external-blockcache
 3.0.0-SNAPSHOT
 jar
 https://www.apache.org/licenses/LICENSE-2.0.txt;>Apache License, Version 
2.0
-
+
 org.apache.hbase
 http://hbase.apache.org/hbase-hadoop-compat;>hbase-hadoop-compat
 3.0.0-SNAPSHOT
 jar
 https://www.apache.org/licenses/LICENSE-2.0.txt;>Apache License, Version 
2.0
-
+
 org.apache.hbase
 http://hbase.apache.org/hbase-hadoop2-compat;>hbase-hadoop2-compat
 3.0.0-SNAPSHOT
 jar
 https://www.apache.org/licenses/LICENSE-2.0.txt;>Apache License, Version 
2.0
-
+
 org.apache.hbase
 http://hbase.apache.org/hbase-metrics;>hbase-metrics
 3.0.0-SNAPSHOT
 jar
 https://www.apache.org/licenses/LICENSE-2.0.txt;>Apache License, Version 
2.0
-
+
 org.apache.hbase
 

[01/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-10 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 6ea4056e5 -> 476c54ede


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/476c54ed/hbase-spark/dependency-convergence.html
--
diff --git a/hbase-spark/dependency-convergence.html 
b/hbase-spark/dependency-convergence.html
index 3e06cc6..7807f9d 100644
--- a/hbase-spark/dependency-convergence.html
+++ b/hbase-spark/dependency-convergence.html
@@ -1,5 +1,5 @@
 http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd;>
-
+
 http://www.w3.org/1999/xhtml; xml:lang="en" lang="en">
   
 
@@ -10,7 +10,7 @@
   @import url("./css/site.css");
 
 
-
+
 
 
 
@@ -27,7 +27,7 @@
 
 
 
-Last Published: 2017-06-09
+Last Published: 2017-06-10
   | Version: 
3.0.0-SNAPSHOT
   
 Apache HBase - Spark



[01/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-09 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 26b4148ad -> 77a552c41


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/77a552c4/devapidocs/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.html
 
b/devapidocs/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.html
index 532c0e4..0e1f18f 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.html
@@ -652,7 +652,7 @@ extends 
 
 tablePriorities
-private finalMasterProcedureScheduler.TablePriorities
 tablePriorities
+private finalMasterProcedureScheduler.TablePriorities
 tablePriorities
 
 
 
@@ -669,7 +669,7 @@ extends 
 
 MasterProcedureScheduler
-publicMasterProcedureScheduler(org.apache.hadoop.conf.Configurationconf)
+publicMasterProcedureScheduler(org.apache.hadoop.conf.Configurationconf)
 
 
 
@@ -686,7 +686,7 @@ extends 
 
 yield
-publicvoidyield(Procedureproc)
+publicvoidyield(Procedureproc)
 Description copied from 
interface:ProcedureScheduler
 The procedure can't run at the moment.
  add it back to the queue, giving priority to someone else.
@@ -702,7 +702,7 @@ extends 
 
 enqueue
-protectedvoidenqueue(Procedureproc,
+protectedvoidenqueue(Procedureproc,
booleanaddFront)
 Description copied from 
class:AbstractProcedureScheduler
 Add the procedure to the queue.
@@ -722,7 +722,7 @@ extends 
 
 doAdd
-privateT extends http://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true;
 title="class or interface in 
java.lang">ComparableTvoiddoAdd(MasterProcedureScheduler.FairQueueTfairq,
+privateT extends http://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true;
 title="class or interface in 
java.lang">ComparableTvoiddoAdd(MasterProcedureScheduler.FairQueueTfairq,
  MasterProcedureScheduler.QueueTqueue,
  Procedureproc,
  booleanaddFront)
@@ -734,7 +734,7 @@ extends 
 
 queueHasRunnables
-protectedbooleanqueueHasRunnables()
+protectedbooleanqueueHasRunnables()
 Description copied from 
class:AbstractProcedureScheduler
 Returns true if there are procedures available to process.
  NOTE: this method is called with the sched lock held.
@@ -752,7 +752,7 @@ extends 
 
 dequeue
-protectedProceduredequeue()
+protectedProceduredequeue()
 Description copied from 
class:AbstractProcedureScheduler
 Fetch one Procedure from the queue
  NOTE: this method is called with the sched lock held.
@@ -770,7 +770,7 @@ extends 
 
 doPoll
-privateT extends http://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true;
 title="class or interface in java.lang">ComparableTProceduredoPoll(MasterProcedureScheduler.FairQueueTfairq)
+privateT extends http://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true;
 title="class or interface in java.lang">ComparableTProceduredoPoll(MasterProcedureScheduler.FairQueueTfairq)
 
 
 
@@ -779,7 +779,7 @@ extends 
 
 createLockInfo
-privateLockInfocreateLockInfo(LockInfo.ResourceTyperesourceType,
+privateLockInfocreateLockInfo(LockInfo.ResourceTyperesourceType,
 http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringresourceName,
 LockAndQueuequeue)
 
@@ -790,7 +790,7 @@ extends 
 
 listLocks
-publichttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListLockInfolistLocks()
+publichttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListLockInfolistLocks()
 Description copied from 
interface:ProcedureScheduler
 List lock queues.
 
@@ -805,7 +805,7 @@ extends 
 
 getLockInfoForResource
-publicLockInfogetLockInfoForResource(LockInfo.ResourceTyperesourceType,
+publicLockInfogetLockInfoForResource(LockInfo.ResourceTyperesourceType,
http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringresourceName)
 
 Returns:
@@ -819,7 +819,7 @@ extends 
 
 clear
-publicvoidclear()
+publicvoidclear()
 Description copied from 
interface:ProcedureScheduler
 Clear current state of scheduler such that it is equivalent 
to newly created scheduler.
  Used for testing failure and recovery. To emulate server crash/restart,
@@ -832,7 +832,7 @@ extends 
 
 clearQueue
-protectedvoidclearQueue()
+protectedvoidclearQueue()
 
 
 
@@ -843,7 +843,7 @@ extends 
 
 clear
-privateT extends 

[01/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-07 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site f4a44f441 -> b44796ef0


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b44796ef/devapidocs/org/apache/hadoop/hbase/client/Scan.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/Scan.html 
b/devapidocs/org/apache/hadoop/hbase/client/Scan.html
index 684300f..97f06eb 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/Scan.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/Scan.html
@@ -4,7 +4,7 @@
 
 
 
-Scan (Apache HBase 2.0.0-SNAPSHOT API)
+Scan (Apache HBase 3.0.0-SNAPSHOT API)
 
 
 
@@ -12,14 +12,14 @@
 

[01/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-06 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site ba217cb74 -> 6ad4f21aa


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6ad4f21a/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/StoreFile.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/StoreFile.html 
b/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/StoreFile.html
index 846a89c..c0c4e4b 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/StoreFile.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/StoreFile.html
@@ -4,7 +4,7 @@
 
 
 
-Uses of Class org.apache.hadoop.hbase.regionserver.StoreFile (Apache 
HBase 2.0.0-SNAPSHOT API)
+Uses of Interface org.apache.hadoop.hbase.regionserver.StoreFile 
(Apache HBase 2.0.0-SNAPSHOT API)
 
 
 
@@ -12,7 +12,7 @@
 

[01/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-03 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 3b85ae9b2 -> c9d354248


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c9d35424/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestReplicaWithCluster.RegionServerStoppedCopro.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestReplicaWithCluster.RegionServerStoppedCopro.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestReplicaWithCluster.RegionServerStoppedCopro.html
index ebf0532..fd80980 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestReplicaWithCluster.RegionServerStoppedCopro.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestReplicaWithCluster.RegionServerStoppedCopro.html
@@ -48,493 +48,493 @@
 040import 
org.apache.hadoop.hbase.HConstants;
 041import 
org.apache.hadoop.hbase.HTableDescriptor;
 042import 
org.apache.hadoop.hbase.RegionLocations;
-043import org.apache.hadoop.hbase.Waiter;
-044
-045import 
org.apache.hadoop.hbase.client.replication.ReplicationAdmin;
-046import 
org.apache.hadoop.hbase.coprocessor.ObserverContext;
-047import 
org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
-048import 
org.apache.hadoop.hbase.coprocessor.RegionObserver;
-049import 
org.apache.hadoop.hbase.ipc.RpcControllerFactory;
-050import 
org.apache.hadoop.hbase.regionserver.RegionScanner;
-051import 
org.apache.hadoop.hbase.regionserver.RegionServerStoppedException;
-052import 
org.apache.hadoop.hbase.regionserver.StorefileRefresherChore;
-053import 
org.apache.hadoop.hbase.regionserver.TestHRegionServerBulkLoad;
-054import 
org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-055import 
org.apache.hadoop.hbase.testclassification.ClientTests;
-056import 
org.apache.hadoop.hbase.testclassification.MediumTests;
-057import 
org.apache.hadoop.hbase.util.Bytes;
-058import 
org.apache.hadoop.hbase.util.Pair;
-059import 
org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster;
-060import org.junit.AfterClass;
-061import org.junit.Assert;
-062import org.junit.BeforeClass;
-063import org.junit.Test;
-064import 
org.junit.experimental.categories.Category;
-065
-066@Category({MediumTests.class, 
ClientTests.class})
-067public class TestReplicaWithCluster {
-068  private static final Log LOG = 
LogFactory.getLog(TestReplicaWithCluster.class);
-069
-070  private static final int NB_SERVERS = 
3;
-071  private static final byte[] row = 
TestReplicaWithCluster.class.getName().getBytes();
-072  private static final 
HBaseTestingUtility HTU = new HBaseTestingUtility();
-073
-074  // second minicluster used in testing 
of replication
-075  private static HBaseTestingUtility 
HTU2;
-076  private static final byte[] f = 
HConstants.CATALOG_FAMILY;
-077
-078  private final static int REFRESH_PERIOD 
= 1000;
-079  private final static int 
META_SCAN_TIMEOUT_IN_MILLISEC = 200;
-080
-081  /**
-082   * This copro is used to synchronize 
the tests.
-083   */
-084  public static class SlowMeCopro 
implements RegionObserver {
-085static final AtomicLong sleepTime = 
new AtomicLong(0);
-086static final 
AtomicReference<CountDownLatch> cdl = new AtomicReference<>(new 
CountDownLatch(0));
-087
-088public SlowMeCopro() {
-089}
-090
-091@Override
-092public void preGetOp(final 
ObserverContext<RegionCoprocessorEnvironment> e,
-093 final Get get, 
final List<Cell> results) throws IOException {
-094
-095  if 
(e.getEnvironment().getRegion().getRegionInfo().getReplicaId() == 0) {
-096CountDownLatch latch = 
cdl.get();
-097try {
-098  if (sleepTime.get() > 0) {
-099LOG.info("Sleeping for " + 
sleepTime.get() + " ms");
-100
Thread.sleep(sleepTime.get());
-101  } else if (latch.getCount() 
> 0) {
-102LOG.info("Waiting for the 
counterCountDownLatch");
-103latch.await(2, 
TimeUnit.MINUTES); // To help the tests to finish.
-104if (latch.getCount() > 0) 
{
-105  throw new 
RuntimeException("Can't wait more");
-106}
-107  }
-108} catch (InterruptedException e1) 
{
-109  LOG.error(e1);
-110}
-111  } else {
-112LOG.info("We're not the primary 
replicas.");
-113  }
-114}
-115  }
-116
-117  /**
-118   * This copro is used to simulate 
region server down exception for Get and Scan
-119   */
-120  public static class 
RegionServerStoppedCopro implements RegionObserver {
-121
-122public RegionServerStoppedCopro() {
-123}
-124
-125@Override
-126public void preGetOp(final 
ObserverContext<RegionCoprocessorEnvironment> e,
-127final Get get, final 
List<Cell> results) throws IOException {
-128
-129  int replicaId = 
e.getEnvironment().getRegion().getRegionInfo().getReplicaId();
+043import 
org.apache.hadoop.hbase.ServerName;
+044import 
org.apache.hadoop.hbase.TableName;

[01/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-01 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 8ecf5078b -> 6dd311171


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6dd31117/devapidocs/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.html
 
b/devapidocs/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.html
new file mode 100644
index 000..de62ff5
--- /dev/null
+++ 
b/devapidocs/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.html
@@ -0,0 +1,1448 @@
+http://www.w3.org/TR/html4/loose.dtd;>
+
+
+
+
+
+MergeTableRegionsProcedure (Apache HBase 2.0.0-SNAPSHOT API)
+
+
+
+
+
+var methods = 
{"i0":10,"i1":9,"i2":9,"i3":10,"i4":10,"i5":10,"i6":9,"i7":9,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":9,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10};
+var tabs = {65535:["t0","All Methods"],1:["t1","Static 
Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]};
+var altColor = "altColor";
+var rowColor = "rowColor";
+var tableTab = "tableTab";
+var activeTableTab = "activeTableTab";
+
+
+JavaScript is disabled on your browser.
+
+
+
+
+
+Skip navigation links
+
+
+
+
+Overview
+Package
+Class
+Use
+Tree
+Deprecated
+Index
+Help
+
+
+
+
+PrevClass
+NextClass
+
+
+Frames
+NoFrames
+
+
+AllClasses
+
+
+
+
+
+
+
+Summary:
+Nested|
+Field|
+Constr|
+Method
+
+
+Detail:
+Field|
+Constr|
+Method
+
+
+
+
+
+
+
+
+org.apache.hadoop.hbase.master.assignment
+Class 
MergeTableRegionsProcedure
+
+
+
+http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">java.lang.Object
+
+
+org.apache.hadoop.hbase.procedure2.ProcedureTEnvironment
+
+
+org.apache.hadoop.hbase.procedure2.StateMachineProcedureMasterProcedureEnv,TState
+
+
+org.apache.hadoop.hbase.master.procedure.AbstractStateMachineTableProcedureorg.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.MergeTableRegionsState
+
+
+org.apache.hadoop.hbase.master.assignment.MergeTableRegionsProcedure
+
+
+
+
+
+
+
+
+
+
+
+
+
+All Implemented Interfaces:
+http://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true;
 title="class or interface in java.lang">ComparableProcedureMasterProcedureEnv, TableProcedureInterface
+
+
+
+@InterfaceAudience.Private
+public class MergeTableRegionsProcedure
+extends AbstractStateMachineTableProcedureorg.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.MergeTableRegionsState
+The procedure to Merge a region in a table.
+ This procedure takes an exclusive table lock since it is working over 
multiple regions.
+ It holds the lock for the life of the procedure.
+
+
+
+
+
+
+
+
+
+
+
+Nested Class Summary
+
+
+
+
+Nested classes/interfaces inherited from 
classorg.apache.hadoop.hbase.procedure2.StateMachineProcedure
+StateMachineProcedure.Flow
+
+
+
+
+
+Nested classes/interfaces inherited from 
classorg.apache.hadoop.hbase.procedure2.Procedure
+Procedure.LockState
+
+
+
+
+
+Nested classes/interfaces inherited from 
interfaceorg.apache.hadoop.hbase.master.procedure.TableProcedureInterface
+TableProcedureInterface.TableOperationType
+
+
+
+
+
+
+
+
+Field Summary
+
+Fields
+
+Modifier and Type
+Field and Description
+
+
+private boolean
+forcible
+
+
+private boolean
+lock
+
+
+private static 
org.apache.commons.logging.Log
+LOG
+
+
+private HRegionInfo
+mergedRegion
+
+
+private ServerName
+regionLocation
+
+
+private HRegionInfo[]
+regionsToMerge
+
+
+private http://docs.oracle.com/javase/8/docs/api/java/lang/Boolean.html?is-external=true;
 title="class or interface in java.lang">Boolean
+traceEnabled
+
+
+
+
+
+
+Fields inherited from classorg.apache.hadoop.hbase.procedure2.Procedure
+NO_PROC_ID,
 NO_TIMEOUT
+
+
+
+
+
+
+
+
+Constructor Summary
+
+Constructors
+
+Constructor and Description
+
+
+MergeTableRegionsProcedure()
+
+
+MergeTableRegionsProcedure(MasterProcedureEnvenv,
+  HRegionInfo[]regionsToMerge,
+  booleanforcible)
+
+
+MergeTableRegionsProcedure(MasterProcedureEnvenv,
+  HRegionInforegionToMergeA,
+  HRegionInforegionToMergeB)
+
+
+MergeTableRegionsProcedure(MasterProcedureEnvenv,
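
As a hedged companion to the MergeTableRegionsProcedure description above: region merges that run this procedure are normally submitted from the client through Admin#mergeRegionsAsync (an assumed entry point; the region names passed in below are placeholders).

import java.util.concurrent.Future;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class MergeRegionsSketch {
  // regionA / regionB: encoded or full region names of two adjacent regions of the same table.
  public static void merge(byte[] regionA, byte[] regionB) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // forcible = false: only adjacent regions may be merged.
      Future<Void> pending = admin.mergeRegionsAsync(regionA, regionB, false);
      // The master holds the exclusive table lock for the life of the procedure.
      pending.get();
    }
  }
}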

[01/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-23 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 64facdcf5 -> dab57116f


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dab57116/devapidocs/org/apache/hadoop/hbase/quotas/QuotaSettingsFactory.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/quotas/QuotaSettingsFactory.html 
b/devapidocs/org/apache/hadoop/hbase/quotas/QuotaSettingsFactory.html
index f088abe..ca1d939 100644
--- a/devapidocs/org/apache/hadoop/hbase/quotas/QuotaSettingsFactory.html
+++ b/devapidocs/org/apache/hadoop/hbase/quotas/QuotaSettingsFactory.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":9,"i1":9,"i2":9,"i3":9,"i4":9,"i5":9,"i6":9,"i7":9,"i8":9,"i9":9,"i10":9,"i11":9,"i12":9,"i13":9,"i14":9,"i15":9,"i16":9,"i17":9,"i18":9};
+var methods = 
{"i0":9,"i1":9,"i2":9,"i3":9,"i4":9,"i5":9,"i6":9,"i7":9,"i8":9,"i9":9,"i10":9,"i11":9,"i12":9,"i13":9,"i14":9,"i15":9,"i16":9,"i17":9,"i18":9,"i19":9,"i20":9,"i21":9,"i22":9,"i23":9};
 var tabs = {65535:["t0","All Methods"],1:["t1","Static 
Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -110,7 +110,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Public
-public class QuotaSettingsFactory
+public class QuotaSettingsFactory
 extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
 
 
@@ -186,35 +186,73 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
   
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Quotasquotas)
 
 
+(package private) static QuotaSettings
+fromSpace(TableNametable,
+ http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringnamespace,
+ 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuotaprotoQuota)
+
+
 (package private) static http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListQuotaSettings
 fromTableQuotas(TableNametableName,

org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Quotasquotas)
 
-
+
 private static http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListQuotaSettings
 fromThrottle(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringuserName,
 TableNametableName,
 http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringnamespace,
 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Throttlethrottle)
 
-
+
 (package private) static http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListQuotaSettings
 fromUserQuotas(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringuserName,
   
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Quotasquotas)
 
-
+
 (package private) static http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListQuotaSettings
 fromUserQuotas(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringuserName,
   http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringnamespace,
   
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Quotasquotas)
 
-
+
 (package private) static http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListQuotaSettings
 fromUserQuotas(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringuserName,
   TableNametableName,
   
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Quotasquotas)
 
-
+
+static QuotaSettings
+limitNamespaceSpace(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringnamespace,
+   longsizeLimit,
+   SpaceViolationPolicyviolationPolicy)
+Creates a QuotaSettings 
object to limit the FileSystem space usage for the given
+ namespace to the given size in bytes.
+
+
+
+static QuotaSettings
+limitTableSpace(TableNametableName,
+   longsizeLimit,
+   SpaceViolationPolicyviolationPolicy)
+Creates a QuotaSettings 
object to limit the FileSystem space usage for the given table
+ to the given size in bytes.
+
+
+
+static 

[01/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-19 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site b78b944c1 -> c635e71ba


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c635e71b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestReplicaWithCluster.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestReplicaWithCluster.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestReplicaWithCluster.html
index a519d7c..ebf0532 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestReplicaWithCluster.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestReplicaWithCluster.html
@@ -47,538 +47,612 @@
 039import 
org.apache.hadoop.hbase.HColumnDescriptor;
 040import 
org.apache.hadoop.hbase.HConstants;
 041import 
org.apache.hadoop.hbase.HTableDescriptor;
-042import org.apache.hadoop.hbase.Waiter;
-043
-044import 
org.apache.hadoop.hbase.client.replication.ReplicationAdmin;
-045import 
org.apache.hadoop.hbase.coprocessor.ObserverContext;
-046import 
org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
-047import 
org.apache.hadoop.hbase.coprocessor.RegionObserver;
-048import 
org.apache.hadoop.hbase.ipc.RpcControllerFactory;
-049import 
org.apache.hadoop.hbase.regionserver.RegionScanner;
-050import 
org.apache.hadoop.hbase.regionserver.RegionServerStoppedException;
-051import 
org.apache.hadoop.hbase.regionserver.StorefileRefresherChore;
-052import 
org.apache.hadoop.hbase.regionserver.TestHRegionServerBulkLoad;
-053import 
org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-054import 
org.apache.hadoop.hbase.testclassification.ClientTests;
-055import 
org.apache.hadoop.hbase.testclassification.MediumTests;
-056import 
org.apache.hadoop.hbase.util.Bytes;
-057import 
org.apache.hadoop.hbase.util.Pair;
-058import 
org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster;
-059import org.junit.AfterClass;
-060import org.junit.Assert;
-061import org.junit.BeforeClass;
-062import org.junit.Test;
-063import 
org.junit.experimental.categories.Category;
-064
-065@Category({MediumTests.class, 
ClientTests.class})
-066public class TestReplicaWithCluster {
-067  private static final Log LOG = 
LogFactory.getLog(TestReplicaWithCluster.class);
-068
-069  private static final int NB_SERVERS = 
3;
-070  private static final byte[] row = 
TestReplicaWithCluster.class.getName().getBytes();
-071  private static final 
HBaseTestingUtility HTU = new HBaseTestingUtility();
-072
-073  // second minicluster used in testing 
of replication
-074  private static HBaseTestingUtility 
HTU2;
-075  private static final byte[] f = 
HConstants.CATALOG_FAMILY;
-076
-077  private final static int REFRESH_PERIOD 
= 1000;
-078
-079  /**
-080   * This copro is used to synchronize 
the tests.
-081   */
-082  public static class SlowMeCopro 
implements RegionObserver {
-083static final AtomicLong sleepTime = 
new AtomicLong(0);
-084static final 
AtomicReference<CountDownLatch> cdl = new AtomicReference<>(new 
CountDownLatch(0));
-085
-086public SlowMeCopro() {
-087}
-088
-089@Override
-090public void preGetOp(final 
ObserverContext<RegionCoprocessorEnvironment> e,
-091 final Get get, 
final List<Cell> results) throws IOException {
-092
-093  if 
(e.getEnvironment().getRegion().getRegionInfo().getReplicaId() == 0) {
-094CountDownLatch latch = 
cdl.get();
-095try {
-096  if (sleepTime.get() > 0) {
-097LOG.info("Sleeping for " + 
sleepTime.get() + " ms");
-098
Thread.sleep(sleepTime.get());
-099  } else if (latch.getCount() 
> 0) {
-100LOG.info("Waiting for the 
counterCountDownLatch");
-101latch.await(2, 
TimeUnit.MINUTES); // To help the tests to finish.
-102if (latch.getCount() > 0) 
{
-103  throw new 
RuntimeException("Can't wait more");
-104}
-105  }
-106} catch (InterruptedException e1) 
{
-107  LOG.error(e1);
-108}
-109  } else {
-110LOG.info("We're not the primary 
replicas.");
-111  }
-112}
-113  }
-114
-115  /**
-116   * This copro is used to simulate 
region server down exception for Get and Scan
-117   */
-118  public static class 
RegionServerStoppedCopro implements RegionObserver {
-119
-120public RegionServerStoppedCopro() {
-121}
-122
-123@Override
-124public void preGetOp(final 
ObserverContext<RegionCoprocessorEnvironment> e,
-125final Get get, final 
List<Cell> results) throws IOException {
-126
-127  int replicaId = 
e.getEnvironment().getRegion().getRegionInfo().getReplicaId();
+042import 
org.apache.hadoop.hbase.RegionLocations;
+043import org.apache.hadoop.hbase.Waiter;
+044
+045import 
org.apache.hadoop.hbase.client.replication.ReplicationAdmin;
+046import 
org.apache.hadoop.hbase.coprocessor.ObserverContext;
+047import 

[01/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-18 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 848b47918 -> 8b4cf63f4


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8b4cf63f/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.ModifyColumnFamilyProcedureBiConsumer.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.ModifyColumnFamilyProcedureBiConsumer.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.ModifyColumnFamilyProcedureBiConsumer.html
index 4fd4af0..1b2d845 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.ModifyColumnFamilyProcedureBiConsumer.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.ModifyColumnFamilyProcedureBiConsumer.html
@@ -186,2271 +186,2272 @@
 178import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse;
 179import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigRequest;
 180import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigResponse;
-181import 
org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
-182import 
org.apache.hadoop.hbase.snapshot.RestoreSnapshotException;
-183import 
org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
-184import 
org.apache.hadoop.hbase.util.Bytes;
-185import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-186import 
org.apache.hadoop.hbase.util.ForeignExceptionUtil;
-187import 
org.apache.hadoop.hbase.util.Pair;
-188
-189/**
-190 * The implementation of AsyncAdmin.
-191 */
-192@InterfaceAudience.Private
-193@InterfaceStability.Evolving
-194public class AsyncHBaseAdmin implements 
AsyncAdmin {
-195  public static final String 
FLUSH_TABLE_PROCEDURE_SIGNATURE = "flush-table-proc";
-196
-197  private static final Log LOG = 
LogFactory.getLog(AsyncHBaseAdmin.class);
-198
-199  private final AsyncConnectionImpl 
connection;
-200
-201  private final RawAsyncTable 
metaTable;
-202
-203  private final long rpcTimeoutNs;
-204
-205  private final long 
operationTimeoutNs;
-206
-207  private final long pauseNs;
-208
-209  private final int maxAttempts;
-210
-211  private final int startLogErrorsCnt;
-212
-213  private final NonceGenerator ng;
-214
-215  AsyncHBaseAdmin(AsyncConnectionImpl 
connection) {
-216this.connection = connection;
-217this.metaTable = 
connection.getRawTable(META_TABLE_NAME);
-218this.rpcTimeoutNs = 
connection.connConf.getRpcTimeoutNs();
-219this.operationTimeoutNs = 
connection.connConf.getOperationTimeoutNs();
-220this.pauseNs = 
connection.connConf.getPauseNs();
-221this.maxAttempts = 
connection.connConf.getMaxRetries();
-222this.startLogErrorsCnt = 
connection.connConf.getStartLogErrorsCnt();
-223this.ng = 
connection.getNonceGenerator();
-224  }
-225
-226  private <T> 
MasterRequestCallerBuilder<T> newMasterCaller() {
-227return 
this.connection.callerFactory.T masterRequest()
-228.rpcTimeout(rpcTimeoutNs, 
TimeUnit.NANOSECONDS)
-229
.operationTimeout(operationTimeoutNs, TimeUnit.NANOSECONDS)
-230.pause(pauseNs, 
TimeUnit.NANOSECONDS).maxAttempts(maxAttempts)
-231
.startLogErrorsCnt(startLogErrorsCnt);
-232  }
-233
-234  private T 
AdminRequestCallerBuilderT newAdminCaller() {
-235return 
this.connection.callerFactory.T adminRequest()
-236.rpcTimeout(rpcTimeoutNs, 
TimeUnit.NANOSECONDS)
-237
.operationTimeout(operationTimeoutNs, TimeUnit.NANOSECONDS)
-238.pause(pauseNs, 
TimeUnit.NANOSECONDS).maxAttempts(maxAttempts)
-239
.startLogErrorsCnt(startLogErrorsCnt);
-240  }
-241
-242  @FunctionalInterface
-243  private interface MasterRpcCall<RESP, REQ> {
-244    void call(MasterService.Interface stub, HBaseRpcController controller, REQ req,
-245        RpcCallback<RESP> done);
-246  }
-247
-248  @FunctionalInterface
-249  private interface AdminRpcCall<RESP, REQ> {
-250    void call(AdminService.Interface stub, HBaseRpcController controller, REQ req,
-251        RpcCallback<RESP> done);
-252  }
-253
-254  @FunctionalInterface
-255  private interface Converter<D, S> {
-256    D convert(S src) throws IOException;
-257  }
-258
-259  private <PREQ, PRESP, RESP> CompletableFuture<RESP> call(HBaseRpcController controller,
-260      MasterService.Interface stub, PREQ preq, MasterRpcCall<PRESP, PREQ> rpcCall,
-261      Converter<RESP, PRESP> respConverter) {
-262    CompletableFuture<RESP> future = new CompletableFuture<>();
-263    rpcCall.call(stub, controller, preq, new RpcCallback<PRESP>() {
-264
-265      @Override
-266      public void run(PRESP resp) {
-267        if (controller.failed()) {
-268          future.completeExceptionally(controller.getFailed());
-269        } else {
-270          try {
-271            future.complete(respConverter.convert(resp));
-272          } catch (IOException e) {
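
The block removed above is an adapter: a protobuf-style callback RPC is bridged into a CompletableFuture, with a small Converter turning the raw protobuf response into the type handed back to the caller. A dependency-free sketch of the same pattern (the interface and method names below are illustrative, not the real AsyncHBaseAdmin API):

import java.io.IOException;
import java.util.concurrent.CompletableFuture;
import java.util.function.BiConsumer;

public final class FutureAdapter {

  /** Converts a raw protobuf response into the type handed back to the caller. */
  @FunctionalInterface
  public interface Converter<D, S> {
    D convert(S src) throws IOException;
  }

  /** Callback-style RPC: invoke the stub and report either the raw response or a failure. */
  @FunctionalInterface
  public interface RpcCall<PRESP, PREQ> {
    void call(PREQ request, BiConsumer<PRESP, Throwable> done);
  }

  /** Bridge one callback-style call into a CompletableFuture, converting the response. */
  public static <PREQ, PRESP, RESP> CompletableFuture<RESP> call(PREQ request,
      RpcCall<PRESP, PREQ> rpcCall, Converter<RESP, PRESP> converter) {
    CompletableFuture<RESP> future = new CompletableFuture<>();
    rpcCall.call(request, (resp, error) -> {
      if (error != null) {
        future.completeExceptionally(error);          // the RPC layer reported a failure
      } else {
        try {
          future.complete(converter.convert(resp));   // translate the protobuf response
        } catch (IOException e) {
          future.completeExceptionally(e);
        }
      }
    });
    return future;
  }
}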

[01/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-15 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site dd8334d37 -> f55ebeaa5


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f55ebeaa/testdevapidocs/src-html/org/apache/hadoop/hbase/ipc/AbstractTestIPC.TestFailingRpcServer.FailingConnection.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/ipc/AbstractTestIPC.TestFailingRpcServer.FailingConnection.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/ipc/AbstractTestIPC.TestFailingRpcServer.FailingConnection.html
index 8335bdf..e49c7e6 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/ipc/AbstractTestIPC.TestFailingRpcServer.FailingConnection.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/ipc/AbstractTestIPC.TestFailingRpcServer.FailingConnection.html
@@ -323,138 +323,139 @@
 315  new 
InetSocketAddress("localhost", 0), conf, scheduler);
 316}
 317
-318class FailingConnection extends 
Connection {
-319  public 
FailingConnection(SocketChannel channel, long lastContact) {
-320super(channel, lastContact);
-321  }
-322
-323  @Override
-324  public void processRequest(ByteBuff 
buf) throws IOException, InterruptedException {
-325// this will throw exception 
after the connection header is read, and an RPC is sent
-326// from client
-327throw new 
DoNotRetryIOException("Failing for test");
-328  }
-329}
-330
-331@Override
-332protected Connection 
getConnection(SocketChannel channel, long time) {
-333  return new 
FailingConnection(channel, time);
-334}
-335  }
-336
-337  /** Tests that the connection closing 
is handled by the client with outstanding RPC calls */
-338  @Test
-339  public void 
testConnectionCloseWithOutstandingRPCs() throws InterruptedException, 
IOException {
-340Configuration conf = new 
Configuration(CONF);
-341RpcServer rpcServer = new 
TestFailingRpcServer(conf);
-342try (AbstractRpcClient? 
client = createRpcClient(conf)) {
-343  rpcServer.start();
-344  BlockingInterface stub = 
newBlockingStub(client, rpcServer.getListenerAddress());
-345  EchoRequestProto param = 
EchoRequestProto.newBuilder().setMessage("hello").build();
-346  stub.echo(null, param);
-347  fail("RPC should have failed 
because connection closed");
-348} catch (ServiceException e) {
-349  LOG.info("Caught expected 
exception: " + e.toString());
-350} finally {
-351  rpcServer.stop();
-352}
-353  }
-354
-355  @Test
-356  public void testAsyncEcho() throws 
IOException {
-357Configuration conf = 
HBaseConfiguration.create();
-358RpcServer rpcServer = 
RpcServerFactory.createRpcServer(null,
-359"testRpcServer", 
Lists.newArrayList(new BlockingServiceAndInterface(
-360SERVICE, null)), new 
InetSocketAddress("localhost", 0), CONF,
-361new FifoRpcScheduler(CONF, 1));
-362try (AbstractRpcClient? 
client = createRpcClient(conf)) {
-363  rpcServer.start();
-364  Interface stub = newStub(client, 
rpcServer.getListenerAddress());
-365      int num = 10;
-366      List<HBaseRpcController> pcrcList = new ArrayList<>();
-367      List<BlockingRpcCallback<EchoResponseProto>> callbackList = new ArrayList<>();
-368      for (int i = 0; i < num; i++) {
-369        HBaseRpcController pcrc = new HBaseRpcControllerImpl();
-370        BlockingRpcCallback<EchoResponseProto> done = new BlockingRpcCallback<>();
-371        stub.echo(pcrc, EchoRequestProto.newBuilder().setMessage("hello-" + i).build(), done);
-372        pcrcList.add(pcrc);
-373        callbackList.add(done);
-374      }
-375      for (int i = 0; i < num; i++) {
-376        HBaseRpcController pcrc = pcrcList.get(i);
-377        assertFalse(pcrc.failed());
-378        assertNull(pcrc.cellScanner());
-379        assertEquals("hello-" + i, callbackList.get(i).get().getMessage());
-380      }
-381} finally {
-382  rpcServer.stop();
-383}
-384  }
-385
-386  @Test
-387  public void testAsyncRemoteError() 
throws IOException {
-388AbstractRpcClient? client = 
createRpcClient(CONF);
-389RpcServer rpcServer = 
RpcServerFactory.createRpcServer(null,
-390"testRpcServer", 
Lists.newArrayList(new BlockingServiceAndInterface(
-391SERVICE, null)), new 
InetSocketAddress("localhost", 0), CONF,
-392new FifoRpcScheduler(CONF, 1));
-393try {
-394  rpcServer.start();
-395  Interface stub = newStub(client, 
rpcServer.getListenerAddress());
-396  
BlockingRpcCallbackEmptyResponseProto callback = new 
BlockingRpcCallback();
-397  HBaseRpcController pcrc = new 
HBaseRpcControllerImpl();
-398  stub.error(pcrc, 
EmptyRequestProto.getDefaultInstance(), callback);
-399  assertNull(callback.get());
-400  assertTrue(pcrc.failed());
-401  LOG.info("Caught expected 
exception: " + pcrc.getFailed());
-402  IOException ioe = 
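
testAsyncEcho above fires several echo RPCs, each with its own blocking callback, and only afterwards drains the results, so the requests overlap on the wire. A self-contained stand-in for such a latch-style callback (a simplified sketch, not HBase's BlockingRpcCallback itself):

import java.io.IOException;
import java.io.InterruptedIOException;

/** Minimal latch-style callback: run() delivers the value, get() blocks until it arrives. */
public class SimpleBlockingCallback<R> {
  private R result;
  private boolean resultSet = false;

  public synchronized void run(R parameter) {
    result = parameter;
    resultSet = true;
    notifyAll();                       // wake up the caller parked in get()
  }

  public synchronized R get() throws IOException {
    while (!resultSet) {
      try {
        wait();                        // wait for run() to deliver the response
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        throw new InterruptedIOException("Interrupted while waiting for the RPC response");
      }
    }
    return result;
  }
}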

[01/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-13 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 80faeee01 -> 8e0a51670


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8e0a5167/testdevapidocs/src-html/org/apache/hadoop/hbase/master/balancer/LoadBalancerPerformanceEvaluation.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/master/balancer/LoadBalancerPerformanceEvaluation.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/master/balancer/LoadBalancerPerformanceEvaluation.html
new file mode 100644
index 000..b040a50
--- /dev/null
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/master/balancer/LoadBalancerPerformanceEvaluation.html
@@ -0,0 +1,252 @@
+http://www.w3.org/TR/html4/loose.dtd;>
+
+
+Source code
+
+
+
+
+001/*
+002 * Licensed to the Apache Software 
Foundation (ASF) under one
+003 * or more contributor license 
agreements.  See the NOTICE file
+004 * distributed with this work for 
additional information
+005 * regarding copyright ownership.  The 
ASF licenses this file
+006 * to you under the Apache License, 
Version 2.0 (the
+007 * "License"); you may not use this file 
except in compliance
+008 * with the License.  You may obtain a 
copy of the License at
+009 *
+010 * 
http://www.apache.org/licenses/LICENSE-2.0
+011 *
+012 * Unless required by applicable law or 
agreed to in writing, software
+013 * distributed under the License is 
distributed on an "AS IS" BASIS,
+014 * WITHOUT WARRANTIES OR CONDITIONS OF 
ANY KIND, either express or implied.
+015 * See the License for the specific 
language governing permissions and
+016 * limitations under the License.
+017 */
+018
+019package 
org.apache.hadoop.hbase.master.balancer;
+020
+021import 
com.google.common.base.Preconditions;
+022import 
com.google.common.base.Stopwatch;
+023import 
org.apache.commons.cli.CommandLine;
+024import org.apache.commons.cli.Option;
+025import org.apache.commons.logging.Log;
+026import 
org.apache.commons.logging.LogFactory;
+027import 
org.apache.hadoop.hbase.HBaseCommonTestingUtility;
+028import 
org.apache.hadoop.hbase.HBaseInterfaceAudience;
+029import 
org.apache.hadoop.hbase.HConstants;
+030import 
org.apache.hadoop.hbase.HRegionInfo;
+031import 
org.apache.hadoop.hbase.ServerName;
+032import 
org.apache.hadoop.hbase.TableName;
+033import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
+034import 
org.apache.hadoop.hbase.master.LoadBalancer;
+035import 
org.apache.hadoop.hbase.util.AbstractHBaseTool;
+036import 
org.apache.hadoop.hbase.util.Bytes;
+037
+038import java.io.IOException;
+039import java.util.ArrayList;
+040import java.util.Collections;
+041import java.util.HashMap;
+042import java.util.List;
+043import java.util.Map;
+044
+045/**
+046 * Tool to test performance of different 
{@link org.apache.hadoop.hbase.master.LoadBalancer}
+047 * implementations.
+048 * Example command:
+049 * $ bin/hbase 
org.apache.hadoop.hbase.master.balancer.LoadBalancerPerformanceEvaluation
+050 *   -regions 1000 -servers 100
+051 *   -load_balancer 
org.apache.hadoop.hbase.master.balancer.SimpleLoadBalancer
+052 */
+053@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)
+054public class 
LoadBalancerPerformanceEvaluation extends AbstractHBaseTool {
+055  private static final Log LOG =
+056  
LogFactory.getLog(LoadBalancerPerformanceEvaluation.class.getName());
+057
+058  protected static final 
HBaseCommonTestingUtility UTIL = new HBaseCommonTestingUtility();
+059
+060  private static final int 
DEFAULT_NUM_REGIONS = 100;
+061  private static Option NUM_REGIONS_OPT = 
new Option("regions", true,
+062  "Number of regions to consider by 
load balancer. Default: " + DEFAULT_NUM_REGIONS);
+063
+064  private static final int 
DEFAULT_NUM_SERVERS = 1000;
+065  private static Option NUM_SERVERS_OPT = 
new Option("servers", true,
+066  "Number of servers to consider by 
load balancer. Default: " + DEFAULT_NUM_SERVERS);
+067
+068  private static final String 
DEFAULT_LOAD_BALANCER =
+069  
"org.apache.hadoop.hbase.master.balancer.StochasticLoadBalancer";
+070  private static Option LOAD_BALANCER_OPT 
= new Option("load_balancer", true,
+071  "Type of Load Balancer to use. 
Default: " + DEFAULT_LOAD_BALANCER);
+072
+073  private int numRegions;
+074  private int numServers;
+075  private String loadBalancerType;
+076  private Class<?> loadBalancerClazz;
+077
+078  private LoadBalancer loadBalancer;
+079
+080  // data
+081  private List<ServerName> servers;
+082  private List<HRegionInfo> regions;
+083  private Map<HRegionInfo, ServerName> regionServerMap;
+084  private Map<ServerName, List<HRegionInfo>> serverRegionMap;
+085
+086  // Non-default configurations.
+087  private void setupConf() {
+088    conf.setClass(HConstants.HBASE_MASTER_LOADBALANCER_CLASS, loadBalancerClazz, LoadBalancer.class);
+089    loadBalancer = LoadBalancerFactory.getLoadBalancer(conf);
+090  }
+091
+092  private void 
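
The new LoadBalancerPerformanceEvaluation class above is an AbstractHBaseTool, and its javadoc gives the intended invocation (bin/hbase org.apache.hadoop.hbase.master.balancer.LoadBalancerPerformanceEvaluation -regions 1000 -servers 100 -load_balancer ...). A minimal skeleton of a tool written against the same addOptions/processOptions/doWork contract; the class name, option defaults and empty doWork body below are illustrative only:

import org.apache.commons.cli.CommandLine;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.util.AbstractHBaseTool;
import org.apache.hadoop.util.ToolRunner;

/** Hypothetical skeleton in the style of the tool above; does no real balancing work. */
public class BalancerBenchSkeleton extends AbstractHBaseTool {
  private int numRegions;
  private int numServers;

  @Override
  protected void addOptions() {
    addOptWithArg("regions", "Number of synthetic regions to generate");
    addOptWithArg("servers", "Number of synthetic servers to generate");
  }

  @Override
  protected void processOptions(CommandLine cmd) {
    numRegions = Integer.parseInt(cmd.getOptionValue("regions", "100"));
    numServers = Integer.parseInt(cmd.getOptionValue("servers", "1000"));
  }

  @Override
  protected int doWork() throws Exception {
    // Build numRegions/numServers of synthetic cluster state and time the balancer here.
    return 0;
  }

  public static void main(String[] args) throws Exception {
    System.exit(ToolRunner.run(HBaseConfiguration.create(), new BalancerBenchSkeleton(), args));
  }
}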

[01/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-12 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site d5aa6a181 -> 709b8fccb


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/709b8fcc/devapidocs/src-html/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.html
index 40e88c1..740d74a 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.html
@@ -688,7 +688,7 @@
 680   */
 681  private long cloneSnapshot(final 
SnapshotDescription reqSnapshot, final TableName tableName,
 682  final SnapshotDescription snapshot, 
final HTableDescriptor snapshotTableDesc,
-683  final NonceKey nonceKey, final 
boolean restoreAcl) throws IOException {
+683  final NonceKey nonceKey) throws 
IOException {
 684MasterCoprocessorHost cpHost = 
master.getMasterCoprocessorHost();
 685HTableDescriptor htd = new 
HTableDescriptor(tableName, snapshotTableDesc);
 686if (cpHost != null) {
@@ -696,7 +696,7 @@
 688}
 689long procId;
 690try {
-691  procId = cloneSnapshot(snapshot, 
htd, nonceKey, restoreAcl);
+691  procId = cloneSnapshot(snapshot, 
htd, nonceKey);
 692} catch (IOException e) {
 693  LOG.error("Exception occurred while 
cloning the snapshot " + snapshot.getName()
 694+ " as table " + 
tableName.getNameAsString(), e);
@@ -720,7 +720,7 @@
 712   * @return procId the ID of the clone 
snapshot procedure
 713   */
 714  synchronized long cloneSnapshot(final 
SnapshotDescription snapshot,
-715  final HTableDescriptor 
hTableDescriptor, final NonceKey nonceKey, final boolean restoreAcl)
+715  final HTableDescriptor 
hTableDescriptor, final NonceKey nonceKey)
 716  throws HBaseSnapshotException {
 717TableName tableName = 
hTableDescriptor.getTableName();
 718
@@ -736,8 +736,8 @@
 728
 729try {
 730  long procId = 
master.getMasterProcedureExecutor().submitProcedure(
-731new 
CloneSnapshotProcedure(master.getMasterProcedureExecutor().getEnvironment(),
-732hTableDescriptor, snapshot, 
restoreAcl),
+731new CloneSnapshotProcedure(
+732  
master.getMasterProcedureExecutor().getEnvironment(), hTableDescriptor, 
snapshot),
 733nonceKey);
 734  
this.restoreTableToProcIdMap.put(tableName, procId);
 735  return procId;
@@ -755,8 +755,8 @@
 747   * @param nonceKey unique identifier to 
prevent duplicated RPC
 748   * @throws IOException
 749   */
-750  public long 
restoreOrCloneSnapshot(final SnapshotDescription reqSnapshot, final NonceKey 
nonceKey,
-751  final boolean restoreAcl) throws 
IOException {
+750  public long 
restoreOrCloneSnapshot(final SnapshotDescription reqSnapshot, final NonceKey 
nonceKey)
+751  throws IOException {
 752FileSystem fs = 
master.getMasterFileSystem().getFileSystem();
 753Path snapshotDir = 
SnapshotDescriptionUtils.getCompletedSnapshotDir(reqSnapshot, rootDir);
 754
@@ -787,394 +787,393 @@
 779if 
(MetaTableAccessor.tableExists(master.getConnection(), tableName)) {
 780  procId = 
restoreSnapshot(reqSnapshot, tableName, snapshot, snapshotTableDesc, 
nonceKey);
 781} else {
-782  procId =
-783  cloneSnapshot(reqSnapshot, 
tableName, snapshot, snapshotTableDesc, nonceKey, restoreAcl);
-784}
-785return procId;
-786  }
-787
-788  /**
-789   * Restore the specified snapshot.
-790   * The restore will fail if the 
destination table has a snapshot or restore in progress.
-791   *
-792   * @param reqSnapshot Snapshot 
Descriptor from request
-793   * @param tableName table to restore
-794   * @param snapshot Snapshot 
Descriptor
-795   * @param snapshotTableDesc Table 
Descriptor
-796   * @param nonceKey unique identifier to 
prevent duplicated RPC
-797   * @return procId the ID of the restore 
snapshot procedure
-798   * @throws IOException
-799   */
-800  private long restoreSnapshot(final 
SnapshotDescription reqSnapshot, final TableName tableName,
-801  final SnapshotDescription snapshot, 
final HTableDescriptor snapshotTableDesc,
-802  final NonceKey nonceKey) throws 
IOException {
-803MasterCoprocessorHost cpHost = 
master.getMasterCoprocessorHost();
-804
-805if 
(master.getTableStateManager().isTableState(
-806  
TableName.valueOf(snapshot.getTable()), TableState.State.ENABLED)) {
-807  throw new 
UnsupportedOperationException("Table '" +
-808
TableName.valueOf(snapshot.getTable()) + "' must be disabled in order to " +
-809"perform a restore 
operation.");
-810}
-811
-812// call Coprocessor pre hook
-813if (cpHost != null) {
-814  
cpHost.preRestoreSnapshot(reqSnapshot, snapshotTableDesc);
-815}
-816
-817long procId;
-818
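
The SnapshotManager change above centers on restoreOrCloneSnapshot: when the target table already exists the request becomes a restore, otherwise a clone. The same decision can be expressed client side with the public Admin API; a small sketch (disable before restore), assuming the snapshot was taken from the table passed in:

import java.io.IOException;

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;

public final class SnapshotRestoreHelper {
  /** Clone the snapshot if the target table is absent, otherwise disable and restore it. */
  public static void restoreOrClone(Admin admin, String snapshotName, TableName tableName)
      throws IOException {
    if (!admin.tableExists(tableName)) {
      admin.cloneSnapshot(snapshotName, tableName);   // brand new table from the snapshot
      return;
    }
    if (!admin.isTableDisabled(tableName)) {
      admin.disableTable(tableName);                  // restore requires a disabled table
    }
    admin.restoreSnapshot(snapshotName);              // roll the existing table back
  }
}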

[01/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-11 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site cac6146f8 -> 1241ee85f


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1241ee85/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.EnvironmentEdgeForMemstoreTest.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.EnvironmentEdgeForMemstoreTest.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.EnvironmentEdgeForMemstoreTest.html
index 6db2655..e2c4f9d 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.EnvironmentEdgeForMemstoreTest.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.EnvironmentEdgeForMemstoreTest.html
@@ -170,939 +170,938 @@
 162Scan scan = new Scan();
 163ListCell result = new 
ArrayList();
 164Configuration conf = 
HBaseConfiguration.create();
-165ScanInfo scanInfo =
-166new ScanInfo(conf, null, 0, 1, 
HConstants.LATEST_TIMESTAMP, KeepDeletedCells.FALSE, 0,
-167
this.memstore.getComparator());
-168ScanType scanType = 
ScanType.USER_SCAN;
-169StoreScanner s = new 
StoreScanner(scan, scanInfo, scanType, null, memstorescanners);
-170int count = 0;
-171try {
-172  while (s.next(result)) {
-173LOG.info(result);
-174count++;
-175// Row count is same as column 
count.
-176assertEquals(rowCount, 
result.size());
-177result.clear();
-178  }
-179} finally {
-180  s.close();
-181}
-182assertEquals(rowCount, count);
-183for (KeyValueScanner scanner : 
memstorescanners) {
-184  scanner.close();
-185}
-186
-187memstorescanners = 
this.memstore.getScanners(mvcc.getReadPoint());
-188// Now assert can count same number 
even if a snapshot mid-scan.
-189s = new StoreScanner(scan, scanInfo, 
scanType, null, memstorescanners);
-190count = 0;
-191try {
-192  while (s.next(result)) {
-193LOG.info(result);
-194// Assert the stuff is coming out 
in right order.
-195
assertTrue(CellUtil.matchingRow(result.get(0), Bytes.toBytes(count)));
-196count++;
-197// Row count is same as column 
count.
-198assertEquals(rowCount, 
result.size());
-199if (count == 2) {
-200  this.memstore.snapshot();
-201  LOG.info("Snapshotted");
-202}
-203result.clear();
-204  }
-205} finally {
-206  s.close();
-207}
-208assertEquals(rowCount, count);
-209for (KeyValueScanner scanner : 
memstorescanners) {
-210  scanner.close();
-211}
-212memstorescanners = 
this.memstore.getScanners(mvcc.getReadPoint());
-213// Assert that new values are seen in 
kvset as we scan.
-214long ts = 
System.currentTimeMillis();
-215s = new StoreScanner(scan, scanInfo, 
scanType, null, memstorescanners);
-216count = 0;
-217int snapshotIndex = 5;
-218try {
-219  while (s.next(result)) {
-220LOG.info(result);
-221// Assert the stuff is coming out 
in right order.
-222
assertTrue(CellUtil.matchingRow(result.get(0), Bytes.toBytes(count)));
-223// Row count is same as column 
count.
-224assertEquals("count=" + count + 
", result=" + result, rowCount, result.size());
-225count++;
-226if (count == snapshotIndex) {
-227  MemStoreSnapshot snapshot = 
this.memstore.snapshot();
-228  
this.memstore.clearSnapshot(snapshot.getId());
-229  // Added more rows into kvset.  
But the scanner wont see these rows.
-230  addRows(this.memstore, ts);
-231  LOG.info("Snapshotted, cleared 
it and then added values (which wont be seen)");
-232}
-233result.clear();
-234  }
-235} finally {
-236  s.close();
-237}
-238assertEquals(rowCount, count);
-239  }
-240
-241  /**
-242   * A simple test which verifies the 3 
possible states when scanning across snapshot.
-243   * @throws IOException
-244   * @throws CloneNotSupportedException
-245   */
-246  @Test
-247  public void testScanAcrossSnapshot2() 
throws IOException, CloneNotSupportedException {
-248// we are going to the scanning 
across snapshot with two kvs
-249// kv1 should always be returned 
before kv2
-250final byte[] one = 
Bytes.toBytes(1);
-251final byte[] two = 
Bytes.toBytes(2);
-252final byte[] f = 
Bytes.toBytes("f");
-253final byte[] q = 
Bytes.toBytes("q");
-254final byte[] v = Bytes.toBytes(3);
-255
-256final KeyValue kv1 = new 
KeyValue(one, f, q, v);
-257final KeyValue kv2 = new 
KeyValue(two, f, q, v);
-258
-259// use case 1: both kvs in kvset
-260this.memstore.add(kv1.clone(), 
null);
-261this.memstore.add(kv2.clone(), 
null);
-262verifyScanAcrossSnapshot2(kv1, 
kv2);
-263
-264// use case 2: both 
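
The test above walks a memstore scanner while snapshots are taken mid-scan and asserts the scanner still sees a consistent row count. A heavily condensed sketch of that scenario, reusing only the calls that appear in the excerpt (DefaultMemStore add/snapshot/clearSnapshot/getScanners); these are internal server classes whose exact signatures vary across HBase versions, so treat this purely as a sketch:

import java.util.List;

import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.regionserver.DefaultMemStore;
import org.apache.hadoop.hbase.regionserver.KeyValueScanner;
import org.apache.hadoop.hbase.regionserver.MemStoreSnapshot;
import org.apache.hadoop.hbase.util.Bytes;

public class MemStoreSnapshotSketch {
  public static void main(String[] args) throws Exception {
    DefaultMemStore memstore = new DefaultMemStore();
    KeyValue kv1 = new KeyValue(Bytes.toBytes(1), Bytes.toBytes("f"),
        Bytes.toBytes("q"), Bytes.toBytes(3));
    memstore.add(kv1, null);                                     // kv lives in the active set
    List<KeyValueScanner> scanners = memstore.getScanners(Long.MAX_VALUE); // opened pre-snapshot
    MemStoreSnapshot snapshot = memstore.snapshot();             // active set moves to the snapshot
    memstore.clearSnapshot(snapshot.getId());                    // "flush" finished, snapshot dropped
    // Scanners opened before the snapshot must still return kv1; that is the invariant asserted above.
    for (KeyValueScanner scanner : scanners) {
      scanner.close();
    }
  }
}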

[01/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-10 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 8678c699d -> dd7176bfc


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dd7176bf/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html
index 17bc96c..1642d61 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html
@@ -2317,7 +2317,7 @@
 2309  }
 2310
 2311  public long restoreSnapshot(final 
SnapshotDescription snapshotDesc,
-2312  final long nonceGroup, final long 
nonce) throws IOException {
+2312  final long nonceGroup, final long 
nonce, final boolean restoreAcl) throws IOException {
 2313checkInitialized();
 2314
getSnapshotManager().checkSnapshotSupport();
 2315
@@ -2329,1031 +2329,1032 @@
 2321new 
MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) {
 2322  @Override
 2323  protected void run() throws 
IOException {
-2324
setProcId(getSnapshotManager().restoreOrCloneSnapshot(snapshotDesc, 
getNonceKey()));
-2325  }
-2326
-2327  @Override
-2328  protected String getDescription() 
{
-2329return 
"RestoreSnapshotProcedure";
-2330  }
-2331});
-2332  }
-2333
-2334  @Override
-2335  public void checkTableModifiable(final 
TableName tableName)
-2336  throws IOException, 
TableNotFoundException, TableNotDisabledException {
-2337if (isCatalogTable(tableName)) {
-2338  throw new IOException("Can't 
modify catalog tables");
-2339}
-2340if 
(!MetaTableAccessor.tableExists(getConnection(), tableName)) {
-2341  throw new 
TableNotFoundException(tableName);
-2342}
-2343if 
(!getTableStateManager().isTableState(tableName, TableState.State.DISABLED)) 
{
-2344  throw new 
TableNotDisabledException(tableName);
-2345}
-2346  }
-2347
-2348  /**
-2349   * @return cluster status
-2350   */
-2351  public ClusterStatus 
getClusterStatus() throws InterruptedIOException {
-2352// Build Set of backup masters from 
ZK nodes
-2353ListString 
backupMasterStrings;
-2354try {
-2355  backupMasterStrings = 
ZKUtil.listChildrenNoWatch(this.zooKeeper,
-2356
this.zooKeeper.znodePaths.backupMasterAddressesZNode);
-2357} catch (KeeperException e) {
-2358  
LOG.warn(this.zooKeeper.prefix("Unable to list backup servers"), e);
-2359  backupMasterStrings = null;
-2360}
-2361
-2362    List<ServerName> backupMasters = null;
-2363    if (backupMasterStrings != null && !backupMasterStrings.isEmpty()) {
-2364      backupMasters = new ArrayList<>(backupMasterStrings.size());
-2365      for (String s: backupMasterStrings) {
-2366try {
-2367  byte [] bytes;
-2368  try {
-2369bytes = 
ZKUtil.getData(this.zooKeeper, ZKUtil.joinZNode(
-2370
this.zooKeeper.znodePaths.backupMasterAddressesZNode, s));
-2371  } catch (InterruptedException 
e) {
-2372throw new 
InterruptedIOException();
-2373  }
-2374  if (bytes != null) {
-2375ServerName sn;
-2376try {
-2377  sn = 
ProtobufUtil.parseServerNameFrom(bytes);
-2378} catch 
(DeserializationException e) {
-2379  LOG.warn("Failed parse, 
skipping registering backup server", e);
-2380  continue;
-2381}
-2382backupMasters.add(sn);
-2383  }
-2384} catch (KeeperException e) {
-2385  
LOG.warn(this.zooKeeper.prefix("Unable to get information about " +
-2386   "backup servers"), 
e);
-2387}
-2388  }
-2389      Collections.sort(backupMasters, new Comparator<ServerName>() {
-2390        @Override
-2391        public int compare(ServerName s1, ServerName s2) {
-2392          return s1.getServerName().compareTo(s2.getServerName());
-2393        }});
-2394    }
-2395
-2396String clusterId = fileSystemManager 
!= null ?
-2397  
fileSystemManager.getClusterId().toString() : null;
-2398SetRegionState 
regionsInTransition = assignmentManager != null ?
-2399  
assignmentManager.getRegionStates().getRegionsInTransition() : null;
-2400String[] coprocessors = cpHost != 
null ? getMasterCoprocessors() : null;
-2401boolean balancerOn = 
loadBalancerTracker != null ?
-2402  loadBalancerTracker.isBalancerOn() 
: false;
-2403MapServerName, ServerLoad 
onlineServers = null;
-2404SetServerName deadServers = 
null;
-2405if (serverManager != null) {
-2406  deadServers = 
serverManager.getDeadServers().copyServerNames();
-2407  onlineServers = 
serverManager.getOnlineServers();
-2408}
-2409return new 

[01/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-09 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site baff48117 -> 7ef4c5a9a


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7ef4c5a9/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.RegionScannerShippedCallBack.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.RegionScannerShippedCallBack.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.RegionScannerShippedCallBack.html
index 17d1bcb..6dfd1d4 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.RegionScannerShippedCallBack.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.RegionScannerShippedCallBack.html
@@ -35,863 +35,863 @@
 027import java.net.InetSocketAddress;
 028import java.net.UnknownHostException;
 029import java.nio.ByteBuffer;
-030import java.util.ArrayList;
-031import java.util.Collections;
-032import java.util.HashMap;
-033import java.util.Iterator;
-034import java.util.List;
-035import java.util.Map;
-036import java.util.Map.Entry;
-037import java.util.NavigableMap;
-038import java.util.Set;
-039import java.util.TreeSet;
-040import 
java.util.concurrent.ConcurrentHashMap;
-041import 
java.util.concurrent.ConcurrentMap;
-042import 
java.util.concurrent.atomic.AtomicLong;
-043import 
java.util.concurrent.atomic.LongAdder;
-044
-045import 
org.apache.commons.lang.mutable.MutableObject;
-046import org.apache.commons.logging.Log;
-047import 
org.apache.commons.logging.LogFactory;
-048import 
org.apache.hadoop.conf.Configuration;
-049import org.apache.hadoop.fs.Path;
-050import 
org.apache.hadoop.hbase.ByteBufferCell;
-051import org.apache.hadoop.hbase.Cell;
-052import 
org.apache.hadoop.hbase.CellScannable;
-053import 
org.apache.hadoop.hbase.CellScanner;
-054import 
org.apache.hadoop.hbase.CellUtil;
-055import 
org.apache.hadoop.hbase.DoNotRetryIOException;
-056import 
org.apache.hadoop.hbase.DroppedSnapshotException;
-057import 
org.apache.hadoop.hbase.HBaseIOException;
-058import 
org.apache.hadoop.hbase.HConstants;
-059import 
org.apache.hadoop.hbase.HRegionInfo;
-060import 
org.apache.hadoop.hbase.HTableDescriptor;
-061import 
org.apache.hadoop.hbase.MultiActionResultTooLarge;
-062import 
org.apache.hadoop.hbase.NotServingRegionException;
-063import 
org.apache.hadoop.hbase.ServerName;
-064import 
org.apache.hadoop.hbase.TableName;
-065import 
org.apache.hadoop.hbase.UnknownScannerException;
-066import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
-067import 
org.apache.hadoop.hbase.client.Append;
-068import 
org.apache.hadoop.hbase.client.ConnectionUtils;
-069import 
org.apache.hadoop.hbase.client.Delete;
-070import 
org.apache.hadoop.hbase.client.Durability;
-071import 
org.apache.hadoop.hbase.client.Get;
-072import 
org.apache.hadoop.hbase.client.Increment;
-073import 
org.apache.hadoop.hbase.client.Mutation;
-074import 
org.apache.hadoop.hbase.client.Put;
-075import 
org.apache.hadoop.hbase.client.RegionReplicaUtil;
-076import 
org.apache.hadoop.hbase.client.Result;
-077import 
org.apache.hadoop.hbase.client.RowMutations;
-078import 
org.apache.hadoop.hbase.client.Scan;
-079import 
org.apache.hadoop.hbase.client.VersionInfoUtil;
-080import 
org.apache.hadoop.hbase.conf.ConfigurationObserver;
-081import 
org.apache.hadoop.hbase.exceptions.FailedSanityCheckException;
-082import 
org.apache.hadoop.hbase.exceptions.OutOfOrderScannerNextException;
-083import 
org.apache.hadoop.hbase.exceptions.ScannerResetException;
-084import 
org.apache.hadoop.hbase.filter.ByteArrayComparable;
-085import 
org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
-086import 
org.apache.hadoop.hbase.ipc.HBaseRPCErrorHandler;
-087import 
org.apache.hadoop.hbase.ipc.HBaseRpcController;
-088import 
org.apache.hadoop.hbase.ipc.PriorityFunction;
-089import 
org.apache.hadoop.hbase.ipc.QosPriority;
-090import 
org.apache.hadoop.hbase.ipc.RpcCallContext;
-091import 
org.apache.hadoop.hbase.ipc.RpcCallback;
-092import 
org.apache.hadoop.hbase.ipc.RpcServer;
-093import 
org.apache.hadoop.hbase.ipc.RpcServer.BlockingServiceAndInterface;
-094import 
org.apache.hadoop.hbase.ipc.RpcServerFactory;
-095import 
org.apache.hadoop.hbase.ipc.RpcServerInterface;
-096import 
org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
-097import 
org.apache.hadoop.hbase.ipc.ServerRpcController;
-098import 
org.apache.hadoop.hbase.master.MasterRpcServices;
-099import 
org.apache.hadoop.hbase.quotas.OperationQuota;
-100import 
org.apache.hadoop.hbase.quotas.RegionServerQuotaManager;
-101import 
org.apache.hadoop.hbase.regionserver.HRegion.RegionScannerImpl;
-102import 
org.apache.hadoop.hbase.regionserver.Leases.Lease;
-103import 
org.apache.hadoop.hbase.regionserver.Leases.LeaseStillHeldException;
-104import 
org.apache.hadoop.hbase.regionserver.Region.Operation;
-105import 

[01/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-05-03 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site f8a136728 -> 31df46740


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/31df4674/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.CreateTableFuture.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.CreateTableFuture.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.CreateTableFuture.html
index 0610ad0..8b22aa1 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.CreateTableFuture.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.CreateTableFuture.html
@@ -2665,1591 +2665,1570 @@
 2657  @Override
 2658  public byte[] 
execProcedureWithRet(String signature, String instance, MapString, 
String props)
 2659  throws IOException {
-2660ProcedureDescription.Builder builder 
= ProcedureDescription.newBuilder();
-2661
builder.setSignature(signature).setInstance(instance);
-2662for (EntryString, String 
entry : props.entrySet()) {
-2663  NameStringPair pair = 
NameStringPair.newBuilder().setName(entry.getKey())
-2664  
.setValue(entry.getValue()).build();
-2665  builder.addConfiguration(pair);
-2666}
-2667
-2668final ExecProcedureRequest request = 
ExecProcedureRequest.newBuilder()
-2669
.setProcedure(builder.build()).build();
-2670// run the procedure on the master
-2671ExecProcedureResponse response = 
executeCallable(new MasterCallableExecProcedureResponse(
-2672getConnection(), 
getRpcControllerFactory()) {
-2673  @Override
-2674  protected ExecProcedureResponse 
rpcCall() throws Exception {
-2675return 
master.execProcedureWithRet(getRpcController(), request);
-2676  }
-2677});
-2678
-2679return response.hasReturnData() ? 
response.getReturnData().toByteArray() : null;
-2680  }
-2681
-2682  @Override
-2683  public void execProcedure(String 
signature, String instance, MapString, String props)
-2684  throws IOException {
-2685ProcedureDescription.Builder builder 
= ProcedureDescription.newBuilder();
-2686
builder.setSignature(signature).setInstance(instance);
-2687for (EntryString, String 
entry : props.entrySet()) {
-2688  NameStringPair pair = 
NameStringPair.newBuilder().setName(entry.getKey())
-2689  
.setValue(entry.getValue()).build();
-2690  builder.addConfiguration(pair);
-2691}
-2692
-2693final ExecProcedureRequest request = 
ExecProcedureRequest.newBuilder()
-2694
.setProcedure(builder.build()).build();
-2695// run the procedure on the master
-2696ExecProcedureResponse response = 
executeCallable(new MasterCallableExecProcedureResponse(
-2697getConnection(), 
getRpcControllerFactory()) {
-2698  @Override
-2699  protected ExecProcedureResponse 
rpcCall() throws Exception {
-2700return 
master.execProcedure(getRpcController(), request);
-2701  }
-2702});
-2703
-2704long start = 
EnvironmentEdgeManager.currentTime();
-2705long max = 
response.getExpectedTimeout();
-2706long maxPauseTime = max / 
this.numRetries;
-2707int tries = 0;
-2708LOG.debug("Waiting a max of " + max 
+ " ms for procedure '" +
-2709signature + " : " + instance + 
"'' to complete. (max " + maxPauseTime + " ms per retry)");
-2710boolean done = false;
-2711while (tries == 0
-2712|| 
((EnvironmentEdgeManager.currentTime() - start)  max  !done)) {
-2713  try {
-2714// sleep a backoff = 
pauseTime amount
-2715long sleep = 
getPauseTime(tries++);
-2716sleep = sleep  maxPauseTime 
? maxPauseTime : sleep;
-2717LOG.debug("(#" + tries + ") 
Sleeping: " + sleep +
-2718  "ms while waiting for 
procedure completion.");
-2719Thread.sleep(sleep);
-2720  } catch (InterruptedException e) 
{
-2721throw 
(InterruptedIOException)new 
InterruptedIOException("Interrupted").initCause(e);
-2722  }
-2723  LOG.debug("Getting current status 
of procedure from master...");
-2724  done = 
isProcedureFinished(signature, instance, props);
-2725}
-2726if (!done) {
-2727  throw new IOException("Procedure 
'" + signature + " : " + instance
-2728  + "' wasn't completed in 
expectedTime:" + max + " ms");
-2729}
+2660ProcedureDescription desc = 
ProtobufUtil.buildProcedureDescription(signature, instance, props);
+2661final ExecProcedureRequest request 
=
+2662
ExecProcedureRequest.newBuilder().setProcedure(desc).build();
+2663// run the procedure on the master
+2664ExecProcedureResponse response = 
executeCallable(
+2665  new 
MasterCallableExecProcedureResponse(getConnection(), 
getRpcControllerFactory()) {
+2666@Override
+2667protected ExecProcedureResponse 
rpcCall() throws Exception {
+2668  return 
master.execProcedureWithRet(getRpcController(), 
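
The execProcedure code removed above waits for completion by polling isProcedureFinished with a growing sleep that is capped at expectedTimeout divided by the retry count. The same wait loop, extracted into a dependency-free helper; the names and backoff constants here are illustrative, not the HBaseAdmin internals themselves:

import java.io.IOException;
import java.io.InterruptedIOException;
import java.util.function.BooleanSupplier;

public final class ProcedureWait {
  /**
   * Poll 'done' until it returns true or 'maxMillis' elapses, sleeping with a growing
   * backoff capped at maxMillis / retriesHint, the same shape as the loop removed above.
   */
  public static void waitFor(BooleanSupplier done, long maxMillis, int retriesHint)
      throws IOException {
    long start = System.currentTimeMillis();
    long maxPause = Math.max(1, maxMillis / Math.max(1, retriesHint));
    int tries = 0;
    boolean finished = done.getAsBoolean();
    while (!finished && (System.currentTimeMillis() - start) < maxMillis) {
      long sleep = Math.min(maxPause, 100L * (1L << Math.min(tries++, 20)));  // capped backoff
      try {
        Thread.sleep(sleep);
      } catch (InterruptedException e) {
        throw (InterruptedIOException) new InterruptedIOException("Interrupted").initCause(e);
      }
      finished = done.getAsBoolean();
    }
    if (!finished) {
      throw new IOException("Procedure wasn't completed in expectedTime: " + maxMillis + " ms");
    }
  }
}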

[01/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-04-28 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 4f60e1ab0 -> 6f2e75f27


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6f2e75f2/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.html
index a665139..3fedd0b 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.html
@@ -879,1201 +879,1221 @@
 871// includes the header size also.
 872private int 
unencodedDataSizeWritten;
 873
-874/**
-875 * Bytes to be written to the file 
system, including the header. Compressed
-876 * if compression is turned on. It 
also includes the checksum data that
-877 * immediately follows the block 
data. (header + data + checksums)
-878 */
-879private ByteArrayOutputStream 
onDiskBlockBytesWithHeader;
-880
-881/**
-882 * The size of the checksum data on 
disk. It is used only if data is
-883 * not compressed. If data is 
compressed, then the checksums are already
-884 * part of onDiskBytesWithHeader. If 
data is uncompressed, then this
-885 * variable stores the checksum data 
for this block.
-886 */
-887private byte[] onDiskChecksum = 
HConstants.EMPTY_BYTE_ARRAY;
-888
-889/**
-890 * Current block's start offset in 
the {@link HFile}. Set in
-891 * {@link 
#writeHeaderAndData(FSDataOutputStream)}.
-892 */
-893private long startOffset;
-894
-895/**
-896 * Offset of previous block by block 
type. Updated when the next block is
-897 * started.
-898 */
-899private long[] prevOffsetByType;
-900
-901/** The offset of the previous block 
of the same type */
-902private long prevOffset;
-903/** Meta data that holds information 
about the hfileblock**/
-904private HFileContext fileContext;
-905
-906/**
-907 * @param dataBlockEncoder data block 
encoding algorithm to use
-908 */
-909public Writer(HFileDataBlockEncoder 
dataBlockEncoder, HFileContext fileContext) {
-910  if 
(fileContext.getBytesPerChecksum()  HConstants.HFILEBLOCK_HEADER_SIZE) {
-911throw new 
RuntimeException("Unsupported value of bytesPerChecksum. " +
-912" Minimum is " + 
HConstants.HFILEBLOCK_HEADER_SIZE + " but the configured value is " +
-913
fileContext.getBytesPerChecksum());
-914  }
-915  this.dataBlockEncoder = 
dataBlockEncoder != null?
-916  dataBlockEncoder: 
NoOpDataBlockEncoder.INSTANCE;
-917  this.dataBlockEncodingCtx = 
this.dataBlockEncoder.
-918  
newDataBlockEncodingContext(HConstants.HFILEBLOCK_DUMMY_HEADER, fileContext);
-919  // TODO: This should be lazily 
instantiated since we usually do NOT need this default encoder
-920  this.defaultBlockEncodingCtx = new 
HFileBlockDefaultEncodingContext(null,
-921  
HConstants.HFILEBLOCK_DUMMY_HEADER, fileContext);
-922  // TODO: Set BAOS initial size. Use 
fileContext.getBlocksize() and add for header/checksum
-923  baosInMemory = new 
ByteArrayOutputStream();
-924  prevOffsetByType = new 
long[BlockType.values().length];
-925  for (int i = 0; i  
prevOffsetByType.length; ++i) {
-926prevOffsetByType[i] = UNSET;
-927  }
-928  // TODO: Why fileContext saved away 
when we have dataBlockEncoder and/or
-929  // defaultDataBlockEncoder?
-930  this.fileContext = fileContext;
-931}
-932
-933/**
-934 * Starts writing into the block. The 
previous block's data is discarded.
-935 *
-936 * @return the stream the user can 
write their data into
-937 * @throws IOException
-938 */
-939DataOutputStream 
startWriting(BlockType newBlockType)
-940throws IOException {
-941  if (state == State.BLOCK_READY 
 startOffset != -1) {
-942// We had a previous block that 
was written to a stream at a specific
-943// offset. Save that offset as 
the last offset of a block of that type.
-944
prevOffsetByType[blockType.getId()] = startOffset;
-945  }
-946
-947  startOffset = -1;
-948  blockType = newBlockType;
-949
-950  baosInMemory.reset();
-951  
baosInMemory.write(HConstants.HFILEBLOCK_DUMMY_HEADER);
-952
-953  state = State.WRITING;
-954
-955  // We will compress it later in 
finishBlock()
-956  userDataStream = new 
ByteBufferWriterDataOutputStream(baosInMemory);
-957  if (newBlockType == BlockType.DATA) 
{
-958
this.dataBlockEncoder.startBlockEncoding(dataBlockEncodingCtx, 
userDataStream);
-959  }
-960  this.unencodedDataSizeWritten = 
0;
-961  return userDataStream;
-962}
-963
-964/**
-965 * Writes the Cell to this block
-966 * @param cell
-967 * @throws IOException
-968 */
-969

[01/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-04-27 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 4b833a624 -> efd0601e8


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/efd0601e/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/MasterCoprocessorEnvironment.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/MasterCoprocessorEnvironment.html
 
b/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/MasterCoprocessorEnvironment.html
index 8f4a81a..a35d286 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/MasterCoprocessorEnvironment.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/MasterCoprocessorEnvironment.html
@@ -487,33 +487,40 @@
 
 
 default void
+MasterObserver.postListLocks(ObserverContextMasterCoprocessorEnvironmentctx,
+ http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListLockInfolockInfoList)
+Called after a listLocks request has been processed.
+
+
+
+default void
 MasterObserver.postListNamespaceDescriptors(ObserverContextMasterCoprocessorEnvironmentctx,
 http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListNamespaceDescriptordescriptors)
 Called after a listNamespaceDescriptors request has been 
processed.
 
 
-
+
 default void
 MasterObserver.postListProcedures(ObserverContextMasterCoprocessorEnvironmentctx,
   http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListProcedureInfoprocInfoList)
 Called after a listProcedures request has been 
processed.
 
 
-
+
 default void
 MasterObserver.postListReplicationPeers(ObserverContextMasterCoprocessorEnvironmentctx,
 http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringregex)
 Called after list replication peers.
 
 
-
+
 default void
 MasterObserver.postListSnapshot(ObserverContextMasterCoprocessorEnvironmentctx,
 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescriptionsnapshot)
 Called after listSnapshots request has been processed.
 
 
-
+
 default void
 MasterObserver.postLockHeartbeat(ObserverContextMasterCoprocessorEnvironmentctx,
  LockProcedureproc,
@@ -521,14 +528,14 @@
 Called after heartbeat to a lock.
 
 
-
+
 default void
 MasterObserver.postMergeRegions(ObserverContextMasterCoprocessorEnvironmentc,
 HRegionInfo[]regionsToMerge)
 called after merge regions request.
 
 
-
+
 default void
 MasterObserver.postMergeRegionsCommitAction(ObserverContextMasterCoprocessorEnvironmentctx,
 HRegionInfo[]regionsToMerge,
@@ -536,7 +543,7 @@
 This will be called after PONR step as part of regions 
merge transaction.
 
 
-
+
 default void
 MasterObserver.postModifyColumn(ObserverContextMasterCoprocessorEnvironmentctx,
 TableNametableName,
@@ -548,7 +555,7 @@
 
 
 
-
+
 default void
 MasterObserver.postModifyColumnFamily(ObserverContextMasterCoprocessorEnvironmentctx,
   TableNametableName,
@@ -556,7 +563,7 @@
 Called after the column family has been updated.
 
 
-
+
 default void
 MasterObserver.postModifyColumnHandler(ObserverContextMasterCoprocessorEnvironmentctx,
TableNametableName,
@@ -568,14 +575,14 @@
 
 
 
-
+
 default void
 MasterObserver.postModifyNamespace(ObserverContextMasterCoprocessorEnvironmentctx,
NamespaceDescriptorns)
 Called after the modifyNamespace operation has been 
requested.
 
 
-
+
 default void
 MasterObserver.postModifyTable(ObserverContextMasterCoprocessorEnvironmentctx,
TableNametableName,
@@ -583,7 +590,7 @@
 Called after the modifyTable operation has been 
requested.
 
 
-
+
 default void
 MasterObserver.postModifyTableHandler(ObserverContextMasterCoprocessorEnvironmentctx,
   TableNametableName,
@@ -595,7 +602,7 @@
 
 
 
-
+
 default void
 MasterObserver.postMove(ObserverContextMasterCoprocessorEnvironmentctx,
 HRegionInforegion,
@@ -604,7 +611,7 @@
 Called after the region move has been requested.
 
 
-
+
 default void
 MasterObserver.postMoveServers(ObserverContextMasterCoprocessorEnvironmentctx,
http://docs.oracle.com/javase/8/docs/api/java/util/Set.html?is-external=true;
 title="class or interface in java.util">SetAddressservers,
@@ -612,7 +619,7 @@
 Called after servers are moved to target region server 
group
 
 
-
+
 default void
 MasterObserver.postMoveServersAndTables(ObserverContextMasterCoprocessorEnvironmentctx,
 http://docs.oracle.com/javase/8/docs/api/java/util/Set.html?is-external=true;
 title="class or interface in java.util">SetAddressservers,
@@ -621,7 +628,7 @@
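
The class-use page above lists MasterObserver's default methods, so an observer only needs to override the hooks it cares about. A minimal sketch of such an observer, assuming the pre-2.0 signature in which postModifyTable's third parameter is an HTableDescriptor; coprocessor registration via hbase.coprocessor.master.classes is omitted:

import java.io.IOException;

import org.apache.hadoop.hbase.CoprocessorEnvironment;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.MasterObserver;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;

/** Only the hook of interest is overridden; every other method keeps its default body. */
public class AuditingMasterObserver implements MasterObserver {

  // start/stop kept as explicit no-ops so the sketch compiles whether or not they have defaults.
  @Override
  public void start(CoprocessorEnvironment env) throws IOException { }

  @Override
  public void stop(CoprocessorEnvironment env) throws IOException { }

  @Override
  public void postModifyTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
      TableName tableName, HTableDescriptor htd) throws IOException {
    // Audit the schema change after the master has accepted it.
    System.out.println("Table modified: " + tableName.getNameAsString());
  }
}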
 

[01/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-04-21 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 9c843314d -> 10601a303


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/10601a30/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.MergeTableRegionsFuture.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.MergeTableRegionsFuture.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.MergeTableRegionsFuture.html
index 8c56a67..8e3d847 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.MergeTableRegionsFuture.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.MergeTableRegionsFuture.html
@@ -2506,1742 +2506,1743 @@
 2498  public void restoreSnapshot(final 
String snapshotName)
 2499  throws IOException, 
RestoreSnapshotException {
 2500boolean takeFailSafeSnapshot =
-2501  
conf.getBoolean("hbase.snapshot.restore.take.failsafe.snapshot", false);
-2502restoreSnapshot(snapshotName, 
takeFailSafeSnapshot);
-2503  }
-2504
-2505  @Override
-2506  public void restoreSnapshot(final 
byte[] snapshotName, final boolean takeFailSafeSnapshot)
-2507  throws IOException, 
RestoreSnapshotException {
-2508
restoreSnapshot(Bytes.toString(snapshotName), takeFailSafeSnapshot);
-2509  }
-2510
-2511  /*
-2512   * Check whether the snapshot exists 
and contains disabled table
-2513   *
-2514   * @param snapshotName name of the 
snapshot to restore
-2515   * @throws IOException if a remote or 
network exception occurs
-2516   * @throws RestoreSnapshotException if 
no valid snapshot is found
-2517   */
-2518  private TableName 
getTableNameBeforeRestoreSnapshot(final String snapshotName)
-2519  throws IOException, 
RestoreSnapshotException {
-2520TableName tableName = null;
-2521for (SnapshotDescription 
snapshotInfo: listSnapshots()) {
-2522  if 
(snapshotInfo.getName().equals(snapshotName)) {
-2523tableName = 
snapshotInfo.getTableName();
-2524break;
-2525  }
-2526}
-2527
-2528if (tableName == null) {
-2529  throw new 
RestoreSnapshotException(
-2530"Unable to find the table name 
for snapshot=" + snapshotName);
-2531}
-2532return tableName;
-2533  }
-2534
-2535  @Override
-2536  public void restoreSnapshot(final 
String snapshotName, final boolean takeFailSafeSnapshot)
-2537  throws IOException, 
RestoreSnapshotException {
-2538TableName tableName = 
getTableNameBeforeRestoreSnapshot(snapshotName);
-2539
-2540// The table does not exists, switch 
to clone.
-2541if (!tableExists(tableName)) {
-2542  cloneSnapshot(snapshotName, 
tableName);
-2543  return;
-2544}
-2545
-2546// Check if the table is disabled
-2547if (!isTableDisabled(tableName)) {
-2548  throw new 
TableNotDisabledException(tableName);
-2549}
-2550
-2551// Take a snapshot of the current 
state
-2552String failSafeSnapshotSnapshotName 
= null;
-2553if (takeFailSafeSnapshot) {
-2554  failSafeSnapshotSnapshotName = 
conf.get("hbase.snapshot.restore.failsafe.name",
-2555
"hbase-failsafe-{snapshot.name}-{restore.timestamp}");
-2556  failSafeSnapshotSnapshotName = 
failSafeSnapshotSnapshotName
-2557.replace("{snapshot.name}", 
snapshotName)
-2558.replace("{table.name}", 
tableName.toString().replace(TableName.NAMESPACE_DELIM, '.'))
-2559.replace("{restore.timestamp}", 
String.valueOf(EnvironmentEdgeManager.currentTime()));
-2560  LOG.info("Taking restore-failsafe 
snapshot: " + failSafeSnapshotSnapshotName);
-2561  
snapshot(failSafeSnapshotSnapshotName, tableName);
-2562}
-2563
-2564try {
-2565  // Restore snapshot
-2566  get(
-2567
internalRestoreSnapshotAsync(snapshotName, tableName),
-2568syncWaitTimeout,
-2569TimeUnit.MILLISECONDS);
-2570} catch (IOException e) {
-2571  // Somthing went wrong during the 
restore...
-2572  // if the pre-restore snapshot is 
available try to rollback
-2573  if (takeFailSafeSnapshot) {
-2574try {
-2575  get(
-2576
internalRestoreSnapshotAsync(failSafeSnapshotSnapshotName, tableName),
-2577syncWaitTimeout,
-2578TimeUnit.MILLISECONDS);
-2579  String msg = "Restore 
snapshot=" + snapshotName +
-2580" failed. Rollback to 
snapshot=" + failSafeSnapshotSnapshotName + " succeeded.";
-2581  LOG.error(msg, e);
-2582  throw new 
RestoreSnapshotException(msg, e);
-2583} catch (IOException ex) {
-2584  String msg = "Failed to 
restore and rollback to snapshot=" + failSafeSnapshotSnapshotName;
-2585  LOG.error(msg, ex);
-2586  throw new 
RestoreSnapshotException(msg, e);
-2587}
-2588  } else {
-2589throw new 
RestoreSnapshotException("Failed to restore snapshot=" + snapshotName, e);
-2590  }
-2591}
-2592
-2593
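
The restoreSnapshot code above optionally takes a failsafe snapshot first (controlled by hbase.snapshot.restore.take.failsafe.snapshot) so a failed restore can be rolled back. From the client it is driven through the Admin API; a short sketch with hypothetical table and snapshot names:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FailsafeRestoreSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Optional: make every restore take a failsafe snapshot by default.
    conf.setBoolean("hbase.snapshot.restore.take.failsafe.snapshot", true);
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("my_table");   // hypothetical table name
      admin.disableTable(table);                         // restore requires a disabled table
      // The second argument forces a failsafe snapshot for this call regardless of the config.
      admin.restoreSnapshot("my_snapshot", true);
      admin.enableTable(table);
    }
  }
}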

[01/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-04-20 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 932a1c360 -> 662ea7dcb


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/662ea7dc/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ProcedureFuture.WaitForStateCallable.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ProcedureFuture.WaitForStateCallable.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ProcedureFuture.WaitForStateCallable.html
index b798d4b..8c56a67 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ProcedureFuture.WaitForStateCallable.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ProcedureFuture.WaitForStateCallable.html
@@ -3877,425 +3877,371 @@
 3869  throw new 
ReplicationException("tableCfs is null");
 3870}
 3871ReplicationPeerConfig peerConfig = 
getReplicationPeerConfig(id);
-3872    Map<TableName, List<String>> preTableCfs = peerConfig.getTableCFsMap();
-3873    if (preTableCfs == null) {
-3874      peerConfig.setTableCFsMap(tableCfs);
-3875    } else {
-3876      for (Map.Entry<TableName, ? extends Collection<String>> entry : tableCfs.entrySet()) {
-3877        TableName table = entry.getKey();
-3878        Collection<String> appendCfs = entry.getValue();
-3879        if (preTableCfs.containsKey(table)) {
-3880          List<String> cfs = preTableCfs.get(table);
-3881          if (cfs == null || appendCfs == null || appendCfs.isEmpty()) {
-3882            preTableCfs.put(table, null);
-3883          } else {
-3884            Set<String> cfSet = new HashSet<String>(cfs);
-3885            cfSet.addAll(appendCfs);
-3886            preTableCfs.put(table, Lists.newArrayList(cfSet));
-3887          }
-3888} else {
-3889  if (appendCfs == null || 
appendCfs.isEmpty()) {
-3890preTableCfs.put(table, 
null);
-3891  } else {
-3892preTableCfs.put(table, 
Lists.newArrayList(appendCfs));
-3893  }
-3894}
-3895  }
-3896}
-3897updateReplicationPeerConfig(id, 
peerConfig);
-3898  }
-3899
-3900  @Override
-3901  public void 
removeReplicationPeerTableCFs(String id,
-3902  MapTableName, ? extends 
CollectionString tableCfs) throws ReplicationException,
-3903  IOException {
-3904if (tableCfs == null) {
-3905  throw new 
ReplicationException("tableCfs is null");
-3906}
-3907ReplicationPeerConfig peerConfig = 
getReplicationPeerConfig(id);
-3908MapTableName, 
ListString preTableCfs = peerConfig.getTableCFsMap();
-3909if (preTableCfs == null) {
-3910  throw new 
ReplicationException("Table-Cfs for peer" + id + " is null");
-3911}
-3912for (Map.EntryTableName, ? 
extends CollectionString entry : tableCfs.entrySet()) {
-3913
-3914  TableName table = 
entry.getKey();
-3915  CollectionString removeCfs 
= entry.getValue();
-3916  if 
(preTableCfs.containsKey(table)) {
-3917ListString cfs = 
preTableCfs.get(table);
-3918if (cfs == null  
(removeCfs == null || removeCfs.isEmpty())) {
-3919  preTableCfs.remove(table);
-3920} else if (cfs != null 
 (removeCfs != null  !removeCfs.isEmpty())) {
-3921  SetString cfSet = new 
HashSetString(cfs);
-3922  cfSet.removeAll(removeCfs);
-3923  if (cfSet.isEmpty()) {
-3924preTableCfs.remove(table);
-3925  } else {
-3926preTableCfs.put(table, 
Lists.newArrayList(cfSet));
-3927  }
-3928} else if (cfs == null 
 (removeCfs != null  !removeCfs.isEmpty())) {
-3929  throw new 
ReplicationException("Cannot remove cf of table: " + table
-3930  + " which doesn't specify 
cfs from table-cfs config in peer: " + id);
-3931} else if (cfs != null 
 (removeCfs == null || removeCfs.isEmpty())) {
-3932  throw new 
ReplicationException("Cannot remove table: " + table
-3933  + " which has specified 
cfs from table-cfs config in peer: " + id);
-3934}
-3935  } else {
-3936throw new 
ReplicationException("No table: " + table + " in table-cfs config of peer: " + 
id);
-3937  }
-3938}
-3939updateReplicationPeerConfig(id, 
peerConfig);
-3940  }
-3941
-3942  @Override
-3943  public 
ListReplicationPeerDescription listReplicationPeers() throws 
IOException {
-3944return 
listReplicationPeers((Pattern)null);
-3945  }
-3946
-3947  @Override
-3948  public 
ListReplicationPeerDescription listReplicationPeers(String regex) 
throws IOException {
-3949return 
listReplicationPeers(Pattern.compile(regex));
-3950  }
-3951
-3952  @Override
-3953  public 
ListReplicationPeerDescription listReplicationPeers(Pattern pattern)
-3954  throws IOException {
-3955return executeCallable(new 
MasterCallableListReplicationPeerDescription(getConnection(),
-3956getRpcControllerFactory()) {
-3957  @Override
-3958  

[01/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-04-19 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 105b8f4fc -> 6b4bae59f


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6b4bae59/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/CacheConfig.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/CacheConfig.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/CacheConfig.html
index ad83c55..bff0679 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/CacheConfig.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/CacheConfig.html
@@ -291,435 +291,436 @@
 283  }
 284
 285  /**
-286   * Create a block cache configuration with the specified cache and
-287   * configuration parameters.
-288   * @param blockCache reference to block cache, null if completely disabled
-289   * @param cacheDataOnRead whether DATA blocks should be cached on read (we always cache INDEX
-290   * blocks and BLOOM blocks; this cannot be disabled).
-291   * @param inMemory whether blocks should be flagged as in-memory
-292   * @param cacheDataOnWrite whether data blocks should be cached on write
-293   * @param cacheIndexesOnWrite whether index blocks should be cached on write
-294   * @param cacheBloomsOnWrite whether blooms should be cached on write
-295   * @param evictOnClose whether blocks should be evicted when HFile is closed
-296   * @param cacheDataCompressed whether to store blocks as compressed in the cache
-297   * @param prefetchOnOpen whether to prefetch blocks upon open
-298   * @param cacheDataInL1 If more than one cache tier deployed, if true, cache this column families
-299   * data blocks up in the L1 tier.
-300   */
-301  CacheConfig(final BlockCache blockCache,
-302      final boolean cacheDataOnRead, final boolean inMemory,
-303      final boolean cacheDataOnWrite, final boolean cacheIndexesOnWrite,
-304      final boolean cacheBloomsOnWrite, final boolean evictOnClose,
-305      final boolean cacheDataCompressed, final boolean prefetchOnOpen,
-306      final boolean cacheDataInL1, final boolean dropBehindCompaction) {
-307    this.blockCache = blockCache;
-308    this.cacheDataOnRead = cacheDataOnRead;
-309    this.inMemory = inMemory;
-310    this.cacheDataOnWrite = cacheDataOnWrite;
-311    this.cacheIndexesOnWrite = cacheIndexesOnWrite;
-312    this.cacheBloomsOnWrite = cacheBloomsOnWrite;
-313    this.evictOnClose = evictOnClose;
-314    this.cacheDataCompressed = cacheDataCompressed;
-315    this.prefetchOnOpen = prefetchOnOpen;
-316    this.cacheDataInL1 = cacheDataInL1;
-317    this.dropBehindCompaction = dropBehindCompaction;
-318  }
-319
-320  /**
-321   * Constructs a cache configuration copied from the specified configuration.
-322   * @param cacheConf
-323   */
-324  public CacheConfig(CacheConfig cacheConf) {
-325    this(cacheConf.blockCache, cacheConf.cacheDataOnRead, cacheConf.inMemory,
-326        cacheConf.cacheDataOnWrite, cacheConf.cacheIndexesOnWrite,
-327        cacheConf.cacheBloomsOnWrite, cacheConf.evictOnClose,
-328        cacheConf.cacheDataCompressed, cacheConf.prefetchOnOpen,
-329        cacheConf.cacheDataInL1, cacheConf.dropBehindCompaction);
-330  }
-331
-332  private CacheConfig() {
-333    this(null, false, false, false, false, false,
-334        false, false, false, false, false);
-335  }
-336
-337  /**
-338   * Checks whether the block cache is enabled.
-339   */
-340  public boolean isBlockCacheEnabled() {
-341    return this.blockCache != null;
-342  }
-343
-344  /**
-345   * Returns the block cache.
-346   * @return the block cache, or null if caching is completely disabled
-347   */
-348  public BlockCache getBlockCache() {
-349    return this.blockCache;
-350  }
-351
-352  /**
-353   * Returns whether the DATA blocks of this HFile should be cached on read or not (we always
-354   * cache the meta blocks, the INDEX and BLOOM blocks).
-355   * @return true if blocks should be cached on read, false if not
-356   */
-357  public boolean shouldCacheDataOnRead() {
-358    return isBlockCacheEnabled() && cacheDataOnRead;
-359  }
-360
-361  public boolean shouldDropBehindCompaction() {
-362    return dropBehindCompaction;
-363  }
-364
-365  /**
-366   * Should we cache a block of a particular category? We always cache
-367   * important blocks such as index blocks, as long as the block cache is
-368   * available.
-369   */
-370  public boolean shouldCacheBlockOnRead(BlockCategory category) {
-371    return isBlockCacheEnabled()
-372        && (cacheDataOnRead ||
-373            category == BlockCategory.INDEX ||
-374            category == BlockCategory.BLOOM ||
-375            (prefetchOnOpen &&
-376                (category != BlockCategory.META &&
-377                 category != BlockCategory.UNKNOWN)));
-378  }
-379
-380  /**
-381   * @return true if blocks in this file should be flagged as in-memory
-382   */
-383  public boolean
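A hedged sketch (not part of the diff): exercising the predicates shown above through the public CacheConfig(Configuration) constructor, which derives the boolean flags from the supplied Configuration. The class name is a placeholder; assumes an HBase client of the same era on the classpath.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.io.hfile.BlockType.BlockCategory;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;

public class CacheConfigProbe {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    CacheConfig cacheConf = new CacheConfig(conf);

    // DATA blocks are cached on read only when a block cache exists and cacheDataOnRead is set,
    // mirroring shouldCacheDataOnRead() above.
    System.out.println("cache DATA on read: " + cacheConf.shouldCacheDataOnRead());

    // INDEX and BLOOM blocks are always cached while a block cache is available,
    // regardless of the data-on-read flag (see shouldCacheBlockOnRead() above).
    System.out.println("cache INDEX on read: " + cacheConf.shouldCacheBlockOnRead(BlockCategory.INDEX));
    System.out.println("cache BLOOM on read: " + cacheConf.shouldCacheBlockOnRead(BlockCategory.BLOOM));
  }
}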

[01/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-04-18 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 7ed80b326 -> 2fcc2ae0b


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2fcc2ae0/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.Sink.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.Sink.html b/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.Sink.html
index 88dff07..af5536f 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.Sink.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.Sink.html
@@ -77,16 +77,16 @@
 069import org.apache.hadoop.hbase.TableName;
 070import org.apache.hadoop.hbase.TableNotEnabledException;
 071import org.apache.hadoop.hbase.TableNotFoundException;
-072import org.apache.hadoop.hbase.client.Admin;
-073import org.apache.hadoop.hbase.client.Connection;
-074import org.apache.hadoop.hbase.client.ConnectionFactory;
-075import org.apache.hadoop.hbase.client.Get;
-076import org.apache.hadoop.hbase.client.Put;
-077import org.apache.hadoop.hbase.client.RegionLocator;
-078import org.apache.hadoop.hbase.client.ResultScanner;
-079import org.apache.hadoop.hbase.client.Scan;
-080import org.apache.hadoop.hbase.client.Table;
-081import org.apache.hadoop.hbase.client.Scan.ReadType;
+072import org.apache.hadoop.hbase.classification.InterfaceAudience;
+073import org.apache.hadoop.hbase.client.Admin;
+074import org.apache.hadoop.hbase.client.Connection;
+075import org.apache.hadoop.hbase.client.ConnectionFactory;
+076import org.apache.hadoop.hbase.client.Get;
+077import org.apache.hadoop.hbase.client.Put;
+078import org.apache.hadoop.hbase.client.RegionLocator;
+079import org.apache.hadoop.hbase.client.ResultScanner;
+080import org.apache.hadoop.hbase.client.Scan;
+081import org.apache.hadoop.hbase.client.Table;
 082import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter;
 083import org.apache.hadoop.hbase.tool.Canary.RegionTask.TaskType;
 084import org.apache.hadoop.hbase.util.Bytes;
@@ -118,1387 +118,1345 @@
 110 * 3. zookeeper mode - for each zookeeper instance, selects a zNode and
 111 * outputs some information about failure or latency.
 112 */
-113public final class Canary implements Tool {
-114  // Sink interface used by the canary to outputs information
-115  public interface Sink {
-116    public long getReadFailureCount();
-117    public long incReadFailureCount();
-118    public void publishReadFailure(HRegionInfo region, Exception e);
-119    public void publishReadFailure(HRegionInfo region, HColumnDescriptor column, Exception e);
-120    public void updateReadFailedHostList(HRegionInfo region, String serverName);
-121    public Map<String,String> getReadFailures();
-122    public void publishReadTiming(HRegionInfo region, HColumnDescriptor column, long msTime);
-123    public long getWriteFailureCount();
-124    public void publishWriteFailure(HRegionInfo region, Exception e);
-125    public void publishWriteFailure(HRegionInfo region, HColumnDescriptor column, Exception e);
-126    public void publishWriteTiming(HRegionInfo region, HColumnDescriptor column, long msTime);
-127    public void updateWriteFailedHostList(HRegionInfo region, String serverName);
-128    public Map<String,String> getWriteFailures();
-129  }
-130  // new extended sink for output regionserver mode info
-131  // do not change the Sink interface directly due to maintaining the API
-132  public interface ExtendedSink extends Sink {
-133    public void publishReadFailure(String table, String server);
-134    public void publishReadTiming(String table, String server, long msTime);
-135  }
-136
-137  // Simple implementation of canary sink that allows to plot on
-138  // file or standard output timings or failures.
-139  public static class StdOutSink implements Sink {
-140    private AtomicLong readFailureCount = new AtomicLong(0),
-141        writeFailureCount = new AtomicLong(0);
-142
-143    private Map<String, String> readFailures = new ConcurrentHashMap<>();
-144    private Map<String, String> writeFailures = new ConcurrentHashMap<>();
-145
-146    @Override
-147    public long getReadFailureCount() {
-148      return readFailureCount.get();
-149    }
-150
-151    @Override
-152    public long incReadFailureCount() {
-153      return readFailureCount.incrementAndGet();
-154    }
-155
-156    @Override
-157    public void publishReadFailure(HRegionInfo region, Exception e) {
-158      readFailureCount.incrementAndGet();
-159      LOG.error(String.format("read from region %s failed", region.getRegionNameAsString()), e);
-160    }
-161
-162    @Override
-163    public void publishReadFailure(HRegionInfo region, HColumnDescriptor column, Exception e) {
-164      readFailureCount.incrementAndGet();
-165      LOG.error(String.format("read from region %s column family %s failed",
-166          region.getRegionNameAsString(),
[01/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-04-17 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 8782022ae -> e4348f534


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e4348f53/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.NamespaceProcedureBiConsumer.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.NamespaceProcedureBiConsumer.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.NamespaceProcedureBiConsumer.html
index 31517f6..ac4a9b3 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.NamespaceProcedureBiConsumer.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.NamespaceProcedureBiConsumer.html
@@ -64,1374 +64,1421 @@
 056import org.apache.hadoop.hbase.client.Scan.ReadType;
 057import org.apache.hadoop.hbase.exceptions.DeserializationException;
 058import org.apache.hadoop.hbase.ipc.HBaseRpcController;
-059import org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback;
-060import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-061import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
-062import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
-063import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest;
-064import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse;
-065import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionRequest;
-066import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionResponse;
-067import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema;
-068import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest;
-069import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse;
-070import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest;
-071import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionResponse;
-072import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceRequest;
-073import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceResponse;
-074import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest;
-075import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceResponse;
-076import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceRequest;
-077import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceResponse;
-078import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableRequest;
-079import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableResponse;
-080import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableRequest;
-081import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableResponse;
-082import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnRequest;
-083import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnResponse;
-084import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest;
-085import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse;
-086import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultRequest;
-087import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultResponse;
-088import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetSchemaAlterStatusRequest;
-089import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetSchemaAlterStatusResponse;
-090import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableDescriptorsRequest;
-091import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableDescriptorsResponse;
-092import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableNamesRequest;
-093import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableNamesResponse;
-094import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableRequest;
-095import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableResponse;
-096import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableRequest;
-097import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableResponse;
-098import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsBalancerEnabledRequest;
-099import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsBalancerEnabledResponse;
-100import

[01/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-04-11 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 68bb540a7 -> e57d1b632


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e57d1b63/apidocs/org/apache/hadoop/hbase/client/Query.html
--
diff --git a/apidocs/org/apache/hadoop/hbase/client/Query.html b/apidocs/org/apache/hadoop/hbase/client/Query.html
deleted file mode 100644
index d6ad319..000
--- a/apidocs/org/apache/hadoop/hbase/client/Query.html
+++ /dev/null
@@ -1,860 +0,0 @@
-[deleted Javadoc page boilerplate for "Query (Apache HBase 2.0.0-SNAPSHOT API)": doctype, scripts, and navigation links removed along with the rest of the file]
-org.apache.hadoop.hbase.client
-Class Query
-
-java.lang.Object
-  org.apache.hadoop.hbase.client.Operation
-    org.apache.hadoop.hbase.client.OperationWithAttributes
-      org.apache.hadoop.hbase.client.Query
-
-All Implemented Interfaces: Attributes
-Direct Known Subclasses: Get, Scan
-
-@InterfaceAudience.Public
-public abstract class Query
-extends OperationWithAttributes
-
-Field Summary
-  protected Map<byte[],TimeRange> colFamTimeRangeMap
-  protected Consistency consistency
-  protected Filter filter
-  protected Boolean loadColumnFamiliesOnDemand
-  protected int targetReplicaId
-  protected TimeRange tr
-
-Fields inherited from class org.apache.hadoop.hbase.client.OperationWithAttributes: ID_ATRIBUTE
-
-Constructor Summary
-  Query()
-
-Method Summary (All Methods | Instance Methods | Concrete Methods)
-  boolean doLoadColumnFamiliesOnDemand()
-      Get the logical value indicating whether on-demand CF loading should be allowed.
-  byte[] getACL()
-  org.apache.hadoop.hbase.security.visibility.Authorizations getAuthorizations()
-  Map<byte[],TimeRange> getColumnFamilyTimeRange()
-  Consistency getConsistency()
-      Returns the consistency level for this operation
-  Filter getFilter()
-  IsolationLevel getIsolationLevel()
-  Boolean getLoadColumnFamiliesOnDemandValue()
-      Get the raw loadColumnFamiliesOnDemand setting; if it's not set, can be null.
-  int getReplicaId()
-      Returns region replica id where Query will fetch data from.
-  TimeRange getTimeRange()
-  Query setACL(Map<String,org.apache.hadoop.hbase.security.access.Permission> perms)
-  Query setACL(String user, org.apache.hadoop.hbase.security.access.Permission perms)
-  Query setAuthorizations(org.apache.hadoop.hbase.security.visibility.Authorizations authorizations)
-      Sets the authorizations to be used by this Query
-  Query setColumnFamilyTimeRange(byte[] cf, long minStamp,
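A hedged sketch (not part of the deleted page): the Query methods summarised above are normally exercised through a concrete subclass such as Scan or Get. The column family name and class name are placeholders.

import org.apache.hadoop.hbase.client.Consistency;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter;
import org.apache.hadoop.hbase.util.Bytes;

public class QueryUsageSketch {
  public static Scan buildScan() {
    Scan scan = new Scan();                       // Scan extends Query
    scan.setConsistency(Consistency.TIMELINE);    // Query#setConsistency: allow timeline-consistent replica reads
    scan.setFilter(new FirstKeyOnlyFilter());     // Query#setFilter
    // Query#setColumnFamilyTimeRange: only consider cells of this family in [minStamp, maxStamp)
    scan.setColumnFamilyTimeRange(Bytes.toBytes("cf"), 0L, System.currentTimeMillis());
    return scan;
  }
}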