[03/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-08-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/346adc37/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResultImpl.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResultImpl.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResultImpl.html
index a701ece..26ee60a 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResultImpl.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResultImpl.html
@@ -896,7376 +896,7383 @@
 888    }
 889
 890    // Write HRI to a file in case we need to recover hbase:meta
-891    status.setStatus("Writing region info on filesystem");
-892    fs.checkRegionInfoOnFilesystem();
-893
-894    // Initialize all the HStores
-895    status.setStatus("Initializing all the Stores");
-896    long maxSeqId = initializeStores(reporter, status);
-897    this.mvcc.advanceTo(maxSeqId);
-898    if (ServerRegionReplicaUtil.shouldReplayRecoveredEdits(this)) {
-899      List<Store> stores = this.getStores();  // update the stores that we are replaying
-900      try {
-901        for (Store store : stores) {
-902          ((HStore) store).startReplayingFromWAL();
-903        }
-904        // Recover any edits if available.
-905        maxSeqId = Math.max(maxSeqId,
-906          replayRecoveredEditsIfAny(this.fs.getRegionDir(), maxSeqIdInStores, reporter, status));
-907        // Make sure mvcc is up to max.
-908        this.mvcc.advanceTo(maxSeqId);
-909      } finally {
-910        for (Store store : stores) {    // update the stores that we are done replaying
-911          ((HStore)store).stopReplayingFromWAL();
-912        }
-913      }
-914
-915    }
-916    this.lastReplayedOpenRegionSeqId = maxSeqId;
-917
-918    this.writestate.setReadOnly(ServerRegionReplicaUtil.isReadOnly(this));
-919    this.writestate.flushRequested = false;
-920    this.writestate.compacting.set(0);
+891    // Only the primary replica should write .regioninfo
+892    if (this.getRegionInfo().getReplicaId() == HRegionInfo.DEFAULT_REPLICA_ID) {
+893      status.setStatus("Writing region info on filesystem");
+894      fs.checkRegionInfoOnFilesystem();
+895    } else {
+896      if (LOG.isDebugEnabled()) {
+897        LOG.debug("Skipping creation of .regioninfo file for " + this.getRegionInfo());
+898      }
+899    }
+900
+901    // Initialize all the HStores
+902    status.setStatus("Initializing all the Stores");
+903    long maxSeqId = initializeStores(reporter, status);
+904    this.mvcc.advanceTo(maxSeqId);
+905    if (ServerRegionReplicaUtil.shouldReplayRecoveredEdits(this)) {
+906      List<Store> stores = this.getStores();  // update the stores that we are replaying
+907      try {
+908        for (Store store : stores) {
+909          ((HStore) store).startReplayingFromWAL();
+910        }
+911        // Recover any edits if available.
+912        maxSeqId = Math.max(maxSeqId,
+913          replayRecoveredEditsIfAny(this.fs.getRegionDir(), maxSeqIdInStores, reporter, status));
+914        // Make sure mvcc is up to max.
+915        this.mvcc.advanceTo(maxSeqId);
+916      } finally {
+917        for (Store store : stores) {    // update the stores that we are done replaying
+918          ((HStore)store).stopReplayingFromWAL();
+919        }
+920      }
 921
-922    if (this.writestate.writesEnabled) {
-923      // Remove temporary data left over from old regions
-924      status.setStatus("Cleaning up temporary data from old regions");
-925      fs.cleanupTempDir();
-926    }
-927
-928    if (this.writestate.writesEnabled) {
-929      status.setStatus("Cleaning up detritus from prior splits");
-930      // Get rid of any splits or merges that were lost in-progress.  Clean out
-931      // these directories here on open.  We may be opening a region that was
-932      // being split but we crashed in the middle of it all.
-933      fs.cleanupAnySplitDetritus();
-934      fs.cleanupMergesDir();
-935    }
-936
-937    // Initialize split policy
-938    this.splitPolicy = RegionSplitPolicy.create(this, conf);
-939
-940    // Initialize flush policy
-941    this.flushPolicy = FlushPolicyFactory.create(this, conf);
-942
-943    long lastFlushTime = EnvironmentEdgeManager.currentTime();
-944    for (Store store: stores.values()) {
-945      this.lastStoreFlushTimeMap.put(store, lastFlushTime);
-946    }
-947
-948    // Use maximum of log sequenceid or that which was found in stores
-949    // (particularly if no recovered edits, seqid will be -1).
-950    long nextSeqid = maxSeqId;
-951
-952    // In distributedLogReplay mode, we don't know the last change sequence number because region
-953    // is opened before recovery completes. So we add a safety bumper to avoid new sequence number
-954    // overlaps used sequence numbers
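
The new hunk above writes .regioninfo only from the primary (default) replica. A minimal, self-contained sketch of that guard follows; the class, helper method, and the hard-coded DEFAULT_REPLICA_ID = 0 are illustrative stand-ins, not the actual HRegion/HRegionInfo members.

final class RegionInfoWriteGuard {
    // Mirrors HRegionInfo.DEFAULT_REPLICA_ID; secondary replicas get ids greater than 0.
    static final int DEFAULT_REPLICA_ID = 0;

    // Only the primary replica owns the on-disk .regioninfo marker file.
    static boolean shouldWriteRegionInfo(int replicaId) {
        return replicaId == DEFAULT_REPLICA_ID;
    }

    public static void main(String[] args) {
        System.out.println(shouldWriteRegionInfo(0)); // true: primary writes .regioninfo
        System.out.println(shouldWriteRegionInfo(1)); // false: replica skips the write
    }
}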

[03/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-08-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2aec596e/devapidocs/org/apache/hadoop/hbase/client/class-use/Scan.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/class-use/Scan.html 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/Scan.html
index 5bcc019..23fe2f3 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/class-use/Scan.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/Scan.html
@@ -279,14 +279,6 @@ service.
 
 
 private Scan
-AsyncScanSingleRegionRpcRetryingCaller.scan
-
-
-protected Scan
-ScannerCallable.scan
-
-
-private Scan
 ScannerCallableWithReplicas.scan
 
 
@@ -303,6 +295,14 @@ service.
 
 
 private Scan
+AsyncScanSingleRegionRpcRetryingCaller.scan
+
+
+protected Scan
+ScannerCallable.scan
+
+
+private Scan
 TableSnapshotScanner.scan
 
 
@@ -335,11 +335,11 @@ service.
 
 
 protected Scan
-ScannerCallable.getScan()
+ClientScanner.getScan()
 
 
 protected Scan
-ClientScanner.getScan()
+ScannerCallable.getScan()
 
 
 (package private) Scan
@@ -627,8 +627,8 @@ service.
 
 
 ResultScanner
-HTable.getScanner(Scan scan)
-The underlying HTable must not be closed.
+AsyncTable.getScanner(Scan scan)
+Returns a scanner on the current table as specified by the Scan object.
 
 
 
@@ -644,8 +644,8 @@ service.
 
 
 ResultScanner
-AsyncTable.getScanner(Scan scan)
-Returns a scanner on the current table as specified by the Scan object.
+HTable.getScanner(Scan scan)
+The underlying HTable must not be closed.
 
 
 
@@ -682,16 +682,16 @@ service.
 
 
 void
-AsyncTableImpl.scan(Scan scan, ScanResultConsumer consumer)
-
-
-void
 AsyncTable.scan(Scan scan, ScanResultConsumer consumer)
 The scan API uses the observer pattern.
 
 
+
+void
+AsyncTableImpl.scan(Scan scan, ScanResultConsumer consumer)
+
 
 private void
 AsyncTableImpl.scan0(Scan scan,
@@ -699,11 +699,11 @@ service.
 
 
 CompletableFuture<List<Result>>
-RawAsyncTableImpl.scanAll(Scan scan)
+AsyncTableImpl.scanAll(Scan scan)
 
 
 CompletableFuture<List<Result>>
-AsyncTableImpl.scanAll(Scan scan)
+RawAsyncTableImpl.scanAll(Scan scan)
 
 
 CompletableFuture<List<Result>>
@@ -1291,17 +1291,17 @@ service.
 
 
 private Scan
-TableSnapshotInputFormatImpl.RecordReader.scan
+TableInputFormatBase.scan
+Holds the details for the internal scanner.
+
 
 
 private Scan
-TableRecordReaderImpl.scan
+TableSnapshotInputFormatImpl.RecordReader.scan
 
 
 private Scan
-TableInputFormatBase.scan
-Holds the details for the internal scanner.
-
+TableRecordReaderImpl.scan
 
 
 
@@ -1356,14 +1356,14 @@ service.
 
 
 Scan
-TableSplit.getScan()
-Returns a Scan object from the stored string 
representation.
+TableInputFormatBase.getScan()
+Gets the scan defining the actual details like columns 
etc.
 
 
 
 Scan
-TableInputFormatBase.getScan()
-Gets the scan defining the actual details like columns 
etc.
+TableSplit.getScan()
+Returns a Scan object from the stored string 
representation.
 
 
 
@@ -1574,13 +1574,13 @@ service.
 
 
 void
-TableRecordReaderImpl.setScan(Scanscan)
+TableInputFormatBase.setScan(Scanscan)
 Sets the scan defining the actual details like columns 
etc.
 
 
 
 void
-TableInputFormatBase.setScan(Scanscan)
+TableRecordReaderImpl.setScan(Scanscan)
 Sets the scan defining the actual details like columns 
etc.
 
 
@@ -1647,6 +1647,12 @@ service.
 
 
 
+static void
+MultiTableSnapshotInputFormat.setInput(org.apache.hadoop.conf.Configuration configuration,
+    Map<String, Collection<Scan>> snapshotScans,
+    org.apache.hadoop.fs.Path tmpRestoreDir)
+
+
 void
 MultiTableSnapshotInputFormatImpl.setInput(org.apache.hadoop.conf.Configuration conf,
     Map<

[03/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-31 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1837997e/testdevapidocs/org/apache/hadoop/hbase/master/MockRegionServer.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/master/MockRegionServer.html 
b/testdevapidocs/org/apache/hadoop/hbase/master/MockRegionServer.html
index 65a8a90..5c7b270 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/master/MockRegionServer.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/master/MockRegionServer.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":10,"i65":10,"i66":10,"i67":10,"i68":10,"i69":10,"i70":10,"i71":10,"i72":10,"i73":10,"i74":10,"i75":10,"i76":10,"i77":10,"i78":10,"i79":10,"i80":10,"i81":10,"i82":10,"i83":10};
+var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":10,"i65":10,"i66":10,"i67":10,"i68":10,"i69":10,"i70":10,"i71":10,"i72":10,"i73":10,"i74":10,"i75":10,"i76":10,"i77":10,"i78":10,"i79":10,"i80":10,"i81":10,"i82":10};
 var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -113,7 +113,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-class MockRegionServer
+class MockRegionServer
 extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
 implements 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface,
 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService.BlockingInterface,
 org.apache.hadoop.hbase.regionserver.RegionServerServices
 A mock RegionServer implementation.
@@ -580,39 +580,34 @@ implements 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminSe
 
 
 
-org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionResponse
-splitRegion(org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcControllercontroller,
-   
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionRequestrequest)
-
-
 void
 stop(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringwhy)
 
-
+
 org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerResponse
 stopServer(org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcControllercontroller,
   
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerRequestrequest)
 
-
+
 void
 unassign(byte[]regionName)
 
-
+
 org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationResponse
 updateConfiguration(org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcControllercontroller,

org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationRequestrequest)
 
-
+
 org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse
 updateFavoredNodes(org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcControllercontroller,
   
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateFavoredNodesRequestrequest)
 
-
+
 void
 updateRegionFavoredNodesMapping(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringencodedRegionName,
http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in 
java.util">Listorg.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerNamefavoredNodes)
 
-
+
 org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.WarmupRegionResponse
 warmupRegion(org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcControllercontroller,
 

[03/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a2b2dd19/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html
index c9a18a3..c80f6d8 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html
@@ -2492,2617 +2492,2627 @@
 2484            return;
 2485          }
 2486        }
-2487        errors.reportError(ERROR_CODE.LINGERING_SPLIT_PARENT, "Region "
-2488            + descriptiveName + " is a split parent in META, in HDFS, "
-2489            + "and not deployed on any region server. This could be transient, "
-2490            + "consider to run the catalog janitor first!");
-2491        if (shouldFixSplitParents()) {
-2492          setShouldRerun();
-2493          resetSplitParent(hbi);
-2494        }
-2495      } else if (inMeta && !inHdfs && !isDeployed) {
-2496        errors.reportError(ERROR_CODE.NOT_IN_HDFS_OR_DEPLOYED, "Region "
-2497            + descriptiveName + " found in META, but not in HDFS "
-2498            + "or deployed on any region server.");
-2499        if (shouldFixMeta()) {
-2500          deleteMetaRegion(hbi);
-2501        }
-2502      } else if (inMeta && !inHdfs && isDeployed) {
-2503        errors.reportError(ERROR_CODE.NOT_IN_HDFS, "Region " + descriptiveName
-2504            + " found in META, but not in HDFS, " +
-2505            "and deployed on " + Joiner.on(", ").join(hbi.deployedOn));
-2506        // We treat HDFS as ground truth.  Any information in meta is transient
-2507        // and equivalent data can be regenerated.  So, lets unassign and remove
-2508        // these problems from META.
-2509        if (shouldFixAssignments()) {
-2510          errors.print("Trying to fix unassigned region...");
-2511          undeployRegions(hbi);
-2512        }
-2513        if (shouldFixMeta()) {
-2514          // wait for it to complete
-2515          deleteMetaRegion(hbi);
-2516        }
-2517      } else if (inMeta && inHdfs && !isDeployed && shouldBeDeployed) {
-2518        errors.reportError(ERROR_CODE.NOT_DEPLOYED, "Region " + descriptiveName
-2519            + " not deployed on any region server.");
-2520        tryAssignmentRepair(hbi, "Trying to fix unassigned region...");
-2521      } else if (inMeta && inHdfs && isDeployed && !shouldBeDeployed) {
-2522        errors.reportError(ERROR_CODE.SHOULD_NOT_BE_DEPLOYED,
-2523            "Region " + descriptiveName + " should not be deployed according " +
-2524            "to META, but is deployed on " + Joiner.on(", ").join(hbi.deployedOn));
-2525        if (shouldFixAssignments()) {
-2526          errors.print("Trying to close the region " + descriptiveName);
-2527          setShouldRerun();
-2528          HBaseFsckRepair.fixMultiAssignment(connection, hbi.metaEntry, hbi.deployedOn);
-2529        }
-2530      } else if (inMeta && inHdfs && isMultiplyDeployed) {
-2531        errors.reportError(ERROR_CODE.MULTI_DEPLOYED, "Region " + descriptiveName
-2532            + " is listed in hbase:meta on region server " + hbi.metaEntry.regionServer
-2533            + " but is multiply assigned to region servers " +
-2534            Joiner.on(", ").join(hbi.deployedOn));
-2535        // If we are trying to fix the errors
-2536        if (shouldFixAssignments()) {
-2537          errors.print("Trying to fix assignment error...");
-2538          setShouldRerun();
-2539          HBaseFsckRepair.fixMultiAssignment(connection, hbi.metaEntry, hbi.deployedOn);
-2540        }
-2541      } else if (inMeta && inHdfs && isDeployed && !deploymentMatchesMeta) {
-2542        errors.reportError(ERROR_CODE.SERVER_DOES_NOT_MATCH_META, "Region "
-2543            + descriptiveName + " listed in hbase:meta on region server " +
-2544            hbi.metaEntry.regionServer + " but found on region server " +
-2545            hbi.deployedOn.get(0));
-2546        // If we are trying to fix the errors
-2547        if (shouldFixAssignments()) {
-2548          errors.print("Trying to fix assignment error...");
-2549          setShouldRerun();
-2550          HBaseFsckRepair.fixMultiAssignment(connection, hbi.metaEntry, hbi.deployedOn);
-2551          HBaseFsckRepair.waitUntilAssigned(admin, hbi.getHdfsHRI());
-2552        }
-2553      } else {
-2554        errors.reportError(ERROR_CODE.UNKNOWN, "Region " + descriptiveName +
-2555            " is in an unforeseen state:" +
-2556            " inMeta=" + inMeta +
-2557            " inHdfs=" + inHdfs +
-2558            " isDeployed=" + isDeployed +
-2559            " isMultiplyDeployed=" + isMultiplyDeployed +
-2560            " deploymentMatchesMeta=" + deploymentMatchesMeta +
-2561            " shouldBeDeployed=" + shouldBeDeployed);
-2562      }
-2563    }
-2564
-2565    /**
-2566     * Checks tables integrity. Goes over all regions and scans the tables.
-2567     * Collects all the pieces for

[03/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-26 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/21766f4a/devapidocs/src-html/org/apache/hadoop/hbase/security/visibility/VisibilityNewVersionBehaivorTracker.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/security/visibility/VisibilityNewVersionBehaivorTracker.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/security/visibility/VisibilityNewVersionBehaivorTracker.html
new file mode 100644
index 000..7f46600
--- /dev/null
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/security/visibility/VisibilityNewVersionBehaivorTracker.html
@@ -0,0 +1,274 @@
+http://www.w3.org/TR/html4/loose.dtd;>
+
+
+Source code
+
+
+
+
+001/**
+002 *
+003 * Licensed to the Apache Software 
Foundation (ASF) under one
+004 * or more contributor license 
agreements.  See the NOTICE file
+005 * distributed with this work for 
additional information
+006 * regarding copyright ownership.  The 
ASF licenses this file
+007 * to you under the Apache License, 
Version 2.0 (the
+008 * "License"); you may not use this file 
except in compliance
+009 * with the License.  You may obtain a 
copy of the License at
+010 *
+011 * 
http://www.apache.org/licenses/LICENSE-2.0
+012 *
+013 * Unless required by applicable law or 
agreed to in writing, software
+014 * distributed under the License is 
distributed on an "AS IS" BASIS,
+015 * WITHOUT WARRANTIES OR CONDITIONS OF 
ANY KIND, either express or implied.
+016 * See the License for the specific 
language governing permissions and
+017 * limitations under the License.
+018 */
+019package 
org.apache.hadoop.hbase.security.visibility;
+020
+021import java.io.IOException;
+022import java.util.ArrayList;
+023import java.util.HashMap;
+024import java.util.List;
+025import java.util.Map;
+026import java.util.NavigableMap;
+027import java.util.NavigableSet;
+028import java.util.SortedMap;
+029import java.util.SortedSet;
+030import java.util.TreeMap;
+031import java.util.TreeSet;
+032
+033import org.apache.commons.logging.Log;
+034import 
org.apache.commons.logging.LogFactory;
+035import org.apache.hadoop.hbase.Cell;
+036import 
org.apache.hadoop.hbase.KeyValue;
+037import org.apache.hadoop.hbase.Tag;
+038import 
org.apache.hadoop.hbase.regionserver.querymatcher.NewVersionBehaviorTracker;
+039
+040/**
+041 * Similar to MvccSensitiveTracker but tracks the visibility expression also before
+042 * deciding if a Cell can be considered deleted
+043 */
+044public class VisibilityNewVersionBehaivorTracker extends NewVersionBehaviorTracker {
+045
+046  private static final Log LOG = LogFactory.getLog(VisibilityNewVersionBehaivorTracker.class);
+047
+048  public VisibilityNewVersionBehaivorTracker(NavigableSet<byte[]> columns, int minVersion,
+049      int maxVersion,
+050      int resultMaxVersions, long oldestUnexpiredTS) {
+051    super(columns, minVersion, maxVersion, resultMaxVersions, oldestUnexpiredTS);
+052  }
+053
+054  private static class TagInfo {
+055    List<Tag> tags;
+056    Byte format;
+057
+058    private TagInfo(Cell c) {
+059      tags = new ArrayList<>();
+060      format = VisibilityUtils.extractVisibilityTags(c, tags);
+061    }
+062
+063    private TagInfo() {
+064      tags = new ArrayList<>();
+065    }
+066  }
+067
+068  private class VisibilityDeleteVersionsNode extends DeleteVersionsNode {
+069    private TagInfo tagInfo;
+070
+071    // timestamp, set<mvcc>
+072    // Key is ts of version deletes, value is its mvccs.
+073    // We may delete more than one time for a version.
+074    private Map<Long, SortedMap<Long, TagInfo>> deletesMap = new HashMap<>();
+075
+076    // mvcc, set<mvcc>
+077    // Key is mvcc of version deletes, value is mvcc of visible puts before the delete effect.
+078    private NavigableMap<Long, SortedSet<Long>> mvccCountingMap = new TreeMap<>();
+079
+080    protected VisibilityDeleteVersionsNode(long ts, long mvcc, TagInfo tagInfo) {
+081      this.tagInfo = tagInfo;
+082      this.ts = ts;
+083      this.mvcc = mvcc;
+084      mvccCountingMap.put(Long.MAX_VALUE, new TreeSet<Long>());
+085    }
+086
+087    protected VisibilityDeleteVersionsNode getDeepCopy() {
+088      VisibilityDeleteVersionsNode node = new VisibilityDeleteVersionsNode(ts, mvcc, tagInfo);
+089      for (Map.Entry<Long, SortedMap<Long, TagInfo>> e : deletesMap.entrySet()) {
+090        node.deletesMap.put(e.getKey(), new TreeMap<>(e.getValue()));
+091      }
+092      for (Map.Entry<Long, SortedSet<Long>> e : mvccCountingMap.entrySet()) {
+093        node.mvccCountingMap.put(e.getKey(), new TreeSet<>(e.getValue()));
+094      }
+095      return node;
+096    }
+097
+098    public void addVersionDelete(Cell cell) {
+099      SortedMap<Long, TagInfo> set = deletesMap.get(cell.getTimestamp());
+100      if (set == null) {
+101        set = new TreeMap<>();
+102        deletesMap.put(cell.getTimestamp(), set);
+103      }
+104      set.put(cell.getSequenceId(), new TagInfo(cell));
+105      // The init

[03/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-24 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2d5075d7/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFormatReader.EntryIterator.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFormatReader.EntryIterator.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFormatReader.EntryIterator.html
index 0b1b520..44308b5 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFormatReader.EntryIterator.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFormatReader.EntryIterator.html
@@ -33,854 +33,840 @@
 025import org.apache.commons.logging.Log;
 026import 
org.apache.commons.logging.LogFactory;
 027import 
org.apache.hadoop.fs.FSDataInputStream;
-028import 
org.apache.hadoop.hbase.ProcedureInfo;
-029import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
-030import 
org.apache.hadoop.hbase.classification.InterfaceStability;
-031import 
org.apache.hadoop.hbase.procedure2.Procedure;
-032import 
org.apache.hadoop.hbase.procedure2.ProcedureUtil;
-033import 
org.apache.hadoop.hbase.procedure2.store.ProcedureStore.ProcedureIterator;
-034import 
org.apache.hadoop.hbase.procedure2.store.ProcedureStoreTracker;
-035import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos;
-036import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.ProcedureWALEntry;
-037
-038/**
-039 * Helper class that loads the procedures 
stored in a WAL
-040 */
-041@InterfaceAudience.Private
-042@InterfaceStability.Evolving
-043public class ProcedureWALFormatReader {
-044  private static final Log LOG = 
LogFactory.getLog(ProcedureWALFormatReader.class);
-045
-046  // 
==
-047  //  We read the WALs in reverse order 
from the newest to the oldest.
-048  //  We have different entry types:
-049  //   - INIT: Procedure submitted by the 
user (also known as 'root procedure')
-050  //   - INSERT: Children added to the 
procedure parentId:[childId, ...]
-051  //   - UPDATE: The specified procedure 
was updated
-052  //   - DELETE: The procedure was 
removed (finished/rolledback and result TTL expired)
-053  //
-054  // In the WAL we can find multiple 
times the same procedure as UPDATE or INSERT.
-055  // We read the WAL from top to bottom, 
so every time we find an entry of the
-056  // same procedure, that will be the 
"latest" update (Caveat: with multiple threads writing
-057  // the store, this assumption does not 
hold).
-058  //
-059  // We keep two in-memory maps:
-060  //  - localProcedureMap: is the map 
containing the entries in the WAL we are processing
-061  //  - procedureMap: is the map 
containing all the procedures we found up to the WAL in process.
-062  // localProcedureMap is merged with the 
procedureMap once we reach the WAL EOF.
-063  //
-064  // Since we are reading the WALs in 
reverse order (newest to oldest),
-065  // if we find an entry related to a 
procedure we already have in 'procedureMap' we can discard it.
-066  //
-067  // The WAL is append-only so the last 
procedure in the WAL is the one that
-068  // was in execution at the time we 
crashed/closed the server.
-069  // Given that, the procedure replay order can be inferred by the WAL order.
-070  //
-071  // Example:
-072  //    WAL-2: [A, B, A, C, D]
-073  //    WAL-1: [F, G, A, F, B]
-074  //    Replay-Order: [D, C, A, B, F, G]
-075  //
-076  // The "localProcedureMap" keeps a "replayOrder" list. Every time we add the
-077  // record to the map that record is moved to the head of the "replayOrder" list.
-078  // Using the example above:
-079  //    WAL-2 localProcedureMap.replayOrder is [D, C, A, B]
-080  //    WAL-1 localProcedureMap.replayOrder is [F, G]
-081  //
-082  // Each time we reach the WAL-EOF, the "replayOrder" list is merged/appended in 'procedureMap'
-083  // so using the example above we end up with: [D, C, A, B] + [F, G] as replay order.
-084  //
-085  //  Fast Start: INIT/INSERT record and 
StackIDs
-086  // 
-
-087  // We have two special records, INIT 
and INSERT, that track the first time
-088  // the procedure was added to the WAL. 
We can use this information to be able
-089  // to start procedures before reaching 
the end of the WAL, or before reading all WALs.
-090  // But in some cases, the WAL with that 
record can be already gone.
-091  // As an alternative, we can use the 
stackIds on each procedure,
-092  // to identify when a procedure is 
ready to start.
-093  // If there are gaps in the sum of the 
stackIds we need to read more WALs.
-094  //
-095  // Example (all procs child of A):
-096  //   WAL-2: [A, B]   A 
stackIds = [0, 4], B stackIds = [1, 5]
-097  //   WAL-1: [A, B, C, 
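
The removed comment block above explains how replay order is inferred: WALs are read newest to oldest, a procedure that reappears within a WAL moves to the head of that WAL's local replayOrder list, and anything already seen in a newer WAL is discarded. A small illustrative sketch of that bookkeeping, using plain strings in place of procedure entries (not the actual ProcedureWALFormatReader code):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.LinkedHashSet;
import java.util.List;

public final class ReplayOrderSketch {
    // wals are given newest first; each inner list is one WAL read top to bottom.
    static List<String> replayOrder(List<List<String>> walsNewestFirst) {
        LinkedHashSet<String> merged = new LinkedHashSet<>();      // 'procedureMap' order
        for (List<String> wal : walsNewestFirst) {
            LinkedHashSet<String> local = new LinkedHashSet<>();   // 'localProcedureMap' order
            for (String proc : wal) {
                if (merged.contains(proc)) {
                    continue;                       // already found in a newer WAL: discard
                }
                local.remove(proc);                 // re-appearance: move to the head
                LinkedHashSet<String> reordered = new LinkedHashSet<>();
                reordered.add(proc);
                reordered.addAll(local);
                local = reordered;
            }
            merged.addAll(local);                   // merge this WAL's order at WAL-EOF
        }
        return new ArrayList<>(merged);
    }

    public static void main(String[] args) {
        List<List<String>> wals = Arrays.asList(
            Arrays.asList("A", "B", "A", "C", "D"),   // WAL-2 (newest)
            Arrays.asList("F", "G", "A", "F", "B"));  // WAL-1 (oldest)
        System.out.println(replayOrder(wals));        // prints [D, C, A, B, F, G]
    }
}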

[03/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-23 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0383a9c2/testdevapidocs/org/apache/hadoop/hbase/IntegrationTestDDLMasterFailover.CreateTableAction.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/IntegrationTestDDLMasterFailover.CreateTableAction.html
 
b/testdevapidocs/org/apache/hadoop/hbase/IntegrationTestDDLMasterFailover.CreateTableAction.html
index c3dc6a0..9d3fdd0 100644
--- 
a/testdevapidocs/org/apache/hadoop/hbase/IntegrationTestDDLMasterFailover.CreateTableAction.html
+++ 
b/testdevapidocs/org/apache/hadoop/hbase/IntegrationTestDDLMasterFailover.CreateTableAction.html
@@ -123,7 +123,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-private class IntegrationTestDDLMasterFailover.CreateTableAction
+private class IntegrationTestDDLMasterFailover.CreateTableAction
 extends IntegrationTestDDLMasterFailover.TableAction
 
 
@@ -178,7 +178,7 @@ extends Method and Description
 
 
-private 
org.apache.hadoop.hbase.HTableDescriptor
+private 
org.apache.hadoop.hbase.client.TableDescriptor
 createTableDesc()
 
 
@@ -220,7 +220,7 @@ extends 
 
 CreateTableAction
-privateCreateTableAction()
+privateCreateTableAction()
 
 
 
@@ -237,7 +237,7 @@ extends 
 
 perform
-voidperform()
+voidperform()
   throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 
 Specified by:
@@ -253,7 +253,7 @@ extends 
 
 createTableDesc
-privateorg.apache.hadoop.hbase.HTableDescriptorcreateTableDesc()
+privateorg.apache.hadoop.hbase.client.TableDescriptorcreateTableDesc()
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0383a9c2/testdevapidocs/org/apache/hadoop/hbase/IntegrationTestDDLMasterFailover.DeleteColumnFamilyAction.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/IntegrationTestDDLMasterFailover.DeleteColumnFamilyAction.html
 
b/testdevapidocs/org/apache/hadoop/hbase/IntegrationTestDDLMasterFailover.DeleteColumnFamilyAction.html
index a0bb650..1504c33 100644
--- 
a/testdevapidocs/org/apache/hadoop/hbase/IntegrationTestDDLMasterFailover.DeleteColumnFamilyAction.html
+++ 
b/testdevapidocs/org/apache/hadoop/hbase/IntegrationTestDDLMasterFailover.DeleteColumnFamilyAction.html
@@ -128,7 +128,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-private class IntegrationTestDDLMasterFailover.DeleteColumnFamilyAction
+private class IntegrationTestDDLMasterFailover.DeleteColumnFamilyAction
 extends IntegrationTestDDLMasterFailover.ColumnAction
 
 
@@ -192,7 +192,7 @@ extends IntegrationTestDDLMasterFailover.ColumnAction
-selectFamily
+selectFamily
 
 
 
@@ -228,7 +228,7 @@ extends 
 
 DeleteColumnFamilyAction
-privateDeleteColumnFamilyAction()
+privateDeleteColumnFamilyAction()
 
 
 
@@ -245,7 +245,7 @@ extends 
 
 perform
-voidperform()
+voidperform()
   throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 
 Specified by:

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0383a9c2/testdevapidocs/org/apache/hadoop/hbase/IntegrationTestDDLMasterFailover.DeleteNamespaceAction.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/IntegrationTestDDLMasterFailover.DeleteNamespaceAction.html
 
b/testdevapidocs/org/apache/hadoop/hbase/IntegrationTestDDLMasterFailover.DeleteNamespaceAction.html
index eced443..4f87dc3 100644
--- 
a/testdevapidocs/org/apache/hadoop/hbase/IntegrationTestDDLMasterFailover.DeleteNamespaceAction.html
+++ 
b/testdevapidocs/org/apache/hadoop/hbase/IntegrationTestDDLMasterFailover.DeleteNamespaceAction.html
@@ -123,7 +123,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-private class IntegrationTestDDLMasterFailover.DeleteNamespaceAction
+private class IntegrationTestDDLMasterFailover.DeleteNamespaceAction
 extends IntegrationTestDDLMasterFailover.NamespaceAction
 
 
@@ -223,7 +223,7 @@ extends 
 
 DeleteNamespaceAction
-privateDeleteNamespaceAction()
+privateDeleteNamespaceAction()
 
 
 
@@ -240,7 +240,7 @@ extends 
 
 perform
-voidperform()
+voidperform()
   throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 
 Specified by:

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0383a9c2/testdevapidocs/org/apache/hadoop/hbase/IntegrationTestDDLMasterFailover.MasterAction.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/IntegrationTestDDLMasterFailover.MasterAction.html
 
b/testdevapidocs/org/apache/hadoop/hbase/IntegrationTestDDLMasterFailover.MasterAction.html
index 884dcec..2907faa 100644
--- 
a/testdevapidocs/org/apache/hadoop/hbase/IntegrationTestDDLMasterFailover.MasterAction.html
+++ 

[03/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-22 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f391bcef/devapidocs/src-html/org/apache/hadoop/hbase/client/Increment.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/Increment.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/Increment.html
index 366fcc2..a49bc05 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/Increment.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/Increment.html
@@ -92,254 +92,260 @@
 084    for (Map.Entry<String, byte[]> entry : i.getAttributesMap().entrySet()) {
 085      this.setAttribute(entry.getKey(), entry.getValue());
 086    }
-087  }
-088
-089  /**
-090   * Add the specified KeyValue to this operation.
-091   * @param cell individual Cell
-092   * @return this
-093   * @throws java.io.IOException e
-094   */
-095  public Increment add(Cell cell) throws IOException {
-096    byte [] family = CellUtil.cloneFamily(cell);
-097    List<Cell> list = getCellList(family);
-098    //Checking that the row of the kv is the same as the put
-099    if (!CellUtil.matchingRow(cell, this.row)) {
-100      throw new WrongRowIOException("The row in " + cell +
-101        " doesn't match the original one " +  Bytes.toStringBinary(this.row));
-102    }
-103    list.add(cell);
-104    familyMap.put(family, list);
-105    return this;
-106  }
-107
-108  /**
-109   * Increment the column from the specific family with the specified qualifier
-110   * by the specified amount.
-111   * <p>
-112   * Overrides previous calls to addColumn for this family and qualifier.
-113   * @param family family name
-114   * @param qualifier column qualifier
-115   * @param amount amount to increment by
-116   * @return the Increment object
-117   */
-118  public Increment addColumn(byte [] family, byte [] qualifier, long amount) {
-119    if (family == null) {
-120      throw new IllegalArgumentException("family cannot be null");
-121    }
-122    List<Cell> list = getCellList(family);
-123    KeyValue kv = createPutKeyValue(family, qualifier, ts, Bytes.toBytes(amount));
-124    list.add(kv);
-125    familyMap.put(CellUtil.cloneFamily(kv), list);
-126    return this;
-127  }
-128
-129  /**
-130   * Gets the TimeRange used for this increment.
-131   * @return TimeRange
-132   */
-133  public TimeRange getTimeRange() {
-134    return this.tr;
-135  }
-136
-137  /**
-138   * Sets the TimeRange to be used on the Get for this increment.
-139   * <p>
-140   * This is useful for when you have counters that only last for specific
-141   * periods of time (ie. counters that are partitioned by time).  By setting
-142   * the range of valid times for this increment, you can potentially gain
-143   * some performance with a more optimal Get operation.
-144   * <p>
-145   * This range is used as [minStamp, maxStamp).
-146   * @param minStamp minimum timestamp value, inclusive
-147   * @param maxStamp maximum timestamp value, exclusive
-148   * @throws IOException if invalid time range
-149   * @return this
-150   */
-151  public Increment setTimeRange(long minStamp, long maxStamp)
-152  throws IOException {
-153    tr = new TimeRange(minStamp, maxStamp);
-154    return this;
-155  }
-156
-157  /**
-158   * @param returnResults True (default) if the increment operation should return the results. A
-159   *  client that is not interested in the result can save network bandwidth setting this
-160   *  to false.
-161   */
-162  public Increment setReturnResults(boolean returnResults) {
-163    super.setReturnResults(returnResults);
-164    return this;
-165  }
-166
-167  /**
-168   * @return current setting for returnResults
-169   */
-170  // This method makes public the superclasses's protected method.
-171  public boolean isReturnResults() {
-172    return super.isReturnResults();
-173  }
-174
-175  /**
-176   * Method for retrieving the number of families to increment from
-177   * @return number of families
-178   */
-179  @Override
-180  public int numFamilies() {
-181    return this.familyMap.size();
-182  }
-183
-184  /**
-185   * Method for checking if any families have been inserted into this Increment
-186   * @return true if familyMap is non empty false otherwise
-187   */
-188  public boolean hasFamilies() {
-189    return !this.familyMap.isEmpty();
-190  }
-191
-192  /**
-193   * Before 0.95, when you called Increment#getFamilyMap(), you got back
-194   * a map of families to a list of Longs.  Now, {@link #getFamilyCellMap()} returns
-195   * families by list of Cells.  This method has been added so you can have the
-196   * old behavior.
-197   * @return Map of families to a Map of qualifiers and their Long increments.
-198   * @since 0.95.0
-199   */
-200  public Map<byte[], NavigableMap<byte [], Long>> getFamilyMapOfLongs() {
-201    NavigableMap<byte[], List<Cell>> map = super.getFamilyCellMap();
-202    Map<byte [], NavigableMap<byte[],
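
The removed Increment source above documents addColumn, setTimeRange and setReturnResults. A short usage sketch of that client API; the configuration, table name, and column names are placeholders.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Increment;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class IncrementExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        Increment inc = new Increment(Bytes.toBytes("row1"));
        inc.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("hits"), 1L);  // bump the counter by 1
        inc.setTimeRange(0L, Long.MAX_VALUE);   // optional: restrict which cells are read
        inc.setReturnResults(false);            // skip shipping the new counter values back
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("metrics"))) {
            table.increment(inc);
        }
    }
}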

[03/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-21 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ca5b0275/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.BlockAdder.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.BlockAdder.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.BlockAdder.html
index cf44d69..3fdfe16 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.BlockAdder.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.BlockAdder.html
@@ -36,8 +36,8 @@
 028import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_USE_DN_HOSTNAME_DEFAULT;
 029import static 
org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage.PIPELINE_SETUP_CREATE;
 030
-031import 
com.google.common.base.Throwables;
-032import 
com.google.common.collect.ImmutableMap;
+031import 
org.apache.hadoop.hbase.shaded.com.google.common.base.Throwables;
+032import 
org.apache.hadoop.hbase.shaded.com.google.common.collect.ImmutableMap;
 033import 
com.google.protobuf.CodedOutputStream;
 034
 035import io.netty.bootstrap.Bootstrap;

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ca5b0275/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.CancelOnClose.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.CancelOnClose.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.CancelOnClose.html
index cf44d69..3fdfe16 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.CancelOnClose.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.CancelOnClose.html
@@ -36,8 +36,8 @@
 028import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_USE_DN_HOSTNAME_DEFAULT;
 029import static 
org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage.PIPELINE_SETUP_CREATE;
 030
-031import 
com.google.common.base.Throwables;
-032import 
com.google.common.collect.ImmutableMap;
+031import 
org.apache.hadoop.hbase.shaded.com.google.common.base.Throwables;
+032import 
org.apache.hadoop.hbase.shaded.com.google.common.collect.ImmutableMap;
 033import 
com.google.protobuf.CodedOutputStream;
 034
 035import io.netty.bootstrap.Bootstrap;

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ca5b0275/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.ChecksumCreater.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.ChecksumCreater.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.ChecksumCreater.html
index cf44d69..3fdfe16 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.ChecksumCreater.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.ChecksumCreater.html
@@ -36,8 +36,8 @@
 028import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_USE_DN_HOSTNAME_DEFAULT;
 029import static 
org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage.PIPELINE_SETUP_CREATE;
 030
-031import 
com.google.common.base.Throwables;
-032import 
com.google.common.collect.ImmutableMap;
+031import 
org.apache.hadoop.hbase.shaded.com.google.common.base.Throwables;
+032import 
org.apache.hadoop.hbase.shaded.com.google.common.collect.ImmutableMap;
 033import 
com.google.protobuf.CodedOutputStream;
 034
 035import io.netty.bootstrap.Bootstrap;

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ca5b0275/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.DFSClientAdaptor.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.DFSClientAdaptor.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.DFSClientAdaptor.html
index cf44d69..3fdfe16 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.DFSClientAdaptor.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.DFSClientAdaptor.html
@@ -36,8 +36,8 @@
 028import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_USE_DN_HOSTNAME_DEFAULT;
 029import static 
org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage.PIPELINE_SETUP_CREATE;
 030
-031import 
com.google.common.base.Throwables;

[03/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-19 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9eba7fcf/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.ZookeeperTask.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.ZookeeperTask.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.ZookeeperTask.html
index e1fbce4..873e17f 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.ZookeeperTask.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.ZookeeperTask.html
@@ -1089,497 +1089,498 @@
 1081          }
 1082        }
 1083        Map<String, AtomicLong> actualReadTableLatency = regionSink.getReadLatencyMap();
-1084        for (String tableName : this.configuredReadTableTimeouts.keySet()) {
-1085          if (actualReadTableLatency.containsKey(tableName)) {
-1086            Long actual = actualReadTableLatency.get(tableName).longValue();
-1087            Long configured = this.configuredReadTableTimeouts.get(tableName);
-1088            LOG.info("Read operation for " + tableName + " took " + actual +
-1089              " ms. The configured read timeout was " + configured + " ms.");
-1090            if (actual > configured) {
-1091              LOG.error("Read operation for " + tableName + " exceeded the configured read timeout.");
-1092            }
-1093          } else {
-1094            LOG.error("Read operation for " + tableName + " failed!");
-1095          }
-1096        }
-1097        if (this.writeSniffing) {
-1098          String writeTableStringName = this.writeTableName.getNameAsString();
-1099          long actualWriteLatency = regionSink.getWriteLatency().longValue();
-1100          LOG.info("Write operation for " + writeTableStringName + " took " + actualWriteLatency + " ms. The configured write timeout was " +
-1101            this.configuredWriteTableTimeout + " ms.");
-1102          // Check that the writeTable write operation latency does not exceed the configured timeout.
-1103          if (actualWriteLatency > this.configuredWriteTableTimeout) {
-1104            LOG.error("Write operation for " + writeTableStringName + " exceeded the configured write timeout.");
-1105          }
-1106        }
-1107      } catch (Exception e) {
-1108        LOG.error("Run regionMonitor failed", e);
-1109        this.errorCode = ERROR_EXIT_CODE;
-1110      }
-1111    }
-1112    this.done = true;
-1113  }
-1114
-1115  private String[] generateMonitorTables(String[] monitorTargets) throws IOException {
-1116    String[] returnTables = null;
-1117
-1118    if (this.useRegExp) {
-1119      Pattern pattern = null;
-1120      HTableDescriptor[] tds = null;
-1121      Set<String> tmpTables = new TreeSet<>();
-1122      try {
-1123        if (LOG.isDebugEnabled()) {
-1124          LOG.debug(String.format("reading list of tables"));
-1125        }
-1126        tds = this.admin.listTables(pattern);
-1127        if (tds == null) {
-1128          tds = new HTableDescriptor[0];
-1129        }
-1130        for (String monitorTarget : monitorTargets) {
-1131          pattern = Pattern.compile(monitorTarget);
-1132          for (HTableDescriptor td : tds) {
-1133            if (pattern.matcher(td.getNameAsString()).matches()) {
-1134              tmpTables.add(td.getNameAsString());
-1135            }
-1136          }
-1137        }
-1138      } catch (IOException e) {
-1139        LOG.error("Communicate with admin failed", e);
-1140        throw e;
-1141      }
-1142
-1143      if (tmpTables.size() > 0) {
-1144        returnTables = tmpTables.toArray(new String[tmpTables.size()]);
-1145      } else {
-1146        String msg = "No HTable found, tablePattern:" + Arrays.toString(monitorTargets);
-1147        LOG.error(msg);
-1148        this.errorCode = INIT_ERROR_EXIT_CODE;
-1149        throw new TableNotFoundException(msg);
-1150      }
-1151    } else {
-1152      returnTables = monitorTargets;
-1153    }
-1154
-1155    return returnTables;
-1156  }
-1157
-1158  /*
-1159   * canary entry point to monitor all the tables.
-1160   */
-1161  private List<Future<Void>> sniff(TaskType taskType, RegionStdOutSink regionSink) throws Exception {
-1162    if (LOG.isDebugEnabled()) {
-1163      LOG.debug(String.format("reading list of tables"));
-1164    }
-1165    List<Future<Void>> taskFutures = new LinkedList<>();
-1166    for (HTableDescriptor table : admin.listTables()) {
-1167      if (admin.isTableEnabled(table.getTableName())
-1168          && (!table.getTableName().equals(writeTableName))) {
-1169        AtomicLong readLatency = regionSink.initializeAndGetReadLatencyForTable(table.getNameAsString());
-1170        taskFutures.addAll(Canary.sniff(admin, sink, table, executor, taskType, this.rawScanEnabled, readLatency));
-1171
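
The Canary code above compares each table's observed read latency against its configured timeout and logs an error on overrun or on a failed read. A condensed, self-contained sketch of that per-table check, using plain longs in place of the AtomicLong sinks the real tool uses:

import java.util.HashMap;
import java.util.Map;

public final class ReadTimeoutCheckSketch {
    // Returns one verdict string per configured table.
    static Map<String, String> check(Map<String, Long> actualMs, Map<String, Long> configuredMs) {
        Map<String, String> verdicts = new HashMap<>();
        for (Map.Entry<String, Long> e : configuredMs.entrySet()) {
            Long actual = actualMs.get(e.getKey());
            if (actual == null) {
                verdicts.put(e.getKey(), "read failed");
            } else if (actual > e.getValue()) {
                verdicts.put(e.getKey(), "exceeded configured timeout: " + actual + " ms > " + e.getValue() + " ms");
            } else {
                verdicts.put(e.getKey(), "ok: " + actual + " ms");
            }
        }
        return verdicts;
    }

    public static void main(String[] args) {
        Map<String, Long> configured = new HashMap<>();
        configured.put("t1", 500L);
        Map<String, Long> actual = new HashMap<>();
        actual.put("t1", 750L);
        System.out.println(check(actual, configured)); // t1 exceeds its 500 ms budget
    }
}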

[03/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/17128d27/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.CreateNamespaceProcedureBiConsumer.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.CreateNamespaceProcedureBiConsumer.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.CreateNamespaceProcedureBiConsumer.html
index eb9099e..35d5549 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.CreateNamespaceProcedureBiConsumer.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.CreateNamespaceProcedureBiConsumer.html
@@ -232,2671 +232,2699 @@
 224import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ShutdownResponse;
 225import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SnapshotRequest;
 226import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SnapshotResponse;
-227import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.StopMasterRequest;
-228import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.StopMasterResponse;
-229import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableRequest;
-230import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableResponse;
-231import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionRequest;
-232import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionResponse;
-233import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest;
-234import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse;
-235import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerRequest;
-236import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerResponse;
-237import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerRequest;
-238import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerResponse;
-239import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigRequest;
-240import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse;
-241import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersRequest;
-242import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersResponse;
-243import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest;
-244import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse;
-245import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigRequest;
-246import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigResponse;
-247import 
org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos;
-248import 
org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
-249import 
org.apache.hadoop.hbase.snapshot.RestoreSnapshotException;
-250import 
org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
-251import 
org.apache.hadoop.hbase.util.Bytes;
-252import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-253import 
org.apache.hadoop.hbase.util.ForeignExceptionUtil;
-254import 
org.apache.hadoop.hbase.util.Pair;
-255
-256/**
-257 * The implementation of AsyncAdmin.
-258 */
-259@InterfaceAudience.Private
-260public class RawAsyncHBaseAdmin 
implements AsyncAdmin {
-261  public static final String 
FLUSH_TABLE_PROCEDURE_SIGNATURE = "flush-table-proc";
-262
-263  private static final Log LOG = 
LogFactory.getLog(AsyncHBaseAdmin.class);
+227import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SplitTableRegionRequest;
+228import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SplitTableRegionResponse;
+229import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.StopMasterRequest;
+230import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.StopMasterResponse;
+231import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableRequest;
+232import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableResponse;
+233import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionRequest;
+234import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionResponse;
+235import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest;
+236import 

[03/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-12 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2777c693/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestStore.MyStoreHook.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestStore.MyStoreHook.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestStore.MyStoreHook.html
new file mode 100644
index 000..6c65fd1
--- /dev/null
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestStore.MyStoreHook.html
@@ -0,0 +1,1810 @@
+http://www.w3.org/TR/html4/loose.dtd;>
+
+
+Source code
+
+
+
+
+001/*
+002 *
+003 * Licensed to the Apache Software 
Foundation (ASF) under one
+004 * or more contributor license 
agreements.  See the NOTICE file
+005 * distributed with this work for 
additional information
+006 * regarding copyright ownership.  The 
ASF licenses this file
+007 * to you under the Apache License, 
Version 2.0 (the
+008 * "License"); you may not use this file 
except in compliance
+009 * with the License.  You may obtain a 
copy of the License at
+010 *
+011 * 
http://www.apache.org/licenses/LICENSE-2.0
+012 *
+013 * Unless required by applicable law or 
agreed to in writing, software
+014 * distributed under the License is 
distributed on an "AS IS" BASIS,
+015 * WITHOUT WARRANTIES OR CONDITIONS OF 
ANY KIND, either express or implied.
+016 * See the License for the specific 
language governing permissions and
+017 * limitations under the License.
+018 */
+019
+020package org.apache.hadoop.hbase.regionserver;
+021
+022import static org.junit.Assert.assertEquals;
+023import static org.junit.Assert.assertFalse;
+024import static org.junit.Assert.assertNull;
+025import static org.junit.Assert.assertTrue;
+026import static org.mockito.Matchers.any;
+027import static org.mockito.Mockito.spy;
+028import static org.mockito.Mockito.times;
+029import static org.mockito.Mockito.verify;
+030
+031import java.io.IOException;
+032import java.lang.ref.SoftReference;
+033import java.security.PrivilegedExceptionAction;
+034import java.util.ArrayList;
+035import java.util.Arrays;
+036import java.util.Collection;
+037import java.util.Collections;
+038import java.util.Iterator;
+039import java.util.List;
+040import java.util.ListIterator;
+041import java.util.NavigableSet;
+042import java.util.TreeSet;
+043import java.util.concurrent.ConcurrentSkipListSet;
+044import java.util.concurrent.CountDownLatch;
+045import java.util.concurrent.ExecutorService;
+046import java.util.concurrent.Executors;
+047import java.util.concurrent.TimeUnit;
+048import java.util.concurrent.atomic.AtomicBoolean;
+049import java.util.concurrent.atomic.AtomicInteger;
+050import java.util.function.Consumer;
+051
+052import org.apache.commons.logging.Log;
+053import org.apache.commons.logging.LogFactory;
+054import org.apache.hadoop.conf.Configuration;
+055import org.apache.hadoop.fs.FSDataOutputStream;
+056import org.apache.hadoop.fs.FileStatus;
+057import org.apache.hadoop.fs.FileSystem;
+058import org.apache.hadoop.fs.FilterFileSystem;
+059import org.apache.hadoop.fs.LocalFileSystem;
+060import org.apache.hadoop.fs.Path;
+061import org.apache.hadoop.fs.permission.FsPermission;
+062import org.apache.hadoop.hbase.Cell;
+063import org.apache.hadoop.hbase.CellComparator;
+064import org.apache.hadoop.hbase.CellUtil;
+065import org.apache.hadoop.hbase.HBaseConfiguration;
+066import org.apache.hadoop.hbase.HBaseTestingUtility;
+067import org.apache.hadoop.hbase.HColumnDescriptor;
+068import org.apache.hadoop.hbase.HConstants;
+069import org.apache.hadoop.hbase.HRegionInfo;
+070import org.apache.hadoop.hbase.HTableDescriptor;
+071import org.apache.hadoop.hbase.KeyValue;
+072import org.apache.hadoop.hbase.MemoryCompactionPolicy;
+073import org.apache.hadoop.hbase.TableName;
+074import org.apache.hadoop.hbase.client.Get;
+075import org.apache.hadoop.hbase.client.Scan;
+076import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
+077import org.apache.hadoop.hbase.io.compress.Compression;
+078import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
+079import org.apache.hadoop.hbase.io.hfile.CacheConfig;
+080import org.apache.hadoop.hbase.io.hfile.HFile;
+081import org.apache.hadoop.hbase.io.hfile.HFileContext;
+082import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
+083import org.apache.hadoop.hbase.monitoring.MonitoredTask;
+084import org.apache.hadoop.hbase.regionserver.compactions.CompactionConfiguration;
+085import org.apache.hadoop.hbase.regionserver.compactions.DefaultCompactor;
+086import org.apache.hadoop.hbase.regionserver.querymatcher.ScanQueryMatcher;
+087import org.apache.hadoop.hbase.regionserver.throttle.NoLimitThroughputController;
+088import org.apache.hadoop.hbase.security.User;
+089import org.apache.hadoop.hbase.testclassification.MediumTests;
+090import 
[03/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/90c7dfe4/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.html
index 25d6b70..635798d 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.html
@@ -1234,540 +1234,541 @@
 1226   * @param result the result returned by the append
 1227   * @throws IOException if an error occurred on the coprocessor
 1228   */
-1229  public void postAppend(final Append append, final Result result) throws IOException {
-1230execOperation(coprocessors.isEmpty() ? null : new RegionOperation() {
-1231  @Override
-1232  public void call(RegionObserver oserver, ObserverContext<RegionCoprocessorEnvironment> ctx)
-1233  throws IOException {
-1234oserver.postAppend(ctx, append, result);
-1235  }
-1236});
-1237  }
-1238
-1239  /**
-1240   * @param increment increment object
-1241   * @param result the result returned by postIncrement
-1242   * @throws IOException if an error occurred on the coprocessor
-1243   */
-1244  public Result postIncrement(final Increment increment, Result result) throws IOException {
-1245return execOperationWithResult(result,
-1246coprocessors.isEmpty() ? null : new RegionOperationWithResult<Result>() {
-1247  @Override
-1248  public void call(RegionObserver oserver, ObserverContext<RegionCoprocessorEnvironment> ctx)
-1249  throws IOException {
-1250setResult(oserver.postIncrement(ctx, increment, getResult()));
-1251  }
-1252});
-1253  }
-1254
-1255  /**
-1256   * @param scan the Scan specification
-1257   * @return scanner id to return to client if default operation should be
-1258   * bypassed, null otherwise
-1259   * @exception IOException Exception
-1260   */
-1261  public RegionScanner preScannerOpen(final Scan scan) throws IOException {
-1262return execOperationWithResult(true, null,
-1263coprocessors.isEmpty() ? null : new RegionOperationWithResult<RegionScanner>() {
-1264  @Override
-1265  public void call(RegionObserver oserver, ObserverContext<RegionCoprocessorEnvironment> ctx)
-1266  throws IOException {
-1267setResult(oserver.preScannerOpen(ctx, scan, getResult()));
-1268  }
-1269});
-1270  }
-1271
-1272  /**
-1273   * See
-1274   * {@link RegionObserver#preStoreScannerOpen(ObserverContext,
-1275   *Store, Scan, NavigableSet, KeyValueScanner)}
-1276   */
-1277  public KeyValueScanner preStoreScannerOpen(final Store store, final Scan scan,
-1278  final NavigableSet<byte[]> targetCols, final long readPt) throws IOException {
-1279return execOperationWithResult(null,
-1280coprocessors.isEmpty() ? null : new RegionOperationWithResult<KeyValueScanner>() {
-1281  @Override
-1282  public void call(RegionObserver oserver, ObserverContext<RegionCoprocessorEnvironment> ctx)
-1283  throws IOException {
-1284setResult(oserver.preStoreScannerOpen(ctx, store, scan, targetCols, getResult(), readPt));
-1285  }
-1286});
-1287  }
-1288
-1289  /**
-1290   * @param scan the Scan specification
-1291   * @param s the scanner
-1292   * @return the scanner instance to use
-1293   * @exception IOException Exception
-1294   */
-1295  public RegionScanner postScannerOpen(final Scan scan, RegionScanner s) throws IOException {
-1296return execOperationWithResult(s,
-1297coprocessors.isEmpty() ? null : new RegionOperationWithResult<RegionScanner>() {
-1298  @Override
-1299  public void call(RegionObserver oserver, ObserverContext<RegionCoprocessorEnvironment> ctx)
-1300  throws IOException {
-1301setResult(oserver.postScannerOpen(ctx, scan, getResult()));
-1302  }
-1303});
-1304  }
-1305
-1306  /**
-1307   * @param s the scanner
-1308   * @param results the result set returned by the region server
-1309   * @param limit the maximum number of results to return
-1310   * @return 'has next' indication to client if bypassing default behavior, or
-1311   * null otherwise
-1312   * @exception IOException Exception
-1313   */
-1314  public Boolean preScannerNext(final InternalScanner s,
-1315  final List<Result> results, final int limit) throws IOException {
-1316return execOperationWithResult(true, false,
-1317coprocessors.isEmpty() ? null : new RegionOperationWithResult<Boolean>() {
-1318  @Override
-1319  public void call(RegionObserver oserver, ObserverContext<RegionCoprocessorEnvironment> ctx)
-1320  throws IOException {
-1321setResult(oserver.preScannerNext(ctx, s, results, limit, 

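The hunk above shows RegionCoprocessorHost fanning each region operation (postAppend, postIncrement, preScannerOpen, preScannerNext, ...) out to every registered RegionObserver through RegionOperation / RegionOperationWithResult callbacks. A minimal sketch of the observer side that these hooks call into; the class name and log output are hypothetical, while the hook signatures follow the oserver.xxx(ctx, ...) calls visible in the diff:

    import java.io.IOException;
    import org.apache.hadoop.hbase.client.Append;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.coprocessor.ObserverContext;
    import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
    import org.apache.hadoop.hbase.coprocessor.RegionObserver;
    import org.apache.hadoop.hbase.regionserver.RegionScanner;

    // Hypothetical observer: the host invokes these methods on every registered
    // RegionObserver, as the execOperation(...) code above shows.
    public class AuditingObserver implements RegionObserver {

      @Override
      public void postAppend(ObserverContext<RegionCoprocessorEnvironment> ctx,
          Append append, Result result) throws IOException {
        // Runs after the region applied the Append; result is what the client will see.
        System.out.println("append on region "
            + ctx.getEnvironment().getRegionInfo().getRegionNameAsString());
      }

      @Override
      public RegionScanner preScannerOpen(ObserverContext<RegionCoprocessorEnvironment> ctx,
          Scan scan, RegionScanner s) throws IOException {
        // Returning a non-null scanner (plus bypassing) would replace the default scanner.
        return s;
      }
    }

Returning a replacement value from a pre-hook, as the WithResult variants above allow, is how an observer short-circuits the default behavior the host would otherwise run.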
[03/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-10 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0821e51a/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.PBHelper.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.PBHelper.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.PBHelper.html
index f729c99..0a32350 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.PBHelper.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.PBHelper.html
@@ -54,757 +54,756 @@
 046import 
io.netty.channel.ChannelPipeline;
 047import io.netty.channel.EventLoop;
 048import 
io.netty.channel.SimpleChannelInboundHandler;
-049import 
io.netty.channel.socket.nio.NioSocketChannel;
-050import 
io.netty.handler.codec.protobuf.ProtobufDecoder;
-051import 
io.netty.handler.codec.protobuf.ProtobufVarint32FrameDecoder;
-052import 
io.netty.handler.timeout.IdleStateEvent;
-053import 
io.netty.handler.timeout.IdleStateHandler;
-054import io.netty.util.concurrent.Future;
-055import 
io.netty.util.concurrent.FutureListener;
-056import 
io.netty.util.concurrent.Promise;
-057
-058import java.io.IOException;
-059import 
java.lang.reflect.InvocationTargetException;
-060import java.lang.reflect.Method;
-061import java.util.ArrayList;
-062import java.util.EnumSet;
-063import java.util.List;
-064import java.util.concurrent.TimeUnit;
-065
-066import org.apache.commons.logging.Log;
-067import 
org.apache.commons.logging.LogFactory;
-068import 
org.apache.hadoop.conf.Configuration;
-069import 
org.apache.hadoop.crypto.CryptoProtocolVersion;
-070import 
org.apache.hadoop.crypto.Encryptor;
-071import org.apache.hadoop.fs.CreateFlag;
-072import org.apache.hadoop.fs.FileSystem;
-073import 
org.apache.hadoop.fs.FileSystemLinkResolver;
-074import org.apache.hadoop.fs.Path;
-075import 
org.apache.hadoop.fs.UnresolvedLinkException;
-076import 
org.apache.hadoop.fs.permission.FsPermission;
-077import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
-078import 
org.apache.hadoop.hbase.client.ConnectionUtils;
-079import 
org.apache.hadoop.hbase.util.CancelableProgressable;
-080import 
org.apache.hadoop.hbase.util.FSUtils;
-081import 
org.apache.hadoop.hdfs.DFSClient;
-082import 
org.apache.hadoop.hdfs.DFSOutputStream;
-083import 
org.apache.hadoop.hdfs.DistributedFileSystem;
-084import 
org.apache.hadoop.hdfs.protocol.ClientProtocol;
-085import 
org.apache.hadoop.hdfs.protocol.DatanodeInfo;
-086import 
org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-087import 
org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
-088import 
org.apache.hadoop.hdfs.protocol.LocatedBlock;
-089import 
org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage;
-090import 
org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil;
-091import 
org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtocol;
-092import 
org.apache.hadoop.hdfs.protocol.datatransfer.Op;
-093import 
org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck;
-094import 
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto;
-095import 
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
-096import 
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto;
-097import 
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto;
-098import 
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto;
-099import 
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto;
-100import 
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto;
-101import 
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
-102import 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto;
-103import 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto;
-104import 
org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
-105import 
org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException;
-106import 
org.apache.hadoop.hdfs.server.namenode.LeaseExpiredException;
-107import 
org.apache.hadoop.io.EnumSetWritable;
-108import 
org.apache.hadoop.ipc.RemoteException;
-109import org.apache.hadoop.net.NetUtils;
-110import 
org.apache.hadoop.security.proto.SecurityProtos.TokenProto;
-111import 
org.apache.hadoop.security.token.Token;
-112import 
org.apache.hadoop.util.DataChecksum;
-113
-114/**
-115 * Helper class for implementing {@link FanOutOneBlockAsyncDFSOutput}.
-116 */
-117@InterfaceAudience.Private
-118public final class FanOutOneBlockAsyncDFSOutputHelper {
+049import 
io.netty.handler.codec.protobuf.ProtobufDecoder;
+050import 
io.netty.handler.codec.protobuf.ProtobufVarint32FrameDecoder;
+051import 

[03/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2d27954a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.ProcedureBiConsumer.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.ProcedureBiConsumer.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.ProcedureBiConsumer.html
index 16c0042..71844ce 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.ProcedureBiConsumer.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.ProcedureBiConsumer.html
@@ -126,2499 +126,2543 @@
 118import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableResponse;
 119import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest;
 120import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableCatalogJanitorResponse;
-121import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableRequest;
-122import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableResponse;
-123import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnRequest;
-124import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnResponse;
-125import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureRequest;
-126import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureResponse;
-127import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterStatusRequest;
-128import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterStatusResponse;
-129import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest;
-130import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse;
-131import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest;
-132import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse;
-133import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultRequest;
-134import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultResponse;
-135import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetSchemaAlterStatusRequest;
-136import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetSchemaAlterStatusResponse;
-137import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableDescriptorsRequest;
-138import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableDescriptorsResponse;
-139import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableNamesRequest;
-140import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableNamesResponse;
-141import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableRequest;
-142import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableResponse;
-143import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableRequest;
-144import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableResponse;
-145import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsBalancerEnabledRequest;
-146import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsBalancerEnabledResponse;
-147import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledRequest;
-148import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledResponse;
-149import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsCleanerChoreEnabledRequest;
-150import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsCleanerChoreEnabledResponse;
-151import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest;
-152import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse;
-153import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsNormalizerEnabledRequest;
-154import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsNormalizerEnabledResponse;
-155import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsProcedureDoneRequest;
-156import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsProcedureDoneResponse;
-157import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneRequest;
-158import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneResponse;
-159import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest;
-160import 

[03/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-08 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9fb0764b/devapidocs/org/apache/hadoop/hbase/client/class-use/TableDescriptor.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/class-use/TableDescriptor.html 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/TableDescriptor.html
index ef45d65..ce7419d 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/class-use/TableDescriptor.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/TableDescriptor.html
@@ -92,6 +92,27 @@
 Provides HBase Client
 
 
+
+org.apache.hadoop.hbase.constraint
+
+Restrict the domain of a data attribute, often times to fulfill business rules/requirements.
+
+
+
+org.apache.hadoop.hbase.mapreduce
+
+Provides HBase MapReduce (http://wiki.apache.org/hadoop/HadoopMapReduce)
+Input/OutputFormats, a table indexing MapReduce job, and utility methods.
+
+
+
+org.apache.hadoop.hbase.regionserver
+
+
+
+org.apache.hadoop.hbase.rest.client
+
+
 
 
 
@@ -166,6 +187,10 @@
 
 
 
+private TableDescriptor
+HBaseAdmin.CreateTableFuture.desc
+
+
 static TableDescriptor
 TableDescriptorBuilder.NAMESPACE_TABLEDESC
 Table descriptor for namespace table
@@ -202,6 +227,47 @@
 TableDescriptorBuilder.copy(TableDescriptordesc)
 
 
+TableDescriptor
+Table.getDescriptor()
+Gets the table descriptor for this table.
+
+
+
+TableDescriptor
+HTable.getDescriptor()
+
+
+TableDescriptor
+HTableWrapper.getDescriptor()
+
+
+protected TableDescriptor
+HBaseAdmin.CreateTableFuture.getTableDescriptor()
+
+
+protected TableDescriptor
+HBaseAdmin.TableFuture.getTableDescriptor()
+
+
+(package private) static TableDescriptor
+HBaseAdmin.getTableDescriptor(TableName tableName,
+  Connection connection,
+  RpcRetryingCallerFactory rpcCallerFactory,
+  RpcControllerFactory rpcControllerFactory,
+  int operationTimeout,
+  int rpcTimeout)
+
+
+TableDescriptor
+Admin.listTableDescriptor(TableName tableName)
+Method for getting the tableDescriptor
+
+
+
+TableDescriptor
+HBaseAdmin.listTableDescriptor(TableName tableName)
+
+
 static TableDescriptor
 TableDescriptorBuilder.parseFrom(byte[] pbBytes)
 The input should be created by TableDescriptorBuilder.toByteArray(org.apache.hadoop.hbase.client.TableDescriptor).
@@ -283,6 +349,80 @@
 RawAsyncHBaseAdmin.getTableDescriptor(TableName tableName)
 
 
+List<TableDescriptor>
+Admin.listTableDescriptors()
+List all the userspace tables.
+
+
+List<TableDescriptor>
+HBaseAdmin.listTableDescriptors()
+
+
+List<TableDescriptor>
+Admin.listTableDescriptors(List<TableName> tableNames)
+Get tableDescriptors
+
+
+List<TableDescriptor>
+HBaseAdmin.listTableDescriptors(List<TableName> tableNames)
+
+
+List<TableDescriptor>
+Admin.listTableDescriptors(Pattern pattern)
+List all the userspace tables matching the given pattern.
+
+
+List<TableDescriptor>
+HBaseAdmin.listTableDescriptors(Pattern pattern)
+
+
+List<TableDescriptor>
+Admin.listTableDescriptors(Pattern pattern,
+    boolean includeSysTables)
+List all the tables matching the given pattern.
+
+
+List<TableDescriptor>
+HBaseAdmin.listTableDescriptors(Pattern pattern,
+    boolean includeSysTables)
+

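The rows above list the TableDescriptor-returning methods this change adds to Admin, HBaseAdmin and Table. A short, hedged usage sketch; the table name "demo" and the connection setup are hypothetical, while the method names are the ones shown in the table:

    import java.io.IOException;
    import java.util.List;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.client.TableDescriptor;

    public class ListDescriptorsExample {
      public static void main(String[] args) throws IOException {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin();
             Table table = conn.getTable(TableName.valueOf("demo"))) {  // hypothetical table
          // Descriptor of a single table ...
          TableDescriptor one = admin.listTableDescriptor(TableName.valueOf("demo"));
          // ... and of all user-space tables.
          List<TableDescriptor> all = admin.listTableDescriptors();
          // Table itself now exposes its descriptor directly.
          TableDescriptor fromTable = table.getDescriptor();
          System.out.println(one.getTableName() + " / " + all.size()
              + " tables / " + fromTable.getTableName());
        }
      }
    }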
[03/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ca9f6925/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/MemStoreCompactor.Action.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/MemStoreCompactor.Action.html
 
b/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/MemStoreCompactor.Action.html
index f2fcfe3..2cdeeac 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/MemStoreCompactor.Action.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/MemStoreCompactor.Action.html
@@ -137,6 +137,67 @@ the order they are declared.
 
 
 
+
+Methods in org.apache.hadoop.hbase.regionserver
 with parameters of type MemStoreCompactor.Action
+
+Modifier and Type
+Method and Description
+
+
+
+private ImmutableSegment
+SegmentFactory.createImmutableSegment(org.apache.hadoop.conf.Configuration conf,
+  CellComparator comparator,
+  MemStoreSegmentsIterator iterator,
+  MemStoreLAB memStoreLAB,
+  int numOfCells,
+  MemStoreCompactor.Action action,
+  CompactingMemStore.IndexType idxType)
+
+
+private void
+CellArrayImmutableSegment.initializeCellSet(int numOfCells,
+ MemStoreSegmentsIterator iterator,
+ MemStoreCompactor.Action action)
+
+
+private void
+CellChunkImmutableSegment.initializeCellSet(int numOfCells,
+ MemStoreSegmentsIterator iterator,
+ MemStoreCompactor.Action action)
+
+
+
+
+Constructors in org.apache.hadoop.hbase.regionserver
 with parameters of type MemStoreCompactor.Action
+
+Constructor and Description
+
+
+
+CellArrayImmutableSegment(CellComparator comparator,
+ MemStoreSegmentsIterator iterator,
+ MemStoreLAB memStoreLAB,
+ int numOfCells,
+ MemStoreCompactor.Action action)
+
+ C-tor to be used when new CellArrayImmutableSegment is a result of compaction of a
+ list of older ImmutableSegments.
+
+
+
+CellChunkImmutableSegment(CellComparator comparator,
+ MemStoreSegmentsIterator iterator,
+ MemStoreLAB memStoreLAB,
+ int numOfCells,
+ MemStoreCompactor.Action action)
+
+ C-tor to be used when new CellChunkImmutableSegment is built as a result of compaction/merge
+ of a list of older ImmutableSegments.
+
+
+
+
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ca9f6925/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/MemStoreLAB.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/MemStoreLAB.html 
b/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/MemStoreLAB.html
index 99936e9..5b8c763 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/MemStoreLAB.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/MemStoreLAB.html
@@ -172,6 +172,16 @@
 
 
 
+private ImmutableSegment
+SegmentFactory.createImmutableSegment(org.apache.hadoop.conf.Configuration conf,
+  CellComparator comparator,
+  MemStoreSegmentsIterator iterator,
+  MemStoreLAB memStoreLAB,
+  int numOfCells,
+  MemStoreCompactor.Action action,
+  CompactingMemStore.IndexType idxType)
+
+
 private MutableSegment
 SegmentFactory.generateMutableSegment(org.apache.hadoop.conf.Configuration conf,
   CellComparator comparator,
@@ -186,32 +196,41 @@
 
 
 
-ImmutableSegment(CellComparator comparator,
-MemStoreSegmentsIterator iterator,
-MemStoreLAB memStoreLAB)
+CellArrayImmutableSegment(CellComparator comparator,
+ MemStoreSegmentsIterator iterator,
+ MemStoreLAB memStoreLAB,
+ int numOfCells,
+ MemStoreCompactor.Action action)
 
- C-tor to be used when new SKIP-LIST BASED ImmutableSegment is a result of compaction of a
+ C-tor to be used when new CellArrayImmutableSegment is a result of compaction of a
  list of older ImmutableSegments.
 
 
 
-ImmutableSegment(CellComparator comparator,
-MemStoreSegmentsIterator iterator,
-MemStoreLAB memStoreLAB,
-int numOfCells,
-ImmutableSegment.Type type,
-boolean merge)
+CellChunkImmutableSegment(CellComparator comparator,
+ MemStoreSegmentsIterator iterator,
+ 

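The constructors above are the internal building blocks the in-memory memstore compactor uses when it flattens or merges segments (MemStoreCompactor.Action decides which). At the user level this machinery is switched on per column family; a hedged sketch of that configuration, assuming the HColumnDescriptor#setInMemoryCompaction setter that goes with the MemoryCompactionPolicy enum imported elsewhere in this change set, with a hypothetical table and family name:

    import org.apache.hadoop.hbase.HColumnDescriptor;
    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.apache.hadoop.hbase.MemoryCompactionPolicy;
    import org.apache.hadoop.hbase.TableName;

    public class InMemoryCompactionConfig {
      public static HTableDescriptor demoTable() {
        HColumnDescriptor cf = new HColumnDescriptor("cf");   // hypothetical family
        // Ask the region server to compact this family's memstore segments in memory:
        // BASIC flattens segments; EAGER additionally removes duplicate/expired cells.
        cf.setInMemoryCompaction(MemoryCompactionPolicy.BASIC);
        HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("demo"));
        htd.addFamily(cf);
        return htd;
      }
    }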
[03/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-27 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8e3b63ca/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RegionReplicaHostCostFunction.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RegionReplicaHostCostFunction.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RegionReplicaHostCostFunction.html
index c895448..545d4da 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RegionReplicaHostCostFunction.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RegionReplicaHostCostFunction.html
@@ -1294,425 +1294,426 @@
 1286  }
 1287
 1288  // We normalize locality to be a score between 0 and 1.0 representing how good it
-1289  // is compared to how good it could be
-1290  locality /= bestLocality;
-1291}
-1292
-1293@Override
-1294protected void regionMoved(int 
region, int oldServer, int newServer) {
-1295  int oldEntity = type == 
LocalityType.SERVER ? oldServer : cluster.serverIndexToRackIndex[oldServer];
-1296  int newEntity = type == 
LocalityType.SERVER ? newServer : cluster.serverIndexToRackIndex[newServer];
-1297  if (this.services == null) {
-1298return;
-1299  }
-1300  double localityDelta = 
getWeightedLocality(region, newEntity) - getWeightedLocality(region, 
oldEntity);
-1301  double normalizedDelta = 
localityDelta / bestLocality;
-1302  locality += normalizedDelta;
-1303}
-1304
-1305@Override
-1306double cost() {
-1307  return 1 - locality;
-1308}
-1309
-1310private int 
getMostLocalEntityForRegion(int region) {
-1311  return 
cluster.getOrComputeRegionsToMostLocalEntities(type)[region];
-1312}
-1313
-1314private double 
getWeightedLocality(int region, int entity) {
-1315  return 
cluster.getOrComputeWeightedLocality(region, entity, type);
-1316}
-1317
-1318  }
-1319
-1320  static class 
ServerLocalityCostFunction extends LocalityBasedCostFunction {
-1321
-1322private static final String 
LOCALITY_COST_KEY = "hbase.master.balancer.stochastic.localityCost";
-1323private static final float 
DEFAULT_LOCALITY_COST = 25;
-1324
-1325
ServerLocalityCostFunction(Configuration conf, MasterServices srv) {
-1326  super(
-1327  conf,
-1328  srv,
-1329  LocalityType.SERVER,
-1330  LOCALITY_COST_KEY,
-1331  DEFAULT_LOCALITY_COST
-1332  );
-1333}
-1334
-1335@Override
-1336int regionIndexToEntityIndex(int 
region) {
-1337  return 
cluster.regionIndexToServerIndex[region];
-1338}
-1339  }
-1340
-1341  static class RackLocalityCostFunction 
extends LocalityBasedCostFunction {
-1342
-1343private static final String 
RACK_LOCALITY_COST_KEY = "hbase.master.balancer.stochastic.rackLocalityCost";
-1344private static final float 
DEFAULT_RACK_LOCALITY_COST = 15;
-1345
-1346public 
RackLocalityCostFunction(Configuration conf, MasterServices services) {
-1347  super(
-1348  conf,
-1349  services,
-1350  LocalityType.RACK,
-1351  RACK_LOCALITY_COST_KEY,
-1352  DEFAULT_RACK_LOCALITY_COST
-1353  );
-1354}
-1355
-1356@Override
-1357int regionIndexToEntityIndex(int 
region) {
-1358  return 
cluster.getRackForRegion(region);
-1359}
-1360  }
-1361
-1362  /**
-1363   * Base class the allows writing costs 
functions from rolling average of some
-1364   * number from RegionLoad.
-1365   */
-1366  abstract static class 
CostFromRegionLoadFunction extends CostFunction {
-1367
-1368private ClusterStatus clusterStatus 
= null;
-1369private Map<String, Deque<BalancerRegionLoad>> loads = null;
-1370private double[] stats = null;
-1371
CostFromRegionLoadFunction(Configuration conf) {
-1372  super(conf);
-1373}
-1374
-1375void setClusterStatus(ClusterStatus 
status) {
-1376  this.clusterStatus = status;
-1377}
-1378
-1379void setLoads(Map<String, Deque<BalancerRegionLoad>> l) {
-1380  this.loads = l;
-1381}
-1382
-1383@Override
-1384double cost() {
-1385  if (clusterStatus == null || loads 
== null) {
-1386return 0;
-1387  }
-1388
-1389  if (stats == null || stats.length 
!= cluster.numServers) {
-1390stats = new 
double[cluster.numServers];
-1391  }
-1392
-1393  for (int i =0; i < stats.length; i++) {
-1394//Cost this server has from 
RegionLoad
-1395long cost = 0;
-1396
-1397// for every region on this 
server get the rl
-1398for(int 
regionIndex:cluster.regionsPerServer[i]) {
-1399  Collection<BalancerRegionLoad> regionLoadList = cluster.regionLoads[regionIndex];
-1400
-1401  // Now if we found a region 
load get the type of cost that was requested.

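The locality cost functions shown above all reduce to the same arithmetic: weighted locality is normalized by the best achievable locality, updated incrementally as regions move, and reported as cost = 1 - locality. A small, self-contained illustration of just that arithmetic; it deliberately does not extend the balancer's internal CostFunction classes, which live in StochasticLoadBalancer:

    /** Illustrative only: the normalization used by the locality cost functions above. */
    public final class LocalityCostSketch {
      private double locality;            // weighted locality, kept normalized to [0, 1]
      private final double bestLocality;  // best achievable weighted locality

      LocalityCostSketch(double initialLocality, double bestLocality) {
        this.bestLocality = bestLocality;
        this.locality = initialLocality / bestLocality;   // locality /= bestLocality
      }

      /** Incremental update when a region moves, mirroring regionMoved(). */
      void regionMoved(double oldEntityLocality, double newEntityLocality) {
        double localityDelta = newEntityLocality - oldEntityLocality;
        locality += localityDelta / bestLocality;          // normalizedDelta
      }

      /** Lower is better: perfectly local placement costs 0, no locality costs 1. */
      double cost() {
        return 1 - locality;
      }
    }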
[03/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-26 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/aecb1286/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.ModifyNamespaceProcedureBiConsumer.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.ModifyNamespaceProcedureBiConsumer.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.ModifyNamespaceProcedureBiConsumer.html
index 01496d6..dc12c09 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.ModifyNamespaceProcedureBiConsumer.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.ModifyNamespaceProcedureBiConsumer.html
@@ -48,2406 +48,2267 @@
 040
 041import io.netty.util.Timeout;
 042import io.netty.util.TimerTask;
-043import java.util.stream.Stream;
-044import org.apache.commons.io.IOUtils;
-045import org.apache.commons.logging.Log;
-046import 
org.apache.commons.logging.LogFactory;
-047import 
org.apache.hadoop.hbase.HRegionInfo;
-048import 
org.apache.hadoop.hbase.HRegionLocation;
-049import 
org.apache.hadoop.hbase.MetaTableAccessor;
-050import 
org.apache.hadoop.hbase.MetaTableAccessor.QueryType;
-051import 
org.apache.hadoop.hbase.NotServingRegionException;
-052import 
org.apache.hadoop.hbase.ProcedureInfo;
-053import 
org.apache.hadoop.hbase.RegionLocations;
-054import 
org.apache.hadoop.hbase.ServerName;
-055import 
org.apache.hadoop.hbase.NamespaceDescriptor;
-056import 
org.apache.hadoop.hbase.HConstants;
-057import 
org.apache.hadoop.hbase.TableExistsException;
-058import 
org.apache.hadoop.hbase.TableName;
-059import 
org.apache.hadoop.hbase.AsyncMetaTableAccessor;
-060import 
org.apache.hadoop.hbase.TableNotDisabledException;
-061import 
org.apache.hadoop.hbase.TableNotEnabledException;
-062import 
org.apache.hadoop.hbase.TableNotFoundException;
-063import 
org.apache.hadoop.hbase.UnknownRegionException;
-064import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
-065import 
org.apache.hadoop.hbase.classification.InterfaceStability;
-066import 
org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.AdminRequestCallerBuilder;
-067import 
org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.MasterRequestCallerBuilder;
-068import 
org.apache.hadoop.hbase.client.Scan.ReadType;
-069import 
org.apache.hadoop.hbase.client.replication.ReplicationSerDeHelper;
-070import 
org.apache.hadoop.hbase.client.replication.TableCFs;
-071import 
org.apache.hadoop.hbase.exceptions.DeserializationException;
-072import 
org.apache.hadoop.hbase.ipc.HBaseRpcController;
-073import 
org.apache.hadoop.hbase.quotas.QuotaFilter;
-074import 
org.apache.hadoop.hbase.quotas.QuotaSettings;
-075import 
org.apache.hadoop.hbase.quotas.QuotaTableUtil;
-076import 
org.apache.hadoop.hbase.replication.ReplicationException;
-077import 
org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-078import 
org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
-079import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback;
-080import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-081import 
org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
-082import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
-083import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest;
-084import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse;
-085import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
-086import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse;
-087import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest;
-088import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse;
-089import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionRequest;
-090import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionResponse;
-091import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionRequest;
-092import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionResponse;
-093import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription;
-094import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema;
-095import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest;
-096import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse;
-097import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest;
-098import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse;
-099import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest;
-100import 

[03/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-20 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a719cd00/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.html
index 89f7c43..e8eb593 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.html
@@ -69,15 +69,15 @@
 061  requiredArguments = {
 062@org.jamon.annotations.Argument(name = "master", type = "HMaster")},
 063  optionalArguments = {
-064@org.jamon.annotations.Argument(name = "assignmentManager", type = "AssignmentManager"),
-065@org.jamon.annotations.Argument(name = "catalogJanitorEnabled", type = "boolean"),
-066@org.jamon.annotations.Argument(name = "format", type = "String"),
-067@org.jamon.annotations.Argument(name = "serverManager", type = "ServerManager"),
-068@org.jamon.annotations.Argument(name = "metaLocation", type = "ServerName"),
-069@org.jamon.annotations.Argument(name = "filter", type = "String"),
-070@org.jamon.annotations.Argument(name = "servers", type = "List<ServerName>"),
-071@org.jamon.annotations.Argument(name = "frags", type = "Map<String,Integer>"),
-072@org.jamon.annotations.Argument(name = "deadServers", type = "Set<ServerName>")})
+064@org.jamon.annotations.Argument(name = "filter", type = "String"),
+065@org.jamon.annotations.Argument(name = "frags", type = "Map<String,Integer>"),
+066@org.jamon.annotations.Argument(name = "servers", type = "List<ServerName>"),
+067@org.jamon.annotations.Argument(name = "deadServers", type = "Set<ServerName>"),
+068@org.jamon.annotations.Argument(name = "format", type = "String"),
+069@org.jamon.annotations.Argument(name = "metaLocation", type = "ServerName"),
+070@org.jamon.annotations.Argument(name = "assignmentManager", type = "AssignmentManager"),
+071@org.jamon.annotations.Argument(name = "catalogJanitorEnabled", type = "boolean"),
+072@org.jamon.annotations.Argument(name = "serverManager", type = "ServerManager")})
 073public class MasterStatusTmpl
 074  extends 
org.jamon.AbstractTemplateProxy
 075{
@@ -118,159 +118,159 @@
 110  return m_master;
 111}
 112private HMaster m_master;
-113// 29, 1
-114public void 
setAssignmentManager(AssignmentManager assignmentManager)
+113// 26, 1
+114public void setFilter(String 
filter)
 115{
-116  // 29, 1
-117  m_assignmentManager = 
assignmentManager;
-118  m_assignmentManager__IsNotDefault = 
true;
+116  // 26, 1
+117  m_filter = filter;
+118  m_filter__IsNotDefault = true;
 119}
-120public AssignmentManager 
getAssignmentManager()
+120public String getFilter()
 121{
-122  return m_assignmentManager;
+122  return m_filter;
 123}
-124private AssignmentManager 
m_assignmentManager;
-125public boolean 
getAssignmentManager__IsNotDefault()
+124private String m_filter;
+125public boolean 
getFilter__IsNotDefault()
 126{
-127  return 
m_assignmentManager__IsNotDefault;
+127  return m_filter__IsNotDefault;
 128}
-129private boolean 
m_assignmentManager__IsNotDefault;
-130// 25, 1
-131public void 
setCatalogJanitorEnabled(boolean catalogJanitorEnabled)
+129private boolean 
m_filter__IsNotDefault;
+130// 21, 1
+131public void 
setFrags(MapString,Integer frags)
 132{
-133  // 25, 1
-134  m_catalogJanitorEnabled = 
catalogJanitorEnabled;
-135  
m_catalogJanitorEnabled__IsNotDefault = true;
+133  // 21, 1
+134  m_frags = frags;
+135  m_frags__IsNotDefault = true;
 136}
-137public boolean 
getCatalogJanitorEnabled()
+137public MapString,Integer 
getFrags()
 138{
-139  return m_catalogJanitorEnabled;
+139  return m_frags;
 140}
-141private boolean 
m_catalogJanitorEnabled;
-142public boolean 
getCatalogJanitorEnabled__IsNotDefault()
+141private MapString,Integer 
m_frags;
+142public boolean 
getFrags__IsNotDefault()
 143{
-144  return 
m_catalogJanitorEnabled__IsNotDefault;
+144  return m_frags__IsNotDefault;
 145}
-146private boolean 
m_catalogJanitorEnabled__IsNotDefault;
-147// 27, 1
-148public void setFormat(String 
format)
+146private boolean 
m_frags__IsNotDefault;
+147// 23, 1
+148public void 
setServers(ListServerName servers)
 149{
-150  // 27, 1
-151  m_format = format;
-152  m_format__IsNotDefault = true;
+150  // 23, 1
+151  m_servers = servers;
+152  m_servers__IsNotDefault = true;
 153}
-154public String getFormat()
+154public ListServerName 
getServers()
 155{
-156  return m_format;
+156  return m_servers;
 157}
-158
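The generated proxy above follows Jamon's builder-style pattern: each optional template argument gets a setter that records an __IsNotDefault flag before render() is called with the required arguments. A hedged sketch of how such a proxy is typically driven; the Writer and the "json"/"general" values are placeholders, while the setter names are the ones generated above:

    import java.io.IOException;
    import java.io.StringWriter;
    import org.apache.hadoop.hbase.master.HMaster;
    import org.apache.hadoop.hbase.tmpl.master.MasterStatusTmpl;

    public class MasterStatusRenderSketch {
      // Renders the master status page for the given master.
      public static String render(HMaster master) throws IOException {
        StringWriter out = new StringWriter();
        new MasterStatusTmpl()
            .setFormat("json")       // optional argument; its default is used when unset
            .setFilter("general")    // hypothetical filter value
            .render(out, master);    // required argument "master" goes to render()
        return out.toString();
      }
    }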

[03/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-10 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/476c54ed/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.Intf.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.Intf.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.Intf.html
index acb426d..20455ea 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.Intf.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.Intf.html
@@ -34,9 +34,9 @@
 026@org.jamon.annotations.Argument(name 
= "regionServer", type = "HRegionServer")},
 027  optionalArguments = {
 028@org.jamon.annotations.Argument(name 
= "bcn", type = "String"),
-029@org.jamon.annotations.Argument(name 
= "format", type = "String"),
-030@org.jamon.annotations.Argument(name 
= "bcv", type = "String"),
-031@org.jamon.annotations.Argument(name 
= "filter", type = "String")})
+029@org.jamon.annotations.Argument(name 
= "filter", type = "String"),
+030@org.jamon.annotations.Argument(name 
= "format", type = "String"),
+031@org.jamon.annotations.Argument(name 
= "bcv", type = "String")})
 032public class RSStatusTmpl
 033  extends 
org.jamon.AbstractTemplateProxy
 034{
@@ -94,57 +94,57 @@
 086  return m_bcn__IsNotDefault;
 087}
 088private boolean 
m_bcn__IsNotDefault;
-089// 22, 1
-090public void setFormat(String 
format)
+089// 21, 1
+090public void setFilter(String 
filter)
 091{
-092  // 22, 1
-093  m_format = format;
-094  m_format__IsNotDefault = true;
+092  // 21, 1
+093  m_filter = filter;
+094  m_filter__IsNotDefault = true;
 095}
-096public String getFormat()
+096public String getFilter()
 097{
-098  return m_format;
+098  return m_filter;
 099}
-100private String m_format;
-101public boolean 
getFormat__IsNotDefault()
+100private String m_filter;
+101public boolean 
getFilter__IsNotDefault()
 102{
-103  return m_format__IsNotDefault;
+103  return m_filter__IsNotDefault;
 104}
-105private boolean 
m_format__IsNotDefault;
-106// 24, 1
-107public void setBcv(String bcv)
+105private boolean 
m_filter__IsNotDefault;
+106// 22, 1
+107public void setFormat(String 
format)
 108{
-109  // 24, 1
-110  m_bcv = bcv;
-111  m_bcv__IsNotDefault = true;
+109  // 22, 1
+110  m_format = format;
+111  m_format__IsNotDefault = true;
 112}
-113public String getBcv()
+113public String getFormat()
 114{
-115  return m_bcv;
+115  return m_format;
 116}
-117private String m_bcv;
-118public boolean 
getBcv__IsNotDefault()
+117private String m_format;
+118public boolean 
getFormat__IsNotDefault()
 119{
-120  return m_bcv__IsNotDefault;
+120  return m_format__IsNotDefault;
 121}
-122private boolean 
m_bcv__IsNotDefault;
-123// 21, 1
-124public void setFilter(String 
filter)
+122private boolean 
m_format__IsNotDefault;
+123// 24, 1
+124public void setBcv(String bcv)
 125{
-126  // 21, 1
-127  m_filter = filter;
-128  m_filter__IsNotDefault = true;
+126  // 24, 1
+127  m_bcv = bcv;
+128  m_bcv__IsNotDefault = true;
 129}
-130public String getFilter()
+130public String getBcv()
 131{
-132  return m_filter;
+132  return m_bcv;
 133}
-134private String m_filter;
-135public boolean 
getFilter__IsNotDefault()
+134private String m_bcv;
+135public boolean 
getBcv__IsNotDefault()
 136{
-137  return m_filter__IsNotDefault;
+137  return m_bcv__IsNotDefault;
 138}
-139private boolean 
m_filter__IsNotDefault;
+139private boolean 
m_bcv__IsNotDefault;
 140  }
 141  @Override
 142  protected 
org.jamon.AbstractTemplateProxy.ImplData makeImplData()
@@ -163,24 +163,24 @@
 155return this;
 156  }
 157  
-158  protected String format;
-159  public final 
org.apache.hadoop.hbase.tmpl.regionserver.RSStatusTmpl setFormat(String 
p_format)
+158  protected String filter;
+159  public final 
org.apache.hadoop.hbase.tmpl.regionserver.RSStatusTmpl setFilter(String 
p_filter)
 160  {
-161
(getImplData()).setFormat(p_format);
+161
(getImplData()).setFilter(p_filter);
 162return this;
 163  }
 164  
-165  protected String bcv;
-166  public final 
org.apache.hadoop.hbase.tmpl.regionserver.RSStatusTmpl setBcv(String p_bcv)
+165  protected String format;
+166  public final 
org.apache.hadoop.hbase.tmpl.regionserver.RSStatusTmpl setFormat(String 
p_format)
 167  {
-168(getImplData()).setBcv(p_bcv);
+168
(getImplData()).setFormat(p_format);
 169return this;
 170  }
 171  
-172  protected String filter;
-173  public final 
org.apache.hadoop.hbase.tmpl.regionserver.RSStatusTmpl setFilter(String 
p_filter)
+172  

[03/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/77a552c4/devapidocs/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.html
 
b/devapidocs/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.html
index c41f4ff..43b852d 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.html
@@ -134,7 +134,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Private
-public class SplitTableRegionProcedure
+public class SplitTableRegionProcedure
 extends AbstractStateMachineRegionProcedure<org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.SplitTableRegionState>
 The procedure to split a region in a table.
  Takes lock on the parent region.
@@ -509,7 +509,7 @@ extends 
 
 LOG
-private static final org.apache.commons.logging.Log LOG
+private static final org.apache.commons.logging.Log LOG
 
 
 
@@ -518,7 +518,7 @@ extends 
 
 traceEnabled
-private Boolean traceEnabled
+private Boolean traceEnabled
 
 
 
@@ -527,7 +527,7 @@ extends 
 
 daughter_1_HRI
-privateHRegionInfo daughter_1_HRI
+privateHRegionInfo daughter_1_HRI
 
 
 
@@ -536,7 +536,7 @@ extends 
 
 daughter_2_HRI
-privateHRegionInfo daughter_2_HRI
+privateHRegionInfo daughter_2_HRI
 
 
 
@@ -545,7 +545,7 @@ extends 
 
 EXPECTED_SPLIT_STATES
-private staticRegionState.State[] EXPECTED_SPLIT_STATES
+private staticRegionState.State[] EXPECTED_SPLIT_STATES
 
 
 
@@ -562,7 +562,7 @@ extends 
 
 SplitTableRegionProcedure
-publicSplitTableRegionProcedure()
+publicSplitTableRegionProcedure()
 
 
 
@@ -571,7 +571,7 @@ extends 
 
 SplitTableRegionProcedure
-public SplitTableRegionProcedure(MasterProcedureEnv env,
+public SplitTableRegionProcedure(MasterProcedureEnv env,
  HRegionInfo regionToSplit,
  byte[] splitRow)
   throws IOException
@@ -595,7 +595,7 @@ extends 
 
 checkSplitRow
-private static void checkSplitRow(HRegionInfo regionToSplit,
+private static void checkSplitRow(HRegionInfo regionToSplit,
   byte[] splitRow)
throws IOException
 
@@ -610,7 +610,7 @@ extends 
 
 getDaughterRegionIdTimestamp
-private static long getDaughterRegionIdTimestamp(HRegionInfo hri)
+private static long getDaughterRegionIdTimestamp(HRegionInfo hri)
 Calculate daughter regionid to use.
 
 Parameters:
@@ -626,7 +626,7 @@ extends 
 
 executeFromState
-protectedStateMachineProcedure.FlowexecuteFromState(MasterProcedureEnvenv,
+protectedStateMachineProcedure.FlowexecuteFromState(MasterProcedureEnvenv,
   
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.SplitTableRegionStatestate)
throws http://docs.oracle.com/javase/8/docs/api/java/lang/InterruptedException.html?is-external=true;
 title="class or interface in java.lang">InterruptedException
 Description copied from 
class:StateMachineProcedure
@@ -649,7 +649,7 @@ extends 
 
 rollbackState
-protectedvoidrollbackState(MasterProcedureEnvenv,
+protectedvoidrollbackState(MasterProcedureEnvenv,
  
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.SplitTableRegionStatestate)
   throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException,
  http://docs.oracle.com/javase/8/docs/api/java/lang/InterruptedException.html?is-external=true;
 title="class or interface in java.lang">InterruptedException
@@ -671,7 +671,7 @@ extends 
 
 isRollbackSupported
-protected boolean isRollbackSupported(org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.SplitTableRegionState state)
+protected boolean isRollbackSupported(org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.SplitTableRegionState state)
 Description copied from 
class:StateMachineProcedure
 Used by the default implementation of abort() to know if 
the current state can be aborted
  and rollback can be triggered.
@@ -687,7 +687,7 @@ extends 
 
 getState

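SplitTableRegionProcedure is the master-side state machine that actually performs a split (checkSplitRow, daughter region creation, reassignment); clients only supply the table and split point. A minimal, hedged trigger through the Admin API — the table name and split key are hypothetical:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.util.Bytes;

    public class SplitRegionExample {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Ask the master to split table "demo" at row key "row-5000"; the master
          // then drives the split through a SplitTableRegionProcedure.
          admin.split(TableName.valueOf("demo"), Bytes.toBytes("row-5000"));
        }
      }
    }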
[03/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-06-07 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b44796ef/devapidocs/org/apache/hadoop/hbase/client/Result.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/Result.html 
b/devapidocs/org/apache/hadoop/hbase/client/Result.html
index 95fe742..540a5b5 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/Result.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/Result.html
@@ -4,7 +4,7 @@
 
 
 
-Result (Apache HBase 2.0.0-SNAPSHOT API)
+Result (Apache HBase 3.0.0-SNAPSHOT API)
 
 
 
@@ -12,13 +12,13 @@
 
-var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":9,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":9,"i14":9,"i15":9,"i16":9,"i17":9,"i18":9,"i19":9,"i20":9,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":9,"i32":10,"i33":10,"i34":10,"i35":10,"i36":42,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10};
+var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":9,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":9,"i14":9,"i15":9,"i16":9,"i17":9,"i18":9,"i19":9,"i20":9,"i21":9,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":9,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":42,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10};
 var tabs = {65535:["t0","All Methods"],1:["t1","Static 
Methods"],2:["t2","Instance Methods"],8:["t4","Concrete 
Methods"],32:["t6","Deprecated Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -178,48 +178,52 @@ implements 
 
 
+private Cursor
+cursor 
+
+
 static Result
 EMPTY_RESULT 
 
-
+
 private Boolean
 exists 
 
-
+
 private NavigableMap<byte[],NavigableMap<byte[],NavigableMap<Long,byte[]>>>
 familyMap 
 
-
+
 private static int
 

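The new cursor field on Result backs the scan-cursor feature: when a scan asks for cursor results, a heartbeat Result can carry the server's current scan position instead of cells. A hedged sketch of the client-side loop; the table and scan setup are placeholders, and setNeedCursorResult/isCursor/getCursor are the accessors assumed to accompany the new field:

    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.ResultScanner;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CursorScanSketch {
      static void scanWithCursor(Table table) throws Exception {
        Scan scan = new Scan().setNeedCursorResult(true);  // assumed switch for cursor heartbeats
        try (ResultScanner scanner = table.getScanner(scan)) {
          for (Result r : scanner) {
            if (r.isCursor()) {
              // No cells here, only the row the server has scanned up to.
              System.out.println("cursor at " + Bytes.toStringBinary(r.getCursor().getRow()));
              continue;
            }
            // Normal result carrying cells.
            System.out.println(Bytes.toStringBinary(r.getRow()));
          }
        }
      }
    }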
[03/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6ad4f21a/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/BloomType.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/BloomType.html 
b/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/BloomType.html
index 39e9322..c4cff08 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/BloomType.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/BloomType.html
@@ -289,7 +289,7 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.
 
 
 private BloomType
-StoreFile.cfBloomType
+HStoreFile.cfBloomType
 Bloom filter type specified in column family configuration.
 
 
@@ -341,50 +341,48 @@ the order they are declared.
 
 
 
-StoreFile(org.apache.hadoop.fs.FileSystem fs,
- org.apache.hadoop.fs.Path p,
- org.apache.hadoop.conf.Configuration conf,
- CacheConfig cacheConf,
- BloomType cfBloomType)
+HStoreFile(org.apache.hadoop.fs.FileSystem fs,
+  org.apache.hadoop.fs.Path p,
+  org.apache.hadoop.conf.Configuration conf,
+  CacheConfig cacheConf,
+  BloomType cfBloomType)
 Deprecated.
 Now we will specify whether the StoreFile is for primary replica when
- constructing, so please use
- StoreFile.StoreFile(FileSystem, Path, Configuration, CacheConfig, BloomType, boolean)
- directly.
+ constructing, so please use HStoreFile.HStoreFile(FileSystem, Path, Configuration,
+ CacheConfig, BloomType, boolean) directly.
 
 
 
 
-StoreFile(org.apache.hadoop.fs.FileSystem fs,
- org.apache.hadoop.fs.Path p,
- org.apache.hadoop.conf.Configuration conf,
- CacheConfig cacheConf,
- BloomType cfBloomType,
- boolean primaryReplica)
+HStoreFile(org.apache.hadoop.fs.FileSystem fs,
+  org.apache.hadoop.fs.Path p,
+  org.apache.hadoop.conf.Configuration conf,
+  CacheConfig cacheConf,
+  BloomType cfBloomType,
+  boolean primaryReplica)
 Constructor, loads a reader and its indices, etc.
 
 
 
-StoreFile(org.apache.hadoop.fs.FileSystem fs,
- StoreFileInfo fileInfo,
- org.apache.hadoop.conf.Configuration conf,
- CacheConfig cacheConf,
- BloomType cfBloomType)
+HStoreFile(org.apache.hadoop.fs.FileSystem fs,
+  StoreFileInfo fileInfo,
+  org.apache.hadoop.conf.Configuration conf,
+  CacheConfig cacheConf,
+  BloomType cfBloomType)
 Deprecated.
 Now we will specify whether the StoreFile is for primary replica when
- constructing, so please use
- StoreFile.StoreFile(FileSystem, StoreFileInfo, Configuration, CacheConfig, BloomType, boolean)
- directly.
+ constructing, so please use HStoreFile.HStoreFile(FileSystem, StoreFileInfo,
+ Configuration, CacheConfig, BloomType, boolean) directly.
 
 
 
 
-StoreFile(org.apache.hadoop.fs.FileSystem fs,
- StoreFileInfo fileInfo,
- org.apache.hadoop.conf.Configuration conf,
- CacheConfig cacheConf,
- BloomType cfBloomType,
- boolean primaryReplica)
+HStoreFile(org.apache.hadoop.fs.FileSystem fs,
+  StoreFileInfo fileInfo,
+  org.apache.hadoop.conf.Configuration conf,
+  CacheConfig cacheConf,
+  BloomType cfBloomType,
+  boolean primaryReplica)
 Constructor, loads a reader and its indices, etc.
 
 
@@ -482,7 +480,7 @@ the order they are declared.
  int maxKeys,
  HFile.Writer writer)
 Creates a new general (Row or RowCol) Bloom filter at the 
time of
- StoreFile writing.
+ StoreFile writing.
 
 
 
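The deprecation notes above point existing callers at the HStoreFile constructors that take an explicit primaryReplica flag. A hedged sketch of the replacement call; the path and configuration are placeholders, and the parameter list is the one shown in the table:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.io.hfile.CacheConfig;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.regionserver.HStoreFile;

    public class HStoreFileOpenSketch {
      static HStoreFile open(Path storeFilePath) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        FileSystem fs = storeFilePath.getFileSystem(conf);
        // Replaces new StoreFile(fs, p, conf, cacheConf, cfBloomType): the extra boolean
        // says whether this store file belongs to the primary region replica.
        return new HStoreFile(fs, storeFilePath, conf, new CacheConfig(conf), BloomType.NONE, true);
      }
    }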

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6ad4f21a/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/HRegionFileSystem.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/HRegionFileSystem.html
 
b/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/HRegionFileSystem.html
index 242e2f7..5889dd7 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/HRegionFileSystem.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/HRegionFileSystem.html
@@ -135,7 +135,7 @@
 private Pair<org.apache.hadoop.fs.Path,org.apache.hadoop.fs.Path>
 SplitTableRegionProcedure.splitStoreFile(HRegionFileSystem regionFs,
   byte[] family,
-  StoreFile sf)
+  StoreFile sf)
 
 
 private Pairhttp://docs.oracle.com/javase/8/docs/api/java/lang/Integer.html?is-external=true;
 title="class or interface in java.lang">Integer,http://docs.oracle.com/javase/8/docs/api/java/lang/Integer.html?is-external=true;
 title="class or interface in java.lang">Integer
@@ -155,7 +155,7 @@
 
 StoreFileSplitter(HRegionFileSystemregionFs,
   

[03/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c9d35424/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestReplicaWithCluster.RegionServerHostingPrimayMetaRegionSlowCopro.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestReplicaWithCluster.RegionServerHostingPrimayMetaRegionSlowCopro.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestReplicaWithCluster.RegionServerHostingPrimayMetaRegionSlowCopro.html
deleted file mode 100644
index ebf0532..000
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestReplicaWithCluster.RegionServerHostingPrimayMetaRegionSlowCopro.html
+++ /dev/null
@@ -1,719 +0,0 @@
-001/**
-002 *
-003 * Licensed to the Apache Software 
Foundation (ASF) under one
-004 * or more contributor license 
agreements.  See the NOTICE file
-005 * distributed with this work for 
additional information
-006 * regarding copyright ownership.  The 
ASF licenses this file
-007 * to you under the Apache License, 
Version 2.0 (the
-008 * "License"); you may not use this file 
except in compliance
-009 * with the License.  You may obtain a 
copy of the License at
-010 *
-011 * 
http://www.apache.org/licenses/LICENSE-2.0
-012 *
-013 * Unless required by applicable law or 
agreed to in writing, software
-014 * distributed under the License is 
distributed on an "AS IS" BASIS,
-015 * WITHOUT WARRANTIES OR CONDITIONS OF 
ANY KIND, either express or implied.
-016 * See the License for the specific 
language governing permissions and
-017 * limitations under the License.
-018 */
-019
-020
-021package org.apache.hadoop.hbase.client;
-022
-023import java.io.IOException;
-024import java.util.ArrayList;
-025import java.util.Arrays;
-026import java.util.List;
-027import 
java.util.concurrent.CountDownLatch;
-028import java.util.concurrent.TimeUnit;
-029import 
java.util.concurrent.atomic.AtomicLong;
-030import 
java.util.concurrent.atomic.AtomicReference;
-031
-032import org.apache.commons.logging.Log;
-033import 
org.apache.commons.logging.LogFactory;
-034import 
org.apache.hadoop.conf.Configuration;
-035import org.apache.hadoop.fs.Path;
-036import org.apache.hadoop.hbase.Cell;
-037import 
org.apache.hadoop.hbase.HBaseConfiguration;
-038import 
org.apache.hadoop.hbase.HBaseTestingUtility;
-039import 
org.apache.hadoop.hbase.HColumnDescriptor;
-040import 
org.apache.hadoop.hbase.HConstants;
-041import 
org.apache.hadoop.hbase.HTableDescriptor;
-042import 
org.apache.hadoop.hbase.RegionLocations;
-043import org.apache.hadoop.hbase.Waiter;
-044
-045import 
org.apache.hadoop.hbase.client.replication.ReplicationAdmin;
-046import 
org.apache.hadoop.hbase.coprocessor.ObserverContext;
-047import 
org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
-048import 
org.apache.hadoop.hbase.coprocessor.RegionObserver;
-049import 
org.apache.hadoop.hbase.ipc.RpcControllerFactory;
-050import 
org.apache.hadoop.hbase.regionserver.RegionScanner;
-051import 
org.apache.hadoop.hbase.regionserver.RegionServerStoppedException;
-052import 
org.apache.hadoop.hbase.regionserver.StorefileRefresherChore;
-053import 
org.apache.hadoop.hbase.regionserver.TestHRegionServerBulkLoad;
-054import 
org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-055import 
org.apache.hadoop.hbase.testclassification.ClientTests;
-056import 
org.apache.hadoop.hbase.testclassification.MediumTests;
-057import 
org.apache.hadoop.hbase.util.Bytes;
-058import 
org.apache.hadoop.hbase.util.Pair;
-059import 
org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster;
-060import org.junit.AfterClass;
-061import org.junit.Assert;
-062import org.junit.BeforeClass;
-063import org.junit.Test;
-064import 
org.junit.experimental.categories.Category;
-065
-066@Category({MediumTests.class, 
ClientTests.class})
-067public class TestReplicaWithCluster {
-068  private static final Log LOG = 
LogFactory.getLog(TestReplicaWithCluster.class);
-069
-070  private static final int NB_SERVERS = 
3;
-071  private static final byte[] row = 
TestReplicaWithCluster.class.getName().getBytes();
-072  private static final 
HBaseTestingUtility HTU = new HBaseTestingUtility();
-073
-074  // second minicluster used in testing 
of replication
-075  private static HBaseTestingUtility 
HTU2;
-076  private static final byte[] f = 
HConstants.CATALOG_FAMILY;
-077
-078  private final static int REFRESH_PERIOD 
= 1000;
-079  private final static int 
META_SCAN_TIMEOUT_IN_MILLISEC = 200;
-080
-081  /**
-082   * This copro is used to synchronize 
the tests.
-083   */
-084  public static class SlowMeCopro 
implements RegionObserver {
-085static final AtomicLong sleepTime = 
new AtomicLong(0);
-086    static final AtomicReference<CountDownLatch> cdl = new AtomicReference<>(new CountDownLatch(0));
-087
-088public SlowMeCopro() {
-089}
-090
-091@Override
-092public void 

[03/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6dd31117/devapidocs/org/apache/hadoop/hbase/master/assignment/FailedRemoteDispatchException.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/assignment/FailedRemoteDispatchException.html
 
b/devapidocs/org/apache/hadoop/hbase/master/assignment/FailedRemoteDispatchException.html
new file mode 100644
index 000..c963ece
--- /dev/null
+++ 
b/devapidocs/org/apache/hadoop/hbase/master/assignment/FailedRemoteDispatchException.html
@@ -0,0 +1,276 @@
+
+
+
+
+
+FailedRemoteDispatchException (Apache HBase 2.0.0-SNAPSHOT API)
+
+
+
+
+
+
+
+
+org.apache.hadoop.hbase.master.assignment
+Class 
FailedRemoteDispatchException
+
+
+
+java.lang.Object
+  java.lang.Throwable
+    java.lang.Exception
+      java.io.IOException
+        org.apache.hadoop.hbase.HBaseIOException
+          org.apache.hadoop.hbase.master.assignment.FailedRemoteDispatchException
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+All Implemented Interfaces:
+java.io.Serializable
+
+
+
+@InterfaceAudience.Private
+public class FailedRemoteDispatchException
+extends HBaseIOException
+Used internally to signal that queuing a remote procedure operation failed.
+
+See Also:
+Serialized
 Form
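Since the page only states the intent, here is a small hedged sketch of how such an exception is typically raised and handled. The bounded queue and the dispatch helper are invented for the illustration; only the class name, its String constructor (listed below) and its IOException ancestry come from this page.

import java.io.IOException;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;

import org.apache.hadoop.hbase.master.assignment.FailedRemoteDispatchException;

public class DispatchSketch {
  // Stand-in for a dispatcher's bounded queue of remote operations (illustrative only).
  static final BlockingQueue<Runnable> REMOTE_OPS = new ArrayBlockingQueue<>(1);

  static void dispatch(Runnable op) throws IOException {
    if (!REMOTE_OPS.offer(op)) {
      // The String constructor is the only API surface of the class assumed here.
      throw new FailedRemoteDispatchException("could not queue remote operation " + op);
    }
  }

  public static void main(String[] args) {
    try {
      dispatch(() -> { });
      dispatch(() -> { });   // second offer fails: queue capacity is 1
    } catch (IOException e) {
      System.out.println("dispatch failed: " + e.getMessage());
    }
  }
}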
+
+
+
+
+
+
+
+
+
+
+
+
+Constructor Summary
+
+Constructors
+
+Constructor and Description
+
+
+FailedRemoteDispatchException(String msg)
+
+
+
+
+
+
+
+
+
+Method Summary
+
+
+
+
+Methods inherited from class java.lang.Throwable
+addSuppressed, fillInStackTrace, getCause, getLocalizedMessage, getMessage, getStackTrace, getSuppressed, initCause, printStackTrace, printStackTrace, printStackTrace, 

[03/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dab57116/devapidocs/org/apache/hadoop/hbase/quotas/QuotaObserverChore.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/quotas/QuotaObserverChore.html 
b/devapidocs/org/apache/hadoop/hbase/quotas/QuotaObserverChore.html
new file mode 100644
index 000..c89bd7c
--- /dev/null
+++ b/devapidocs/org/apache/hadoop/hbase/quotas/QuotaObserverChore.html
@@ -0,0 +1,1120 @@
+
+
+
+
+
+QuotaObserverChore (Apache HBase 2.0.0-SNAPSHOT API)
+
+
+
+
+
+
+org.apache.hadoop.hbase.quotas
+Class 
QuotaObserverChore
+
+
+
+java.lang.Object
+  org.apache.hadoop.hbase.ScheduledChore
+    org.apache.hadoop.hbase.quotas.QuotaObserverChore
+
+
+
+
+
+
+
+
+
+All Implemented Interfaces:
+java.lang.Runnable
+
+
+
+@InterfaceAudience.Private
+public class QuotaObserverChore
+extends ScheduledChore
+Reads the currently received Region filesystem-space use 
reports and acts on those which
+ violate a defined quota.
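The chore itself is HBase-internal, but the pattern it follows (a task run on a fixed period that scans the latest space-use reports and flags quota violators) can be sketched with plain JDK types. The table names, the quota limit and the period below are invented for the illustration and are not the chore's real configuration.

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

/** Conceptual stand-in for the chore: poll space-use reports, flag quota violators. */
public class QuotaChoreSketch {
  // table name -> bytes used, as it might be reported by region servers (illustrative)
  static final Map<String, Long> SPACE_USE = new ConcurrentHashMap<>();
  static final long QUOTA_LIMIT_BYTES = 1024L * 1024 * 1024;  // assumed 1 GB table quota

  public static void main(String[] args) {
    SPACE_USE.put("ns:web_logs", 2L * 1024 * 1024 * 1024);
    SPACE_USE.put("ns:users", 10L * 1024 * 1024);

    ScheduledExecutorService pool = Executors.newSingleThreadScheduledExecutor();
    // The real chore runs on the master on a configurable period; 5 seconds is arbitrary here.
    pool.scheduleAtFixedRate(() -> {
      SPACE_USE.forEach((table, used) -> {
        if (used > QUOTA_LIMIT_BYTES) {
          System.out.println(table + " exceeds its space quota (" + used + " bytes); "
              + "a violation policy (for example, disabling writes) would be applied");
        }
      });
    }, 0, 5, TimeUnit.SECONDS);  // runs until the process is stopped
  }
}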
+
+
+
+
+
+
+
+
+
+
+
+Nested Class Summary
+
+Nested Classes
+
+Modifier and Type
+Class and Description
+
+
+(package private) static class
+QuotaObserverChore.TablesWithQuotas
+A container which encapsulates the tables that either have a table quota or are contained in a
+ namespace which has a namespace quota.
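A rough, hedged sketch of what such a container amounts to follows; the two-set split and the method names are illustrative and are not the nested class's actual API.

import java.util.HashSet;
import java.util.Set;

/** Illustrative container: tables tracked by whether their quota is table- or namespace-scoped. */
public class TablesWithQuotasSketch {
  private final Set<String> tablesWithTableQuota = new HashSet<>();
  private final Set<String> tablesWithNamespaceQuota = new HashSet<>();

  void addTableQuotaTable(String table)     { tablesWithTableQuota.add(table); }
  void addNamespaceQuotaTable(String table) { tablesWithNamespaceQuota.add(table); }

  /** Union of both groups: every table the chore has to inspect. */
  Set<String> getAllTables() {
    Set<String> all = new HashSet<>(tablesWithTableQuota);
    all.addAll(tablesWithNamespaceQuota);
    return all;
  }
}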
+
+
+
+
+
+
+
+
+
+
+Field Summary
+
+Fields
+
+Modifier and Type
+Field and Description
+
+
+private 
org.apache.hadoop.conf.Configuration
+conf
+
+
+private Connection
+conn
+
+
+private static 
org.apache.commons.logging.Log
+LOG
+
+
+private MetricsMaster
+metrics
+
+
+private Map<String,SpaceQuotaSnapshot>
+namespaceQuotaSnapshots
+
+
+private QuotaSnapshotStore<String>
+namespaceSnapshotStore
+
+
+(package private) static long
+QUOTA_OBSERVER_CHORE_DELAY_DEFAULT
+
+
+(package private) static String
+QUOTA_OBSERVER_CHORE_DELAY_KEY
+
+
+(package private) static int
+QUOTA_OBSERVER_CHORE_PERIOD_DEFAULT
+
+
+(package private) static String
+QUOTA_OBSERVER_CHORE_PERIOD_KEY
+
+
+(package private) static double
+QUOTA_OBSERVER_CHORE_REPORT_PERCENT_DEFAULT
+
+
+(package private) static String
+QUOTA_OBSERVER_CHORE_REPORT_PERCENT_KEY
+
+
+(package private) static String
+QUOTA_OBSERVER_CHORE_TIMEUNIT_DEFAULT
+
+
+(package private) static String
+QUOTA_OBSERVER_CHORE_TIMEUNIT_KEY
+
+
+private MasterQuotaManager
+quotaManager
+
+
+private http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in 

[03/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c635e71b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestReplicaWithCluster.RegionServerStoppedCopro.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestReplicaWithCluster.RegionServerStoppedCopro.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestReplicaWithCluster.RegionServerStoppedCopro.html
index a519d7c..ebf0532 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestReplicaWithCluster.RegionServerStoppedCopro.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestReplicaWithCluster.RegionServerStoppedCopro.html
@@ -47,538 +47,612 @@
 039import 
org.apache.hadoop.hbase.HColumnDescriptor;
 040import 
org.apache.hadoop.hbase.HConstants;
 041import 
org.apache.hadoop.hbase.HTableDescriptor;
-042import org.apache.hadoop.hbase.Waiter;
-043
-044import 
org.apache.hadoop.hbase.client.replication.ReplicationAdmin;
-045import 
org.apache.hadoop.hbase.coprocessor.ObserverContext;
-046import 
org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
-047import 
org.apache.hadoop.hbase.coprocessor.RegionObserver;
-048import 
org.apache.hadoop.hbase.ipc.RpcControllerFactory;
-049import 
org.apache.hadoop.hbase.regionserver.RegionScanner;
-050import 
org.apache.hadoop.hbase.regionserver.RegionServerStoppedException;
-051import 
org.apache.hadoop.hbase.regionserver.StorefileRefresherChore;
-052import 
org.apache.hadoop.hbase.regionserver.TestHRegionServerBulkLoad;
-053import 
org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-054import 
org.apache.hadoop.hbase.testclassification.ClientTests;
-055import 
org.apache.hadoop.hbase.testclassification.MediumTests;
-056import 
org.apache.hadoop.hbase.util.Bytes;
-057import 
org.apache.hadoop.hbase.util.Pair;
-058import 
org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster;
-059import org.junit.AfterClass;
-060import org.junit.Assert;
-061import org.junit.BeforeClass;
-062import org.junit.Test;
-063import 
org.junit.experimental.categories.Category;
-064
-065@Category({MediumTests.class, 
ClientTests.class})
-066public class TestReplicaWithCluster {
-067  private static final Log LOG = 
LogFactory.getLog(TestReplicaWithCluster.class);
-068
-069  private static final int NB_SERVERS = 
3;
-070  private static final byte[] row = 
TestReplicaWithCluster.class.getName().getBytes();
-071  private static final 
HBaseTestingUtility HTU = new HBaseTestingUtility();
-072
-073  // second minicluster used in testing 
of replication
-074  private static HBaseTestingUtility 
HTU2;
-075  private static final byte[] f = 
HConstants.CATALOG_FAMILY;
-076
-077  private final static int REFRESH_PERIOD 
= 1000;
-078
-079  /**
-080   * This copro is used to synchronize 
the tests.
-081   */
-082  public static class SlowMeCopro 
implements RegionObserver {
-083static final AtomicLong sleepTime = 
new AtomicLong(0);
-084    static final AtomicReference<CountDownLatch> cdl = new AtomicReference<>(new CountDownLatch(0));
-085
-086public SlowMeCopro() {
-087}
-088
-089@Override
-090public void preGetOp(final 
ObserverContextRegionCoprocessorEnvironment e,
-091 final Get get, 
final ListCell results) throws IOException {
-092
-093  if 
(e.getEnvironment().getRegion().getRegionInfo().getReplicaId() == 0) {
-094CountDownLatch latch = 
cdl.get();
-095try {
-096      if (sleepTime.get() > 0) {
-097LOG.info("Sleeping for " + 
sleepTime.get() + " ms");
-098
Thread.sleep(sleepTime.get());
-099      } else if (latch.getCount() > 0) {
-100LOG.info("Waiting for the 
counterCountDownLatch");
-101latch.await(2, 
TimeUnit.MINUTES); // To help the tests to finish.
-102        if (latch.getCount() > 0) {
-103  throw new 
RuntimeException("Can't wait more");
-104}
-105  }
-106} catch (InterruptedException e1) 
{
-107  LOG.error(e1);
-108}
-109  } else {
-110LOG.info("We're not the primary 
replicas.");
-111  }
-112}
-113  }
-114
-115  /**
-116   * This copro is used to simulate 
region server down exception for Get and Scan
-117   */
-118  public static class 
RegionServerStoppedCopro implements RegionObserver {
-119
-120public RegionServerStoppedCopro() {
-121}
-122
-123@Override
-124public void preGetOp(final 
ObserverContextRegionCoprocessorEnvironment e,
-125final Get get, final 
ListCell results) throws IOException {
-126
-127  int replicaId = 
e.getEnvironment().getRegion().getRegionInfo().getReplicaId();
+042import 
org.apache.hadoop.hbase.RegionLocations;
+043import org.apache.hadoop.hbase.Waiter;
+044
+045import 
org.apache.hadoop.hbase.client.replication.ReplicationAdmin;
+046import 
org.apache.hadoop.hbase.coprocessor.ObserverContext;
+047import 

[03/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8b4cf63f/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.MasterRpcCall.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.MasterRpcCall.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.MasterRpcCall.html
index 4fd4af0..1b2d845 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.MasterRpcCall.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.MasterRpcCall.html
@@ -186,2271 +186,2272 @@
 178import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse;
 179import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigRequest;
 180import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigResponse;
-181import 
org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
-182import 
org.apache.hadoop.hbase.snapshot.RestoreSnapshotException;
-183import 
org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
-184import 
org.apache.hadoop.hbase.util.Bytes;
-185import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-186import 
org.apache.hadoop.hbase.util.ForeignExceptionUtil;
-187import 
org.apache.hadoop.hbase.util.Pair;
-188
-189/**
-190 * The implementation of AsyncAdmin.
-191 */
-192@InterfaceAudience.Private
-193@InterfaceStability.Evolving
-194public class AsyncHBaseAdmin implements 
AsyncAdmin {
-195  public static final String 
FLUSH_TABLE_PROCEDURE_SIGNATURE = "flush-table-proc";
-196
-197  private static final Log LOG = 
LogFactory.getLog(AsyncHBaseAdmin.class);
-198
-199  private final AsyncConnectionImpl 
connection;
-200
-201  private final RawAsyncTable 
metaTable;
-202
-203  private final long rpcTimeoutNs;
-204
-205  private final long 
operationTimeoutNs;
-206
-207  private final long pauseNs;
-208
-209  private final int maxAttempts;
-210
-211  private final int startLogErrorsCnt;
-212
-213  private final NonceGenerator ng;
-214
-215  AsyncHBaseAdmin(AsyncConnectionImpl 
connection) {
-216this.connection = connection;
-217this.metaTable = 
connection.getRawTable(META_TABLE_NAME);
-218this.rpcTimeoutNs = 
connection.connConf.getRpcTimeoutNs();
-219this.operationTimeoutNs = 
connection.connConf.getOperationTimeoutNs();
-220this.pauseNs = 
connection.connConf.getPauseNs();
-221this.maxAttempts = 
connection.connConf.getMaxRetries();
-222this.startLogErrorsCnt = 
connection.connConf.getStartLogErrorsCnt();
-223this.ng = 
connection.getNonceGenerator();
-224  }
-225
-226  private T 
MasterRequestCallerBuilderT newMasterCaller() {
-227return 
this.connection.callerFactory.T masterRequest()
-228.rpcTimeout(rpcTimeoutNs, 
TimeUnit.NANOSECONDS)
-229
.operationTimeout(operationTimeoutNs, TimeUnit.NANOSECONDS)
-230.pause(pauseNs, 
TimeUnit.NANOSECONDS).maxAttempts(maxAttempts)
-231
.startLogErrorsCnt(startLogErrorsCnt);
-232  }
-233
-234  private T 
AdminRequestCallerBuilderT newAdminCaller() {
-235return 
this.connection.callerFactory.T adminRequest()
-236.rpcTimeout(rpcTimeoutNs, 
TimeUnit.NANOSECONDS)
-237
.operationTimeout(operationTimeoutNs, TimeUnit.NANOSECONDS)
-238.pause(pauseNs, 
TimeUnit.NANOSECONDS).maxAttempts(maxAttempts)
-239
.startLogErrorsCnt(startLogErrorsCnt);
-240  }
-241
-242  @FunctionalInterface
-243  private interface 
MasterRpcCallRESP, REQ {
-244void call(MasterService.Interface 
stub, HBaseRpcController controller, REQ req,
-245RpcCallbackRESP done);
-246  }
-247
-248  @FunctionalInterface
-249  private interface AdminRpcCallRESP, 
REQ {
-250void call(AdminService.Interface 
stub, HBaseRpcController controller, REQ req,
-251RpcCallbackRESP done);
-252  }
-253
-254  @FunctionalInterface
-255  private interface ConverterD, S 
{
-256D convert(S src) throws 
IOException;
-257  }
-258
-259  private PREQ, PRESP, RESP 
CompletableFutureRESP call(HBaseRpcController controller,
-260  MasterService.Interface stub, PREQ 
preq, MasterRpcCallPRESP, PREQ rpcCall,
-261  ConverterRESP, PRESP 
respConverter) {
-262CompletableFutureRESP future 
= new CompletableFuture();
-263rpcCall.call(stub, controller, preq, 
new RpcCallbackPRESP() {
-264
-265  @Override
-266  public void run(PRESP resp) {
-267if (controller.failed()) {
-268  
future.completeExceptionally(controller.getFailed());
-269} else {
-270  try {
-271
future.complete(respConverter.convert(resp));
-272  } catch (IOException e) {
-273
future.completeExceptionally(e);
-274  }
-275}
-276  }
-277});
-278return future;
-279  }
-280
-281  //TODO abstract call and adminCall into 
a single method.

[03/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f55ebeaa/hbase-spark/mail-lists.html
--
diff --git a/hbase-spark/mail-lists.html b/hbase-spark/mail-lists.html
index 3b34be6..f32604c 100644
--- a/hbase-spark/mail-lists.html
+++ b/hbase-spark/mail-lists.html
@@ -1,5 +1,5 @@
 http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd;>
-
+
 http://www.w3.org/1999/xhtml; xml:lang="en" lang="en">
   
 
@@ -10,7 +10,7 @@
   @import url("./css/site.css");
 
 
-
+
 
 
 
@@ -27,7 +27,7 @@
 
 
 
-Last Published: 2017-05-14
+Last Published: 2017-05-15
   | Version: 
2.0.0-SNAPSHOT
   
 Apache HBase - Spark

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f55ebeaa/hbase-spark/plugin-management.html
--
diff --git a/hbase-spark/plugin-management.html 
b/hbase-spark/plugin-management.html
index 7705526..d495230 100644
--- a/hbase-spark/plugin-management.html
+++ b/hbase-spark/plugin-management.html
@@ -1,5 +1,5 @@
 http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd;>
-
+
 http://www.w3.org/1999/xhtml; xml:lang="en" lang="en">
   
 
@@ -10,7 +10,7 @@
   @import url("./css/site.css");
 
 
-
+
 
 
 
@@ -27,7 +27,7 @@
 
 
 
-Last Published: 2017-05-14
+Last Published: 2017-05-15
   | Version: 
2.0.0-SNAPSHOT
   
 Apache HBase - Spark

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f55ebeaa/hbase-spark/plugins.html
--
diff --git a/hbase-spark/plugins.html b/hbase-spark/plugins.html
index 41c48ef..91c67ca 100644
--- a/hbase-spark/plugins.html
+++ b/hbase-spark/plugins.html
@@ -1,5 +1,5 @@
 http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd;>
-
+
 http://www.w3.org/1999/xhtml; xml:lang="en" lang="en">
   
 
@@ -10,7 +10,7 @@
   @import url("./css/site.css");
 
 
-
+
 
 
 
@@ -27,7 +27,7 @@
 
 
 
-Last Published: 2017-05-14
+Last Published: 2017-05-15
   | Version: 
2.0.0-SNAPSHOT
   
 Apache HBase - Spark

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f55ebeaa/hbase-spark/project-info.html
--
diff --git a/hbase-spark/project-info.html b/hbase-spark/project-info.html
index 61bebc4..a74281e 100644
--- a/hbase-spark/project-info.html
+++ b/hbase-spark/project-info.html
@@ -1,5 +1,5 @@
 http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd;>
-
+
 http://www.w3.org/1999/xhtml; xml:lang="en" lang="en">
   
 
@@ -10,7 +10,7 @@
   @import url("./css/site.css");
 
 
-
+
 
 
 
@@ -27,7 +27,7 @@
 
 
 
-Last Published: 2017-05-14
+Last Published: 2017-05-15
   | Version: 
2.0.0-SNAPSHOT
   
 Apache HBase - Spark

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f55ebeaa/hbase-spark/project-reports.html
--
diff --git a/hbase-spark/project-reports.html b/hbase-spark/project-reports.html
index 14a56f0..5a451c0 100644
--- a/hbase-spark/project-reports.html
+++ b/hbase-spark/project-reports.html
@@ -1,5 +1,5 @@
 http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd;>
-
+
 http://www.w3.org/1999/xhtml; xml:lang="en" lang="en">
   
 
@@ -10,7 +10,7 @@
   @import url("./css/site.css");
 
 
-
+
 
 
 
@@ -27,7 +27,7 @@
 
 
 
-Last Published: 2017-05-14
+Last Published: 2017-05-15
   | Version: 
2.0.0-SNAPSHOT
   
 Apache HBase - Spark

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f55ebeaa/hbase-spark/project-summary.html
--
diff --git a/hbase-spark/project-summary.html b/hbase-spark/project-summary.html
index e68bc91..73ce5d8 100644
--- a/hbase-spark/project-summary.html
+++ b/hbase-spark/project-summary.html
@@ -1,5 +1,5 @@
 http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd;>
-
+
 http://www.w3.org/1999/xhtml; xml:lang="en" lang="en">
   
 
@@ -10,7 +10,7 @@
   @import url("./css/site.css");
 
 
-
+
 
 
 
@@ -27,7 +27,7 @@
 
 
 
-  

[03/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8e0a5167/testdevapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BalancerTestBase.MockMapping.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BalancerTestBase.MockMapping.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BalancerTestBase.MockMapping.html
index fb4fa30..d501058 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BalancerTestBase.MockMapping.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BalancerTestBase.MockMapping.html
@@ -29,660 +29,639 @@
 021import static 
org.junit.Assert.assertNull;
 022import static 
org.junit.Assert.assertTrue;
 023
-024import java.io.IOException;
-025import java.util.ArrayList;
-026import java.util.Arrays;
-027import java.util.HashMap;
-028import java.util.HashSet;
-029import java.util.LinkedList;
-030import java.util.List;
-031import java.util.Map;
-032import java.util.Map.Entry;
-033import java.util.Queue;
-034import java.util.Random;
-035import java.util.Set;
-036import java.util.SortedSet;
-037import java.util.TreeMap;
-038import java.util.TreeSet;
-039
-040import com.google.protobuf.Service;
-041import org.apache.commons.logging.Log;
-042import 
org.apache.commons.logging.LogFactory;
-043import 
org.apache.hadoop.conf.Configuration;
-044import 
org.apache.hadoop.hbase.ChoreService;
-045import 
org.apache.hadoop.hbase.CoordinatedStateManager;
-046import 
org.apache.hadoop.hbase.HBaseConfiguration;
-047import 
org.apache.hadoop.hbase.HColumnDescriptor;
-048import 
org.apache.hadoop.hbase.HRegionInfo;
-049import 
org.apache.hadoop.hbase.HTableDescriptor;
-050import 
org.apache.hadoop.hbase.NamespaceDescriptor;
-051import 
org.apache.hadoop.hbase.ProcedureInfo;
-052import 
org.apache.hadoop.hbase.ServerName;
-053import 
org.apache.hadoop.hbase.TableDescriptors;
-054import 
org.apache.hadoop.hbase.TableName;
-055import 
org.apache.hadoop.hbase.TableNotDisabledException;
-056import 
org.apache.hadoop.hbase.TableNotFoundException;
-057import 
org.apache.hadoop.hbase.client.ClusterConnection;
-058import 
org.apache.hadoop.hbase.client.RegionReplicaUtil;
-059import 
org.apache.hadoop.hbase.master.RackManager;
-060import 
org.apache.hadoop.hbase.master.RegionPlan;
-061import 
org.apache.hadoop.hbase.executor.ExecutorService;
-062import 
org.apache.hadoop.hbase.master.*;
-063import 
org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
-064import 
org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
-065import 
org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
-066import 
org.apache.hadoop.hbase.quotas.MasterQuotaManager;
-067import 
org.apache.hadoop.hbase.security.User;
-068import 
org.apache.hadoop.hbase.util.Bytes;
-069import 
org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
-070import 
org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
-071import 
org.apache.hadoop.net.DNSToSwitchMapping;
-072import org.junit.Assert;
-073import org.junit.BeforeClass;
-074
-075/**
-076 * Class used to be the base of unit 
tests on load balancers. It gives helper
-077 * methods to create maps of {@link 
ServerName} to lists of {@link HRegionInfo}
-078 * and to check list of region plans.
-079 *
-080 */
-081public class BalancerTestBase {
-082  private static final Log LOG = 
LogFactory.getLog(BalancerTestBase.class);
-083  protected static Random rand = new 
Random();
-084  static int regionId = 0;
-085  protected static Configuration conf;
-086  protected static StochasticLoadBalancer 
loadBalancer;
-087
-088  @BeforeClass
-089  public static void beforeAllTests() 
throws Exception {
-090conf = HBaseConfiguration.create();
-091
conf.setClass("hbase.util.ip.to.rack.determiner", MockMapping.class, 
DNSToSwitchMapping.class);
-092
conf.setFloat("hbase.master.balancer.stochastic.maxMovePercent", 0.75f);
-093conf.setFloat("hbase.regions.slop", 
0.0f);
-094
conf.setFloat("hbase.master.balancer.stochastic.localityCost", 0);
-095loadBalancer = new 
StochasticLoadBalancer();
-096loadBalancer.setConf(conf);
-097  }
-098
-099  protected int[] largeCluster = new 
int[] { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-100  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-101  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-102  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-103  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-104  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-105  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-106  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 

[03/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/709b8fcc/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html
index 1642d61..17bc96c 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html
@@ -2317,7 +2317,7 @@
 2309  }
 2310
 2311  public long restoreSnapshot(final 
SnapshotDescription snapshotDesc,
-2312  final long nonceGroup, final long 
nonce, final boolean restoreAcl) throws IOException {
+2312  final long nonceGroup, final long 
nonce) throws IOException {
 2313checkInitialized();
 2314
getSnapshotManager().checkSnapshotSupport();
 2315
@@ -2329,1032 +2329,1031 @@
 2321new 
MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) {
 2322  @Override
 2323  protected void run() throws 
IOException {
-2324  setProcId(
-2325
getSnapshotManager().restoreOrCloneSnapshot(snapshotDesc, getNonceKey(), 
restoreAcl));
-2326  }
-2327
-2328  @Override
-2329  protected String getDescription() 
{
-2330return 
"RestoreSnapshotProcedure";
-2331  }
-2332});
-2333  }
-2334
-2335  @Override
-2336  public void checkTableModifiable(final 
TableName tableName)
-2337  throws IOException, 
TableNotFoundException, TableNotDisabledException {
-2338if (isCatalogTable(tableName)) {
-2339  throw new IOException("Can't 
modify catalog tables");
-2340}
-2341if 
(!MetaTableAccessor.tableExists(getConnection(), tableName)) {
-2342  throw new 
TableNotFoundException(tableName);
-2343}
-2344if 
(!getTableStateManager().isTableState(tableName, TableState.State.DISABLED)) 
{
-2345  throw new 
TableNotDisabledException(tableName);
-2346}
-2347  }
-2348
-2349  /**
-2350   * @return cluster status
-2351   */
-2352  public ClusterStatus 
getClusterStatus() throws InterruptedIOException {
-2353// Build Set of backup masters from 
ZK nodes
-2354ListString 
backupMasterStrings;
-2355try {
-2356  backupMasterStrings = 
ZKUtil.listChildrenNoWatch(this.zooKeeper,
-2357
this.zooKeeper.znodePaths.backupMasterAddressesZNode);
-2358} catch (KeeperException e) {
-2359  
LOG.warn(this.zooKeeper.prefix("Unable to list backup servers"), e);
-2360  backupMasterStrings = null;
-2361}
-2362
-2363ListServerName backupMasters 
= null;
-2364if (backupMasterStrings != null 
 !backupMasterStrings.isEmpty()) {
-2365  backupMasters = new 
ArrayList(backupMasterStrings.size());
-2366  for (String s: 
backupMasterStrings) {
-2367try {
-2368  byte [] bytes;
-2369  try {
-2370bytes = 
ZKUtil.getData(this.zooKeeper, ZKUtil.joinZNode(
-2371
this.zooKeeper.znodePaths.backupMasterAddressesZNode, s));
-2372  } catch (InterruptedException 
e) {
-2373throw new 
InterruptedIOException();
-2374  }
-2375  if (bytes != null) {
-2376ServerName sn;
-2377try {
-2378  sn = 
ProtobufUtil.parseServerNameFrom(bytes);
-2379} catch 
(DeserializationException e) {
-2380  LOG.warn("Failed parse, 
skipping registering backup server", e);
-2381  continue;
-2382}
-2383backupMasters.add(sn);
-2384  }
-2385} catch (KeeperException e) {
-2386  
LOG.warn(this.zooKeeper.prefix("Unable to get information about " +
-2387   "backup servers"), 
e);
-2388}
-2389  }
-2390  Collections.sort(backupMasters, 
new ComparatorServerName() {
-2391@Override
-2392public int compare(ServerName 
s1, ServerName s2) {
-2393  return 
s1.getServerName().compareTo(s2.getServerName());
-2394}});
-2395}
-2396
-2397String clusterId = fileSystemManager 
!= null ?
-2398  
fileSystemManager.getClusterId().toString() : null;
-2399SetRegionState 
regionsInTransition = assignmentManager != null ?
-2400  
assignmentManager.getRegionStates().getRegionsInTransition() : null;
-2401String[] coprocessors = cpHost != 
null ? getMasterCoprocessors() : null;
-2402boolean balancerOn = 
loadBalancerTracker != null ?
-2403  loadBalancerTracker.isBalancerOn() 
: false;
-2404MapServerName, ServerLoad 
onlineServers = null;
-2405SetServerName deadServers = 
null;
-2406if (serverManager != null) {
-2407  deadServers = 
serverManager.getDeadServers().copyServerNames();
-2408  onlineServers = 
serverManager.getOnlineServers();
-2409}
-2410return new 
ClusterStatus(VersionInfo.getVersion(), clusterId,
-2411  onlineServers, deadServers, 
serverName, backupMasters,
-2412  regionsInTransition, coprocessors, 

[03/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1241ee85/testdevapidocs/src-html/org/apache/hadoop/hbase/io/hfile/TestHFileBlock.BlockReaderThread.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/io/hfile/TestHFileBlock.BlockReaderThread.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/io/hfile/TestHFileBlock.BlockReaderThread.html
index b535394..1b4407c 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/io/hfile/TestHFileBlock.BlockReaderThread.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/io/hfile/TestHFileBlock.BlockReaderThread.html
@@ -679,187 +679,188 @@
 671  while (System.currentTimeMillis() 
 endTime) {
 672int blockId = 
rand.nextInt(NUM_TEST_BLOCKS);
 673long offset = 
offsets.get(blockId);
-674boolean pread = 
rand.nextBoolean();
-675boolean withOnDiskSize = 
rand.nextBoolean();
-676long expectedSize =
-677  (blockId == NUM_TEST_BLOCKS - 1 
? fileSize
-678  : offsets.get(blockId + 1)) 
- offset;
-679
-680HFileBlock b;
-681try {
-682  long onDiskSizeArg = 
withOnDiskSize ? expectedSize : -1;
-683  b = hbr.readBlockData(offset, 
onDiskSizeArg, pread);
-684} catch (IOException ex) {
-685  LOG.error("Error in client " + 
clientId + " trying to read block at "
-686  + offset + ", pread=" + 
pread + ", withOnDiskSize=" +
-687  withOnDiskSize, ex);
-688  return false;
-689}
-690
-691assertEquals(types.get(blockId), 
b.getBlockType());
-692assertEquals(expectedSize, 
b.getOnDiskSizeWithHeader());
-693assertEquals(offset, 
b.getOffset());
-694
-695++numBlocksRead;
-696if (pread)
-697  ++numPositionalRead;
-698if (withOnDiskSize)
-699  ++numWithOnDiskSize;
-700  }
-701  LOG.info("Client " + clientId + " 
successfully read " + numBlocksRead +
-702" blocks (with pread: " + 
numPositionalRead + ", with onDiskSize " +
-703"specified: " + numWithOnDiskSize 
+ ")");
-704
-705  return true;
-706}
-707
-708  }
-709
-710  @Test
-711  public void testConcurrentReading() 
throws Exception {
-712testConcurrentReadingInternals();
-713  }
-714
-715  protected void 
testConcurrentReadingInternals() throws IOException,
-716  InterruptedException, 
ExecutionException {
-717for (Compression.Algorithm 
compressAlgo : COMPRESSION_ALGORITHMS) {
-718  Path path = new 
Path(TEST_UTIL.getDataTestDir(), "concurrent_reading");
-719  Random rand = defaultRandom();
-720  ListLong offsets = new 
ArrayList();
-721  ListBlockType types = new 
ArrayList();
-722  writeBlocks(rand, compressAlgo, 
path, offsets, null, types, null);
-723  FSDataInputStream is = 
fs.open(path);
-724  long fileSize = 
fs.getFileStatus(path).getLen();
-725  HFileContext meta = new 
HFileContextBuilder()
-726  
.withHBaseCheckSum(true)
-727  
.withIncludesMvcc(includesMemstoreTS)
-728  
.withIncludesTags(includesTag)
-729  
.withCompression(compressAlgo)
-730  .build();
-731  HFileBlock.FSReader hbr = new 
HFileBlock.FSReaderImpl(is, fileSize, meta);
-732
-733  Executor exec = 
Executors.newFixedThreadPool(NUM_READER_THREADS);
-734  
ExecutorCompletionServiceBoolean ecs = new 
ExecutorCompletionService(exec);
-735
-736  for (int i = 0; i  
NUM_READER_THREADS; ++i) {
-737ecs.submit(new 
BlockReaderThread("reader_" + (char) ('A' + i), hbr,
-738offsets, types, fileSize));
-739  }
-740
-741  for (int i = 0; i  
NUM_READER_THREADS; ++i) {
-742FutureBoolean result = 
ecs.take();
-743assertTrue(result.get());
-744if (detailedLogging) {
-745  LOG.info(String.valueOf(i + 
1)
-746+ " reader threads finished 
successfully (algo=" + compressAlgo
-747+ ")");
-748}
-749  }
-750
-751  is.close();
-752}
-753  }
-754
-755  private long writeBlocks(Random rand, 
Compression.Algorithm compressAlgo,
-756  Path path, ListLong 
expectedOffsets, ListLong expectedPrevOffsets,
-757  ListBlockType 
expectedTypes, ListByteBuffer expectedContents
-758  ) throws IOException {
-759boolean cacheOnWrite = 
expectedContents != null;
-760FSDataOutputStream os = 
fs.create(path);
-761HFileContext meta = new 
HFileContextBuilder()
-762
.withHBaseCheckSum(true)
-763
.withIncludesMvcc(includesMemstoreTS)
-764
.withIncludesTags(includesTag)
-765
.withCompression(compressAlgo)
-766
.withBytesPerCheckSum(HFile.DEFAULT_BYTES_PER_CHECKSUM)
-767.build();
-768HFileBlock.Writer hbw = new 

[03/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dd7176bf/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.html
index 8b22aa1..f2c44db 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.html
@@ -100,4135 +100,4164 @@
 092import 
org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
 093import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos;
 094import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
-095import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest;
-096import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse;
-097import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
-098import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest;
-099import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest;
-100import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse;
-101import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterRequest;
-102import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterResponse;
-103import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerRequest;
-104import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationRequest;
-105import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
-106import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceRequest;
-107import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceResponse;
-108import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
-109import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NameStringPair;
-110import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription;
-111import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
-112import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema;
-113import 
org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos;
-114import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos;
-115import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest;
-116import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse;
-117import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest;
-118import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse;
-119import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest;
-120import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest;
-121import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceResponse;
-122import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableRequest;
-123import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableResponse;
-124import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnRequest;
-125import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnResponse;
-126import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceRequest;
-127import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceResponse;
-128import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotRequest;
-129import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableRequest;
-130import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableResponse;
-131import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableRequest;
-132import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableResponse;
-133import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersRequest;
-134import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableRequest;
-135import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableResponse;
-136import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureRequest;
-137import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureResponse;
-138import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterStatusRequest;
-139import 

[03/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7ef4c5a9/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.RegionScannerCloseCallBack.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.RegionScannerCloseCallBack.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.RegionScannerCloseCallBack.html
index 17d1bcb..6dfd1d4 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.RegionScannerCloseCallBack.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.RegionScannerCloseCallBack.html
@@ -35,863 +35,863 @@
 027import java.net.InetSocketAddress;
 028import java.net.UnknownHostException;
 029import java.nio.ByteBuffer;
-030import java.util.ArrayList;
-031import java.util.Collections;
-032import java.util.HashMap;
-033import java.util.Iterator;
-034import java.util.List;
-035import java.util.Map;
-036import java.util.Map.Entry;
-037import java.util.NavigableMap;
-038import java.util.Set;
-039import java.util.TreeSet;
-040import 
java.util.concurrent.ConcurrentHashMap;
-041import 
java.util.concurrent.ConcurrentMap;
-042import 
java.util.concurrent.atomic.AtomicLong;
-043import 
java.util.concurrent.atomic.LongAdder;
-044
-045import 
org.apache.commons.lang.mutable.MutableObject;
-046import org.apache.commons.logging.Log;
-047import 
org.apache.commons.logging.LogFactory;
-048import 
org.apache.hadoop.conf.Configuration;
-049import org.apache.hadoop.fs.Path;
-050import 
org.apache.hadoop.hbase.ByteBufferCell;
-051import org.apache.hadoop.hbase.Cell;
-052import 
org.apache.hadoop.hbase.CellScannable;
-053import 
org.apache.hadoop.hbase.CellScanner;
-054import 
org.apache.hadoop.hbase.CellUtil;
-055import 
org.apache.hadoop.hbase.DoNotRetryIOException;
-056import 
org.apache.hadoop.hbase.DroppedSnapshotException;
-057import 
org.apache.hadoop.hbase.HBaseIOException;
-058import 
org.apache.hadoop.hbase.HConstants;
-059import 
org.apache.hadoop.hbase.HRegionInfo;
-060import 
org.apache.hadoop.hbase.HTableDescriptor;
-061import 
org.apache.hadoop.hbase.MultiActionResultTooLarge;
-062import 
org.apache.hadoop.hbase.NotServingRegionException;
-063import 
org.apache.hadoop.hbase.ServerName;
-064import 
org.apache.hadoop.hbase.TableName;
-065import 
org.apache.hadoop.hbase.UnknownScannerException;
-066import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
-067import 
org.apache.hadoop.hbase.client.Append;
-068import 
org.apache.hadoop.hbase.client.ConnectionUtils;
-069import 
org.apache.hadoop.hbase.client.Delete;
-070import 
org.apache.hadoop.hbase.client.Durability;
-071import 
org.apache.hadoop.hbase.client.Get;
-072import 
org.apache.hadoop.hbase.client.Increment;
-073import 
org.apache.hadoop.hbase.client.Mutation;
-074import 
org.apache.hadoop.hbase.client.Put;
-075import 
org.apache.hadoop.hbase.client.RegionReplicaUtil;
-076import 
org.apache.hadoop.hbase.client.Result;
-077import 
org.apache.hadoop.hbase.client.RowMutations;
-078import 
org.apache.hadoop.hbase.client.Scan;
-079import 
org.apache.hadoop.hbase.client.VersionInfoUtil;
-080import 
org.apache.hadoop.hbase.conf.ConfigurationObserver;
-081import 
org.apache.hadoop.hbase.exceptions.FailedSanityCheckException;
-082import 
org.apache.hadoop.hbase.exceptions.OutOfOrderScannerNextException;
-083import 
org.apache.hadoop.hbase.exceptions.ScannerResetException;
-084import 
org.apache.hadoop.hbase.filter.ByteArrayComparable;
-085import 
org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
-086import 
org.apache.hadoop.hbase.ipc.HBaseRPCErrorHandler;
-087import 
org.apache.hadoop.hbase.ipc.HBaseRpcController;
-088import 
org.apache.hadoop.hbase.ipc.PriorityFunction;
-089import 
org.apache.hadoop.hbase.ipc.QosPriority;
-090import 
org.apache.hadoop.hbase.ipc.RpcCallContext;
-091import 
org.apache.hadoop.hbase.ipc.RpcCallback;
-092import 
org.apache.hadoop.hbase.ipc.RpcServer;
-093import 
org.apache.hadoop.hbase.ipc.RpcServer.BlockingServiceAndInterface;
-094import 
org.apache.hadoop.hbase.ipc.RpcServerFactory;
-095import 
org.apache.hadoop.hbase.ipc.RpcServerInterface;
-096import 
org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
-097import 
org.apache.hadoop.hbase.ipc.ServerRpcController;
-098import 
org.apache.hadoop.hbase.master.MasterRpcServices;
-099import 
org.apache.hadoop.hbase.quotas.OperationQuota;
-100import 
org.apache.hadoop.hbase.quotas.RegionServerQuotaManager;
-101import 
org.apache.hadoop.hbase.regionserver.HRegion.RegionScannerImpl;
-102import 
org.apache.hadoop.hbase.regionserver.Leases.Lease;
-103import 
org.apache.hadoop.hbase.regionserver.Leases.LeaseStillHeldException;
-104import 
org.apache.hadoop.hbase.regionserver.Region.Operation;
-105import 
org.apache.hadoop.hbase.regionserver.ScannerContext.LimitScope;
-106import 
org.apache.hadoop.hbase.regionserver.handler.OpenMetaHandler;
-107import 

[03/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/31df4674/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.AbortProcedureFuture.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.AbortProcedureFuture.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.AbortProcedureFuture.html
index 0610ad0..8b22aa1 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.AbortProcedureFuture.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.AbortProcedureFuture.html
@@ -2665,1591 +2665,1570 @@
 2657  @Override
 2658  public byte[] 
execProcedureWithRet(String signature, String instance, MapString, 
String props)
 2659  throws IOException {
-2660ProcedureDescription.Builder builder 
= ProcedureDescription.newBuilder();
-2661
builder.setSignature(signature).setInstance(instance);
-2662for (EntryString, String 
entry : props.entrySet()) {
-2663  NameStringPair pair = 
NameStringPair.newBuilder().setName(entry.getKey())
-2664  
.setValue(entry.getValue()).build();
-2665  builder.addConfiguration(pair);
-2666}
-2667
-2668final ExecProcedureRequest request = 
ExecProcedureRequest.newBuilder()
-2669
.setProcedure(builder.build()).build();
-2670// run the procedure on the master
-2671ExecProcedureResponse response = 
executeCallable(new MasterCallableExecProcedureResponse(
-2672getConnection(), 
getRpcControllerFactory()) {
-2673  @Override
-2674  protected ExecProcedureResponse 
rpcCall() throws Exception {
-2675return 
master.execProcedureWithRet(getRpcController(), request);
-2676  }
-2677});
-2678
-2679return response.hasReturnData() ? 
response.getReturnData().toByteArray() : null;
-2680  }
-2681
-2682  @Override
-2683  public void execProcedure(String 
signature, String instance, MapString, String props)
-2684  throws IOException {
-2685ProcedureDescription.Builder builder 
= ProcedureDescription.newBuilder();
-2686
builder.setSignature(signature).setInstance(instance);
-2687for (EntryString, String 
entry : props.entrySet()) {
-2688  NameStringPair pair = 
NameStringPair.newBuilder().setName(entry.getKey())
-2689  
.setValue(entry.getValue()).build();
-2690  builder.addConfiguration(pair);
-2691}
-2692
-2693final ExecProcedureRequest request = 
ExecProcedureRequest.newBuilder()
-2694
.setProcedure(builder.build()).build();
-2695// run the procedure on the master
-2696ExecProcedureResponse response = 
executeCallable(new MasterCallableExecProcedureResponse(
-2697getConnection(), 
getRpcControllerFactory()) {
-2698  @Override
-2699  protected ExecProcedureResponse 
rpcCall() throws Exception {
-2700return 
master.execProcedure(getRpcController(), request);
-2701  }
-2702});
-2703
-2704long start = 
EnvironmentEdgeManager.currentTime();
-2705long max = 
response.getExpectedTimeout();
-2706long maxPauseTime = max / 
this.numRetries;
-2707int tries = 0;
-2708LOG.debug("Waiting a max of " + max 
+ " ms for procedure '" +
-2709signature + " : " + instance + 
"'' to complete. (max " + maxPauseTime + " ms per retry)");
-2710boolean done = false;
-2711while (tries == 0
-2712|| 
((EnvironmentEdgeManager.currentTime() - start)  max  !done)) {
-2713  try {
-2714// sleep a backoff = 
pauseTime amount
-2715long sleep = 
getPauseTime(tries++);
-2716sleep = sleep  maxPauseTime 
? maxPauseTime : sleep;
-2717LOG.debug("(#" + tries + ") 
Sleeping: " + sleep +
-2718  "ms while waiting for 
procedure completion.");
-2719Thread.sleep(sleep);
-2720  } catch (InterruptedException e) 
{
-2721throw 
(InterruptedIOException)new 
InterruptedIOException("Interrupted").initCause(e);
-2722  }
-2723  LOG.debug("Getting current status 
of procedure from master...");
-2724  done = 
isProcedureFinished(signature, instance, props);
-2725}
-2726if (!done) {
-2727  throw new IOException("Procedure 
'" + signature + " : " + instance
-2728  + "' wasn't completed in 
expectedTime:" + max + " ms");
-2729}
+2660ProcedureDescription desc = 
ProtobufUtil.buildProcedureDescription(signature, instance, props);
+2661final ExecProcedureRequest request 
=
+2662
ExecProcedureRequest.newBuilder().setProcedure(desc).build();
+2663// run the procedure on the master
+2664ExecProcedureResponse response = 
executeCallable(
+2665  new 
MasterCallableExecProcedureResponse(getConnection(), 
getRpcControllerFactory()) {
+2666@Override
+2667protected ExecProcedureResponse 
rpcCall() throws Exception {
+2668  return 
master.execProcedureWithRet(getRpcController(), request);
+2669}
+2670  });
+2671
+2672return 

[03/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6f2e75f2/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.Writer.State.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.Writer.State.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.Writer.State.html
index a665139..3fedd0b 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.Writer.State.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.Writer.State.html
@@ -879,1201 +879,1221 @@
 871// includes the header size also.
 872private int 
unencodedDataSizeWritten;
 873
-874/**
-875 * Bytes to be written to the file 
system, including the header. Compressed
-876 * if compression is turned on. It 
also includes the checksum data that
-877 * immediately follows the block 
data. (header + data + checksums)
-878 */
-879private ByteArrayOutputStream 
onDiskBlockBytesWithHeader;
-880
-881/**
-882 * The size of the checksum data on 
disk. It is used only if data is
-883 * not compressed. If data is 
compressed, then the checksums are already
-884 * part of onDiskBytesWithHeader. If 
data is uncompressed, then this
-885 * variable stores the checksum data 
for this block.
-886 */
-887private byte[] onDiskChecksum = 
HConstants.EMPTY_BYTE_ARRAY;
-888
-889/**
-890 * Current block's start offset in 
the {@link HFile}. Set in
-891 * {@link 
#writeHeaderAndData(FSDataOutputStream)}.
-892 */
-893private long startOffset;
-894
-895/**
-896 * Offset of previous block by block 
type. Updated when the next block is
-897 * started.
-898 */
-899private long[] prevOffsetByType;
-900
-901/** The offset of the previous block 
of the same type */
-902private long prevOffset;
-903/** Meta data that holds information 
about the hfileblock**/
-904private HFileContext fileContext;
-905
-906/**
-907 * @param dataBlockEncoder data block 
encoding algorithm to use
-908 */
-909public Writer(HFileDataBlockEncoder 
dataBlockEncoder, HFileContext fileContext) {
-910  if 
(fileContext.getBytesPerChecksum()  HConstants.HFILEBLOCK_HEADER_SIZE) {
-911throw new 
RuntimeException("Unsupported value of bytesPerChecksum. " +
-912" Minimum is " + 
HConstants.HFILEBLOCK_HEADER_SIZE + " but the configured value is " +
-913
fileContext.getBytesPerChecksum());
-914  }
-915  this.dataBlockEncoder = 
dataBlockEncoder != null?
-916  dataBlockEncoder: 
NoOpDataBlockEncoder.INSTANCE;
-917  this.dataBlockEncodingCtx = 
this.dataBlockEncoder.
-918  
newDataBlockEncodingContext(HConstants.HFILEBLOCK_DUMMY_HEADER, fileContext);
-919  // TODO: This should be lazily 
instantiated since we usually do NOT need this default encoder
-920  this.defaultBlockEncodingCtx = new 
HFileBlockDefaultEncodingContext(null,
-921  
HConstants.HFILEBLOCK_DUMMY_HEADER, fileContext);
-922  // TODO: Set BAOS initial size. Use 
fileContext.getBlocksize() and add for header/checksum
-923  baosInMemory = new 
ByteArrayOutputStream();
-924  prevOffsetByType = new 
long[BlockType.values().length];
-925      for (int i = 0; i < prevOffsetByType.length; ++i) {
-926        prevOffsetByType[i] = UNSET;
-927      }
-928  // TODO: Why fileContext saved away 
when we have dataBlockEncoder and/or
-929  // defaultDataBlockEncoder?
-930  this.fileContext = fileContext;
-931}
-932
-933/**
-934 * Starts writing into the block. The 
previous block's data is discarded.
-935 *
-936 * @return the stream the user can 
write their data into
-937 * @throws IOException
-938 */
-939DataOutputStream 
startWriting(BlockType newBlockType)
-940throws IOException {
-941      if (state == State.BLOCK_READY && startOffset != -1) {
-942        // We had a previous block that was written to a stream at a specific
-943        // offset. Save that offset as the last offset of a block of that type.
-944        prevOffsetByType[blockType.getId()] = startOffset;
-945      }
-946
-947  startOffset = -1;
-948  blockType = newBlockType;
-949
-950  baosInMemory.reset();
-951  
baosInMemory.write(HConstants.HFILEBLOCK_DUMMY_HEADER);
-952
-953  state = State.WRITING;
-954
-955  // We will compress it later in 
finishBlock()
-956  userDataStream = new 
ByteBufferWriterDataOutputStream(baosInMemory);
-957  if (newBlockType == BlockType.DATA) 
{
-958
this.dataBlockEncoder.startBlockEncoding(dataBlockEncodingCtx, 
userDataStream);
-959  }
-960  this.unencodedDataSizeWritten = 
0;
-961  return userDataStream;
-962}
-963
-964/**
-965 * Writes the Cell to this block
-966 * @param cell
-967 * @throws IOException
-968 */
-969void write(Cell cell) 

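The removed comments above describe how the block writer accumulates a block in memory as header + data + checksums, writing a dummy header first and patching it once the block is finished. Below is a minimal stand-alone sketch of that buffering pattern, not HBase code; the header size constant is a placeholder assumption.

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class BlockBufferSketch {
  static final int HEADER_SIZE = 33;   // assumed placeholder; HBase uses HConstants.HFILEBLOCK_HEADER_SIZE

  public static byte[] writeBlock(byte[] payload) throws IOException {
    ByteArrayOutputStream baosInMemory = new ByteArrayOutputStream();
    DataOutputStream out = new DataOutputStream(baosInMemory);
    out.write(new byte[HEADER_SIZE]);  // dummy header, patched after the body is complete
    out.write(payload);                // block body, streamed after the reserved header
    byte[] block = baosInMemory.toByteArray();
    // Here HBase would overwrite bytes [0, HEADER_SIZE) with the real header and
    // append checksum data; this sketch just returns the buffered bytes.
    return block;
  }
}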
[03/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/efd0601e/devapidocs/org/apache/hadoop/hbase/client/class-use/TableDescriptorBuilder.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/class-use/TableDescriptorBuilder.html
 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/TableDescriptorBuilder.html
new file mode 100644
index 000..74871af
--- /dev/null
+++ 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/TableDescriptorBuilder.html
@@ -0,0 +1,293 @@
+Uses of Class org.apache.hadoop.hbase.client.TableDescriptorBuilder (Apache HBase 2.0.0-SNAPSHOT API)
+Uses of Class org.apache.hadoop.hbase.client.TableDescriptorBuilder
+
+Packages that use TableDescriptorBuilder
+Package: org.apache.hadoop.hbase.client (Provides HBase Client)
+
+Uses of TableDescriptorBuilder in org.apache.hadoop.hbase.client
+
+Methods in org.apache.hadoop.hbase.client that return TableDescriptorBuilder
+Modifier and Type: Method and Description
+TableDescriptorBuilder: addCoprocessor(String className)
+TableDescriptorBuilder: addCoprocessor(String className, org.apache.hadoop.fs.Path jarFilePath, int priority, Map<String,String> kvs)
+TableDescriptorBuilder: addCoprocessorWithSpec(String specStr)
+TableDescriptorBuilder: addFamily(HColumnDescriptor family)
+TableDescriptorBuilder: modifyFamily(HColumnDescriptor family)
+static TableDescriptorBuilder: newBuilder(byte[] pbBytes)
+    The input should be created by toByteArray(org.apache.hadoop.hbase.client.TableDescriptor).
+static TableDescriptorBuilder: newBuilder(TableDescriptor desc)
+    Copy all configuration, values, families, and name from the input.
+static TableDescriptorBuilder: newBuilder(TableName name)
+TableDescriptorBuilder: remove(byte[] key)
+TableDescriptorBuilder: remove(Bytes key)
+TableDescriptorBuilder: removeConfiguration(String key)
+TableDescriptorBuilder: removeCoprocessor(String className)
+TableDescriptorBuilder: removeFamily(byte[] column)
+TableDescriptorBuilder: setCompactionEnabled(boolean isEnable)
+TableDescriptorBuilder: setConfiguration(String key, String value)
+TableDescriptorBuilder: setDurability(Durability durability)
+TableDescriptorBuilder: setFlushPolicyClassName(String clazz)
+TableDescriptorBuilder

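The new class-use page above lists the fluent methods that return TableDescriptorBuilder. Below is a hedged sketch of typical builder usage; build() is assumed to return a TableDescriptor, since the truncated table above does not show it.

import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class TableDescriptorBuilderSketch {
  static TableDescriptor example() {
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("testTable"))
        .addFamily(new HColumnDescriptor("cf"))   // method listed in the table above
        .setDurability(Durability.ASYNC_WAL)      // method listed in the table above
        .setCompactionEnabled(true)               // method listed in the table above
        .build();                                 // assumed terminal call, cut off in the table above
  }
}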
[03/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/10601a30/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.DisableTableFuture.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.DisableTableFuture.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.DisableTableFuture.html
index 8c56a67..8e3d847 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.DisableTableFuture.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.DisableTableFuture.html
@@ -2506,1742 +2506,1743 @@
 2498  public void restoreSnapshot(final 
String snapshotName)
 2499  throws IOException, 
RestoreSnapshotException {
 2500boolean takeFailSafeSnapshot =
-2501  
conf.getBoolean("hbase.snapshot.restore.take.failsafe.snapshot", false);
-2502restoreSnapshot(snapshotName, 
takeFailSafeSnapshot);
-2503  }
-2504
-2505  @Override
-2506  public void restoreSnapshot(final 
byte[] snapshotName, final boolean takeFailSafeSnapshot)
-2507  throws IOException, 
RestoreSnapshotException {
-2508
restoreSnapshot(Bytes.toString(snapshotName), takeFailSafeSnapshot);
-2509  }
-2510
-2511  /*
-2512   * Check whether the snapshot exists 
and contains disabled table
-2513   *
-2514   * @param snapshotName name of the 
snapshot to restore
-2515   * @throws IOException if a remote or 
network exception occurs
-2516   * @throws RestoreSnapshotException if 
no valid snapshot is found
-2517   */
-2518  private TableName 
getTableNameBeforeRestoreSnapshot(final String snapshotName)
-2519  throws IOException, 
RestoreSnapshotException {
-2520TableName tableName = null;
-2521for (SnapshotDescription 
snapshotInfo: listSnapshots()) {
-2522  if 
(snapshotInfo.getName().equals(snapshotName)) {
-2523tableName = 
snapshotInfo.getTableName();
-2524break;
-2525  }
-2526}
-2527
-2528if (tableName == null) {
-2529  throw new 
RestoreSnapshotException(
-2530"Unable to find the table name 
for snapshot=" + snapshotName);
-2531}
-2532return tableName;
-2533  }
-2534
-2535  @Override
-2536  public void restoreSnapshot(final 
String snapshotName, final boolean takeFailSafeSnapshot)
-2537  throws IOException, 
RestoreSnapshotException {
-2538TableName tableName = 
getTableNameBeforeRestoreSnapshot(snapshotName);
-2539
-2540// The table does not exists, switch 
to clone.
-2541if (!tableExists(tableName)) {
-2542  cloneSnapshot(snapshotName, 
tableName);
-2543  return;
-2544}
-2545
-2546// Check if the table is disabled
-2547if (!isTableDisabled(tableName)) {
-2548  throw new 
TableNotDisabledException(tableName);
-2549}
-2550
-2551// Take a snapshot of the current 
state
-2552String failSafeSnapshotSnapshotName 
= null;
-2553if (takeFailSafeSnapshot) {
-2554  failSafeSnapshotSnapshotName = 
conf.get("hbase.snapshot.restore.failsafe.name",
-2555
"hbase-failsafe-{snapshot.name}-{restore.timestamp}");
-2556  failSafeSnapshotSnapshotName = 
failSafeSnapshotSnapshotName
-2557.replace("{snapshot.name}", 
snapshotName)
-2558.replace("{table.name}", 
tableName.toString().replace(TableName.NAMESPACE_DELIM, '.'))
-2559.replace("{restore.timestamp}", 
String.valueOf(EnvironmentEdgeManager.currentTime()));
-2560  LOG.info("Taking restore-failsafe 
snapshot: " + failSafeSnapshotSnapshotName);
-2561  
snapshot(failSafeSnapshotSnapshotName, tableName);
-2562}
-2563
-2564try {
-2565  // Restore snapshot
-2566  get(
-2567
internalRestoreSnapshotAsync(snapshotName, tableName),
-2568syncWaitTimeout,
-2569TimeUnit.MILLISECONDS);
-2570} catch (IOException e) {
-2571      // Something went wrong during the restore...
-2572  // if the pre-restore snapshot is 
available try to rollback
-2573  if (takeFailSafeSnapshot) {
-2574try {
-2575  get(
-2576
internalRestoreSnapshotAsync(failSafeSnapshotSnapshotName, tableName),
-2577syncWaitTimeout,
-2578TimeUnit.MILLISECONDS);
-2579  String msg = "Restore 
snapshot=" + snapshotName +
-2580" failed. Rollback to 
snapshot=" + failSafeSnapshotSnapshotName + " succeeded.";
-2581  LOG.error(msg, e);
-2582  throw new 
RestoreSnapshotException(msg, e);
-2583} catch (IOException ex) {
-2584  String msg = "Failed to 
restore and rollback to snapshot=" + failSafeSnapshotSnapshotName;
-2585  LOG.error(msg, ex);
-2586  throw new 
RestoreSnapshotException(msg, e);
-2587}
-2588  } else {
-2589throw new 
RestoreSnapshotException("Failed to restore snapshot=" + snapshotName, e);
-2590  }
-2591}
-2592
-2593// If the restore is succeeded, 
delete the pre-restore snapshot
-2594if (takeFailSafeSnapshot) {
-2595  

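The removed restoreSnapshot code above optionally takes a failsafe snapshot (controlled by hbase.snapshot.restore.take.failsafe.snapshot) before restoring, rolls back to it if the restore fails, and deletes it on success. Below is a hedged sketch of driving that behaviour from a client; the table and snapshot names are assumptions for the example.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class RestoreWithFailsafeSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Config key taken from the code above; its default there is false, and the failsafe
    // snapshot name pattern defaults to "hbase-failsafe-{snapshot.name}-{restore.timestamp}".
    conf.setBoolean("hbase.snapshot.restore.take.failsafe.snapshot", true);
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      admin.disableTable(TableName.valueOf("testTable"));  // restore requires a disabled table, as checked above
      admin.restoreSnapshot("testTable-snap");
    }
  }
}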
[03/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/662ea7dc/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ModifyTableFuture.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ModifyTableFuture.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ModifyTableFuture.html
index b798d4b..8c56a67 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ModifyTableFuture.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ModifyTableFuture.html
@@ -3877,425 +3877,371 @@
 3869  throw new 
ReplicationException("tableCfs is null");
 3870}
 3871ReplicationPeerConfig peerConfig = 
getReplicationPeerConfig(id);
-3872    Map<TableName, List<String>> preTableCfs = peerConfig.getTableCFsMap();
-3873    if (preTableCfs == null) {
-3874      peerConfig.setTableCFsMap(tableCfs);
-3875    } else {
-3876      for (Map.Entry<TableName, ? extends Collection<String>> entry : tableCfs.entrySet()) {
-3877        TableName table = entry.getKey();
-3878        Collection<String> appendCfs = entry.getValue();
-3879        if (preTableCfs.containsKey(table)) {
-3880          List<String> cfs = preTableCfs.get(table);
-3881          if (cfs == null || appendCfs == null || appendCfs.isEmpty()) {
-3882            preTableCfs.put(table, null);
-3883          } else {
-3884            Set<String> cfSet = new HashSet<String>(cfs);
-3885            cfSet.addAll(appendCfs);
-3886            preTableCfs.put(table, Lists.newArrayList(cfSet));
-3887          }
-3888        } else {
-3889          if (appendCfs == null || appendCfs.isEmpty()) {
-3890            preTableCfs.put(table, null);
-3891          } else {
-3892            preTableCfs.put(table, Lists.newArrayList(appendCfs));
-3893          }
-3894        }
-3895      }
-3896    }
-3897    updateReplicationPeerConfig(id, peerConfig);
-3898  }
-3899
-3900  @Override
-3901  public void removeReplicationPeerTableCFs(String id,
-3902      Map<TableName, ? extends Collection<String>> tableCfs) throws ReplicationException,
-3903      IOException {
-3904    if (tableCfs == null) {
-3905      throw new ReplicationException("tableCfs is null");
-3906    }
-3907    ReplicationPeerConfig peerConfig = getReplicationPeerConfig(id);
-3908    Map<TableName, List<String>> preTableCfs = peerConfig.getTableCFsMap();
-3909    if (preTableCfs == null) {
-3910      throw new ReplicationException("Table-Cfs for peer" + id + " is null");
-3911    }
-3912    for (Map.Entry<TableName, ? extends Collection<String>> entry : tableCfs.entrySet()) {
-3913
-3914      TableName table = entry.getKey();
-3915      Collection<String> removeCfs = entry.getValue();
-3916      if (preTableCfs.containsKey(table)) {
-3917        List<String> cfs = preTableCfs.get(table);
-3918        if (cfs == null && (removeCfs == null || removeCfs.isEmpty())) {
-3919          preTableCfs.remove(table);
-3920        } else if (cfs != null && (removeCfs != null && !removeCfs.isEmpty())) {
-3921          Set<String> cfSet = new HashSet<String>(cfs);
-3922          cfSet.removeAll(removeCfs);
-3923          if (cfSet.isEmpty()) {
-3924            preTableCfs.remove(table);
-3925          } else {
-3926            preTableCfs.put(table, Lists.newArrayList(cfSet));
-3927          }
-3928        } else if (cfs == null && (removeCfs != null && !removeCfs.isEmpty())) {
-3929          throw new ReplicationException("Cannot remove cf of table: " + table
-3930              + " which doesn't specify cfs from table-cfs config in peer: " + id);
-3931        } else if (cfs != null && (removeCfs == null || removeCfs.isEmpty())) {
-3932          throw new ReplicationException("Cannot remove table: " + table
-3933              + " which has specified cfs from table-cfs config in peer: " + id);
-3934        }
-3935      } else {
-3936        throw new ReplicationException("No table: " + table + " in table-cfs config of peer: " + id);
-3937      }
-3938    }
-3939    updateReplicationPeerConfig(id, peerConfig);
-3940  }
-3941
-3942  @Override
-3943  public List<ReplicationPeerDescription> listReplicationPeers() throws IOException {
-3944    return listReplicationPeers((Pattern)null);
-3945  }
-3946
-3947  @Override
-3948  public List<ReplicationPeerDescription> listReplicationPeers(String regex) throws IOException {
-3949    return listReplicationPeers(Pattern.compile(regex));
-3950  }
-3951
-3952  @Override
-3953  public List<ReplicationPeerDescription> listReplicationPeers(Pattern pattern)
-3954      throws IOException {
-3955    return executeCallable(new MasterCallable<List<ReplicationPeerDescription>>(getConnection(),
-3956        getRpcControllerFactory()) {
-3957      @Override
-3958      protected List<ReplicationPeerDescription> rpcCall() throws Exception {
-3959        List<ReplicationProtos.ReplicationPeerDescription> peersList = master.listReplicationPeers(
-3960  

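The removed HBaseAdmin methods above merge or strip per-table column-family lists in a replication peer's table-cfs map. Below is a hedged usage sketch, assuming the public Admin interface exposes appendReplicationPeerTableCFs with the same map shape used here; the peer id, table, and family names are example values.

import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;

public class PeerTableCfsSketch {
  // Assumes Admin offers appendReplicationPeerTableCFs(String, Map<TableName, ? extends Collection<String>>),
  // matching the HBaseAdmin implementation shown above.
  static void addCfsToPeer(Admin admin, String peerId) throws Exception {
    Map<TableName, List<String>> tableCfs = new HashMap<>();
    tableCfs.put(TableName.valueOf("testTable"), Arrays.asList("cf1", "cf2"));
    admin.appendReplicationPeerTableCFs(peerId, tableCfs);  // merged into the peer's existing table-cfs map
  }
}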
[03/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6b4bae59/devapidocs/src-html/org/apache/hadoop/hbase/io/HalfStoreFileReader.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/HalfStoreFileReader.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/HalfStoreFileReader.html
index b870884..30b3ad0 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/io/HalfStoreFileReader.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/io/HalfStoreFileReader.html
@@ -28,346 +28,347 @@
 020
 021import java.io.IOException;
 022import java.nio.ByteBuffer;
-023
-024import org.apache.commons.logging.Log;
-025import 
org.apache.commons.logging.LogFactory;
-026import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
-027import 
org.apache.hadoop.conf.Configuration;
-028import org.apache.hadoop.fs.FileSystem;
-029import org.apache.hadoop.fs.Path;
-030import org.apache.hadoop.hbase.Cell;
-031import 
org.apache.hadoop.hbase.HConstants;
-032import 
org.apache.hadoop.hbase.KeyValue;
-033import 
org.apache.hadoop.hbase.client.Scan;
-034import 
org.apache.hadoop.hbase.io.hfile.CacheConfig;
-035import 
org.apache.hadoop.hbase.io.hfile.HFileScanner;
-036import 
org.apache.hadoop.hbase.regionserver.StoreFileReader;
-037import 
org.apache.hadoop.hbase.util.Bytes;
-038
-039/**
-040 * A facade for a {@link 
org.apache.hadoop.hbase.io.hfile.HFile.Reader} that serves up
-041 * either the top or bottom half of a 
HFile where 'bottom' is the first half
-042 * of the file containing the keys that 
sort lowest and 'top' is the second half
-043 * of the file with keys that sort 
greater than those of the bottom half.
-044 * The top includes the split files 
midkey, of the key that follows if it does
-045 * not exist in the file.
-046 *
-047 * pThis type works in tandem 
with the {@link Reference} type.  This class
-048 * is used reading while Reference is 
used writing.
-049 *
-050 * pThis file is not splitable.  
Calls to {@link #midkey()} return null.
-051 */
-052@InterfaceAudience.Private
-053public class HalfStoreFileReader extends 
StoreFileReader {
-054  private static final Log LOG = 
LogFactory.getLog(HalfStoreFileReader.class);
-055  final boolean top;
-056  // This is the key we split around.  
Its the first possible entry on a row:
-057  // i.e. empty column and a timestamp of 
LATEST_TIMESTAMP.
-058  protected final byte [] splitkey;
-059
-060  protected final Cell splitCell;
-061
-062  private Cell firstKey = null;
-063
-064  private boolean firstKeySeeked = 
false;
-065
-066  /**
-067   * Creates a half file reader for a 
normal hfile.
-068   * @param fs fileystem to read from
-069   * @param p path to hfile
-070   * @param cacheConf
-071   * @param r original reference file 
(contains top or bottom)
-072   * @param conf Configuration
-073   * @throws IOException
-074   */
-075  public HalfStoreFileReader(final 
FileSystem fs, final Path p,
-076  final CacheConfig cacheConf, final 
Reference r, final Configuration conf)
-077  throws IOException {
-078super(fs, p, cacheConf, conf);
-079// This is not actual midkey for this 
half-file; its just border
-080// around which we split top and 
bottom.  Have to look in files to find
-081// actual last and first keys for 
bottom and top halves.  Half-files don't
-082// have an actual midkey themselves. 
No midkey is how we indicate file is
-083// not splittable.
-084this.splitkey = r.getSplitKey();
-085this.splitCell = new 
KeyValue.KeyOnlyKeyValue(this.splitkey, 0, this.splitkey.length);
-086// Is it top or bottom half?
-087this.top = 
Reference.isTopFileRegion(r.getFileRegion());
-088  }
-089
-090  /**
-091   * Creates a half file reader for a 
hfile referred to by an hfilelink.
-092   * @param fs fileystem to read from
-093   * @param p path to hfile
-094   * @param in {@link 
FSDataInputStreamWrapper}
-095   * @param size Full size of the hfile 
file
-096   * @param cacheConf
-097   * @param r original reference file 
(contains top or bottom)
-098   * @param conf Configuration
-099   * @throws IOException
-100   */
-101  public HalfStoreFileReader(final 
FileSystem fs, final Path p, final FSDataInputStreamWrapper in,
-102  long size, final CacheConfig 
cacheConf,  final Reference r, final Configuration conf)
-103  throws IOException {
-104super(fs, p, in, size, cacheConf, 
conf);
-105// This is not actual midkey for this 
half-file; its just border
-106// around which we split top and 
bottom.  Have to look in files to find
-107// actual last and first keys for 
bottom and top halves.  Half-files don't
-108// have an actual midkey themselves. 
No midkey is how we indicate file is
-109// not splittable.
-110this.splitkey = r.getSplitKey();
-111this.splitCell = new 
KeyValue.KeyOnlyKeyValue(this.splitkey, 0, this.splitkey.length);
-112// Is it top or bottom half?
-113this.top = 

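The HalfStoreFileReader javadoc above explains that each half-file is bounded by a split key which is the first possible entry on the split row (empty column, LATEST_TIMESTAMP). Below is a small sketch of building such a key, assuming KeyValueUtil.createFirstOnRow is available; the row value is an example.

import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValueUtil;
import org.apache.hadoop.hbase.util.Bytes;

public class SplitKeySketch {
  // Builds the "first possible entry on a row" used as the split border,
  // as the HalfStoreFileReader comment describes.
  static KeyValue firstOnRow(byte[] splitRow) {
    return KeyValueUtil.createFirstOnRow(splitRow);
  }

  public static void main(String[] args) {
    System.out.println(firstOnRow(Bytes.toBytes("splitRow")));
  }
}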
[03/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2fcc2ae0/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.RegionTask.TaskType.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.RegionTask.TaskType.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.RegionTask.TaskType.html
index 88dff07..af5536f 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.RegionTask.TaskType.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.RegionTask.TaskType.html
@@ -77,16 +77,16 @@
 069import 
org.apache.hadoop.hbase.TableName;
 070import 
org.apache.hadoop.hbase.TableNotEnabledException;
 071import 
org.apache.hadoop.hbase.TableNotFoundException;
-072import 
org.apache.hadoop.hbase.client.Admin;
-073import 
org.apache.hadoop.hbase.client.Connection;
-074import 
org.apache.hadoop.hbase.client.ConnectionFactory;
-075import 
org.apache.hadoop.hbase.client.Get;
-076import 
org.apache.hadoop.hbase.client.Put;
-077import 
org.apache.hadoop.hbase.client.RegionLocator;
-078import 
org.apache.hadoop.hbase.client.ResultScanner;
-079import 
org.apache.hadoop.hbase.client.Scan;
-080import 
org.apache.hadoop.hbase.client.Table;
-081import 
org.apache.hadoop.hbase.client.Scan.ReadType;
+072import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
+073import 
org.apache.hadoop.hbase.client.Admin;
+074import 
org.apache.hadoop.hbase.client.Connection;
+075import 
org.apache.hadoop.hbase.client.ConnectionFactory;
+076import 
org.apache.hadoop.hbase.client.Get;
+077import 
org.apache.hadoop.hbase.client.Put;
+078import 
org.apache.hadoop.hbase.client.RegionLocator;
+079import 
org.apache.hadoop.hbase.client.ResultScanner;
+080import 
org.apache.hadoop.hbase.client.Scan;
+081import 
org.apache.hadoop.hbase.client.Table;
 082import 
org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter;
 083import 
org.apache.hadoop.hbase.tool.Canary.RegionTask.TaskType;
 084import 
org.apache.hadoop.hbase.util.Bytes;
@@ -118,1387 +118,1345 @@
 110 * 3. zookeeper mode - for each zookeeper 
instance, selects a zNode and
 111 * outputs some information about failure 
or latency.
 112 */
-113public final class Canary implements Tool 
{
-114  // Sink interface used by the canary to 
outputs information
-115  public interface Sink {
-116public long getReadFailureCount();
-117public long incReadFailureCount();
-118public void 
publishReadFailure(HRegionInfo region, Exception e);
-119public void 
publishReadFailure(HRegionInfo region, HColumnDescriptor column, Exception 
e);
-120public void 
updateReadFailedHostList(HRegionInfo region, String serverName);
-121    public Map<String,String> getReadFailures();
-122public void 
publishReadTiming(HRegionInfo region, HColumnDescriptor column, long msTime);
-123public long getWriteFailureCount();
-124public void 
publishWriteFailure(HRegionInfo region, Exception e);
-125public void 
publishWriteFailure(HRegionInfo region, HColumnDescriptor column, Exception 
e);
-126public void 
publishWriteTiming(HRegionInfo region, HColumnDescriptor column, long 
msTime);
-127public void 
updateWriteFailedHostList(HRegionInfo region, String serverName);
-128    public Map<String,String> getWriteFailures();
-129  }
-130  // new extended sink for output 
regionserver mode info
-131  // do not change the Sink interface 
directly due to maintaining the API
-132  public interface ExtendedSink extends 
Sink {
-133public void publishReadFailure(String 
table, String server);
-134public void publishReadTiming(String 
table, String server, long msTime);
-135  }
-136
-137  // Simple implementation of canary sink 
that allows to plot on
-138  // file or standard output timings or 
failures.
-139  public static class StdOutSink 
implements Sink {
-140private AtomicLong readFailureCount = 
new AtomicLong(0),
-141writeFailureCount = new 
AtomicLong(0);
-142
-143    private Map<String, String> readFailures = new ConcurrentHashMap<String, String>();
-144    private Map<String, String> writeFailures = new ConcurrentHashMap<String, String>();
-145
-146@Override
-147public long getReadFailureCount() {
-148  return readFailureCount.get();
-149}
-150
-151@Override
-152public long incReadFailureCount() {
-153  return 
readFailureCount.incrementAndGet();
-154}
-155
-156@Override
-157public void 
publishReadFailure(HRegionInfo region, Exception e) {
-158  
readFailureCount.incrementAndGet();
-159  LOG.error(String.format("read from 
region %s failed", region.getRegionNameAsString()), e);
-160}
-161
-162@Override
-163public void 
publishReadFailure(HRegionInfo region, HColumnDescriptor column, Exception e) 
{
-164  
readFailureCount.incrementAndGet();
-165  LOG.error(String.format("read from 
region %s column family %s failed",
-166
region.getRegionNameAsString(), column.getNameAsString()), e);

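The Canary Sink interface above collects read/write failures and timings, with StdOutSink as the simple default implementation. Below is a hedged sketch of a custom sink that extends StdOutSink to add alerting on read failures, assuming StdOutSink remains a public static nested class as the listing shows.

import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.tool.Canary;

public class AlertingSink extends Canary.StdOutSink {
  // Overrides one callback from the Sink interface shown above; the failure counter
  // and standard logging are preserved by delegating to the superclass first.
  @Override
  public void publishReadFailure(HRegionInfo region, Exception e) {
    super.publishReadFailure(region, e);
    System.err.println("ALERT: canary read failed for region " + region.getRegionNameAsString());
  }
}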
[03/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e4348f53/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.ModifyColumnFamilyProcedureBiConsumer.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.ModifyColumnFamilyProcedureBiConsumer.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.ModifyColumnFamilyProcedureBiConsumer.html
index 31517f6..ac4a9b3 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.ModifyColumnFamilyProcedureBiConsumer.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.ModifyColumnFamilyProcedureBiConsumer.html
@@ -64,1374 +64,1421 @@
 056import 
org.apache.hadoop.hbase.client.Scan.ReadType;
 057import 
org.apache.hadoop.hbase.exceptions.DeserializationException;
 058import 
org.apache.hadoop.hbase.ipc.HBaseRpcController;
-059import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback;
-060import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-061import 
org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
-062import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
-063import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest;
-064import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse;
-065import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionRequest;
-066import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionResponse;
-067import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema;
-068import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest;
-069import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse;
-070import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest;
-071import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionResponse;
-072import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceRequest;
-073import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceResponse;
-074import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest;
-075import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceResponse;
-076import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceRequest;
-077import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceResponse;
-078import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableRequest;
-079import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableResponse;
-080import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableRequest;
-081import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableResponse;
-082import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnRequest;
-083import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnResponse;
-084import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest;
-085import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse;
-086import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultRequest;
-087import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultResponse;
-088import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetSchemaAlterStatusRequest;
-089import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetSchemaAlterStatusResponse;
-090import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableDescriptorsRequest;
-091import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableDescriptorsResponse;
-092import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableNamesRequest;
-093import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableNamesResponse;
-094import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableRequest;
-095import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableResponse;
-096import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableRequest;
-097import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableResponse;
-098import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsBalancerEnabledRequest;
-099import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsBalancerEnabledResponse;
-100import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest;
-101import 

[03/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e57d1b63/apidocs/org/apache/hadoop/hbase/client/Operation.html
--
diff --git a/apidocs/org/apache/hadoop/hbase/client/Operation.html 
b/apidocs/org/apache/hadoop/hbase/client/Operation.html
deleted file mode 100644
index 3895232..000
--- a/apidocs/org/apache/hadoop/hbase/client/Operation.html
+++ /dev/null
@@ -1,434 +0,0 @@
-Operation (Apache HBase 2.0.0-SNAPSHOT API)
-
-
-
-
-
-
-
-
-
-
-org.apache.hadoop.hbase.client
-Class Operation
-
-
-
-java.lang.Object
-
-
-org.apache.hadoop.hbase.client.Operation
-
-
-
-
-
-
-
-Direct Known Subclasses:
-OperationWithAttributes
-
-
-
-@InterfaceAudience.Public
-public abstract class Operation
-extends java.lang.Object
-Superclass for any type that maps to a potentially 
application-level query.
- (e.g. Put, Get, Delete, Scan, Next, etc.)
- Contains methods for exposure to logging and debugging tools.
-
-
-
-
-
-
-
-
-
-
-
-Constructor Summary
-
-Constructors
-
-Constructor and Description
-
-
-Operation()
-
-
-
-
-
-
-
-
-
-Method Summary
-
-All Methods / Instance Methods / Abstract Methods / Concrete Methods
-
-Modifier and Type    Method and Description
-
-abstract Map<String,Object>
-getFingerprint()
-Produces a Map containing a fingerprint which identifies the type and
- the static schema components of a query (i.e.
-
-String
-toJSON()
-Produces a JSON object sufficient for description of a query
- in a debugging or logging context.
-
-String
-toJSON(int maxCols)
-Produces a JSON object for fingerprint and details exposure in a
- parseable format.
-
-Map<String,Object>
-toMap()
-Produces a Map containing a full summary of a query.
-
-abstract Map<String,Object>
-toMap(int maxCols)
-Produces a Map containing a summary of the details of a query
- beyond the scope of the fingerprint (i.e.
-
-String
-toString()
-Produces a string representation of this Operation.
-
-String
-toString(int maxCols)
-Produces a string representation of this Operation.
-
-
-
-
-
-
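The deleted page above documented Operation, whose toJSON, toMap, and toString methods give every client query a loggable form. Below is a short sketch of calling those methods on a Get, which inherits them through OperationWithAttributes; the row, family, and qualifier values are example data.

import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.util.Bytes;

public class OperationLoggingSketch {
  public static void main(String[] args) throws Exception {
    Get get = new Get(Bytes.toBytes("row1"));
    get.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("qual"));
    // toString(maxCols) and toJSON() come from the Operation class described above.
    System.out.println(get.toString(5));
    System.out.println(get.toJSON());
  }
}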