[16/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/346adc37/devapidocs/org/apache/hadoop/hbase/thrift/DemoClient.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/thrift/DemoClient.html b/devapidocs/org/apache/hadoop/hbase/thrift/DemoClient.html index a81ad06..472ba0e 100644 --- a/devapidocs/org/apache/hadoop/hbase/thrift/DemoClient.html +++ b/devapidocs/org/apache/hadoop/hbase/thrift/DemoClient.html @@ -18,7 +18,7 @@ catch(err) { } //--> -var methods = {"i0":10,"i1":9,"i2":9,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10}; +var methods = {"i0":10,"i1":9,"i2":9,"i3":9,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10}; var tabs = {65535:["t0","All Methods"],1:["t1","Static Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]}; var altColor = "altColor"; var rowColor = "rowColor"; @@ -146,6 +146,10 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html? private static boolean secure + +private static http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">String +serverPrincipal + @@ -187,27 +191,31 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html? getSubject() +private static boolean +isBoolean(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">Strings) + + static void main(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">String[]args) - + private void printRow(http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true; title="class or interface in java.util">Listorg.apache.hadoop.hbase.thrift.generated.TRowResultrows) - + private void printRow(org.apache.hadoop.hbase.thrift.generated.TRowResultrowResult) - + private void printVersions(http://docs.oracle.com/javase/8/docs/api/java/nio/ByteBuffer.html?is-external=true; title="class or interface in java.nio">ByteBufferrow, http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true; title="class or interface in java.util">Listorg.apache.hadoop.hbase.thrift.generated.TCellversions) - + private void run() - + private http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">String utf8(byte[]buf) @@ -263,12 +271,21 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html? - + secure private staticboolean secure + + + + + +serverPrincipal +private statichttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">String serverPrincipal + + @@ -283,7 +300,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html? DemoClient -DemoClient() +DemoClient() @@ -300,7 +317,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html? main -public staticvoidmain(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">String[]args) +public staticvoidmain(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">String[]args) throws http://docs.oracle.com/javase/8/docs/api/java/lang/Exception.html?is-external=true; title="class or interface in java.lang">Exception Throws: @@ -308,13 +325,22 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html? 
+ + + + + +isBoolean +private staticbooleanisBoolean(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">Strings) + + utf8 -privatehttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">Stringutf8(byte[]buf) +privatehttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">Stringutf8(byte[]buf) @@ -323,7 +349,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html? bytes -privatebyte[]bytes(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">Strings) +privatebyte[]bytes(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">Strings) @@ -332,7 +358,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html? run -privatevoidrun() +privatevoidrun() throws http://docs.oracle.com/javase/8/docs/api/java/lang/Exception.html?is-external=true; title="class or interface in java.lang">Exception Throws: @@ -346,7 +372,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html? printVersions
[16/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2aec596e/devapidocs/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.html b/devapidocs/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.html index 2eb8361..67131e4 100644 --- a/devapidocs/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.html +++ b/devapidocs/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.html @@ -236,7 +236,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true; title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/lang/Boolean.html?is-external=true; title="class or interface in java.lang">Boolean closeRegion(byte[]regionName, - http://docs.oracle.com/javase/8/docs/api/java/util/Optional.html?is-external=true; title="class or interface in java.util">OptionalServerNameserverName) + http://docs.oracle.com/javase/8/docs/api/java/util/Optional.html?is-external=true; title="class or interface in java.util">OptionalServerNameunused) Close a region. @@ -1446,7 +1446,7 @@ implements closeRegion publichttp://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true; title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/lang/Boolean.html?is-external=true; title="class or interface in java.lang">BooleancloseRegion(byte[]regionName, - http://docs.oracle.com/javase/8/docs/api/java/util/Optional.html?is-external=true; title="class or interface in java.util">OptionalServerNameserverName) + http://docs.oracle.com/javase/8/docs/api/java/util/Optional.html?is-external=true; title="class or interface in java.util">OptionalServerNameunused) Description copied from interface:AsyncAdmin Close a region. For expert-admins Runs close on the regionserver. The master will not be informed of the close. @@ -1455,12 +1455,9 @@ implements closeRegionin interfaceAsyncAdmin Parameters: regionName - region name to close -serverName - The servername of the regionserver. If not present, we will use servername - found in the hbase:meta table. A server name is made of host, port and startcode. Here - is an example: host187.example.com,60020,1289493121758 +unused - Deprecated. Not used anymore after deprecation. Returns: -true if the region was closed, false if not. The return value will be wrapped by a - http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true; title="class or interface in java.util.concurrent">CompletableFuture. +Deprecated. Always returns true now. http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2aec596e/devapidocs/org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.Retry.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.Retry.html b/devapidocs/org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.Retry.html index 301bc20..ba2bf1d 100644 --- a/devapidocs/org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.Retry.html +++ b/devapidocs/org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.Retry.html @@ -274,7 +274,7 @@ the order they are declared. values -public staticAsyncRequestFutureImpl.Retry[]values() +public staticAsyncRequestFutureImpl.Retry[]values() Returns an array containing the constants of this enum type, in the order they are declared. 
This method may be used to iterate over the constants as follows: @@ -294,7 +294,7 @@ for (AsyncRequestFutureImpl.Retry c : AsyncRequestFutureImpl.Retry.values()) valueOf -public staticAsyncRequestFutureImpl.RetryvalueOf(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">Stringname) +public staticAsyncRequestFutureImpl.RetryvalueOf(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">Stringname) Returns the enum constant of this type with the specified name. The string must match exactly an identifier used to declare an enum constant in this type. (Extraneous whitespace characters are http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2aec596e/devapidocs/org/apache/hadoop/hbase/client/ClientAsyncPrefetchScanner.PrefetchRunnable.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/client/ClientAsyncPrefetchScanner.PrefetchRunnable.html b/devapidocs/org/apache/hadoop/hbase/client/ClientAsyncPrefetchScanner.PrefetchRunnable.html index 2eb6a09..c7669ce 100644 ---
[16/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1837997e/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.html index 738bf21..a70405f 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.html @@ -45,507 +45,536 @@ 037 038 final MetricsRegionServerWrapper rsWrap; 039 private final MetricHistogram putHisto; -040 private final MetricHistogram deleteHisto; -041 private final MetricHistogram getHisto; -042 private final MetricHistogram incrementHisto; -043 private final MetricHistogram appendHisto; -044 private final MetricHistogram replayHisto; -045 private final MetricHistogram scanSizeHisto; -046 private final MetricHistogram scanTimeHisto; -047 -048 private final MutableFastCounter slowPut; -049 private final MutableFastCounter slowDelete; -050 private final MutableFastCounter slowGet; -051 private final MutableFastCounter slowIncrement; -052 private final MutableFastCounter slowAppend; -053 private final MutableFastCounter splitRequest; -054 private final MutableFastCounter splitSuccess; -055 -056 private final MetricHistogram splitTimeHisto; -057 -058 // flush related metrics -059 private final MetricHistogram flushTimeHisto; -060 private final MetricHistogram flushMemstoreSizeHisto; -061 private final MetricHistogram flushOutputSizeHisto; -062 private final MutableFastCounter flushedMemstoreBytes; -063 private final MutableFastCounter flushedOutputBytes; -064 -065 // compaction related metrics -066 private final MetricHistogram compactionTimeHisto; -067 private final MetricHistogram compactionInputFileCountHisto; -068 private final MetricHistogram compactionInputSizeHisto; -069 private final MetricHistogram compactionOutputFileCountHisto; -070 private final MetricHistogram compactionOutputSizeHisto; -071 private final MutableFastCounter compactedInputBytes; -072 private final MutableFastCounter compactedOutputBytes; -073 -074 private final MetricHistogram majorCompactionTimeHisto; -075 private final MetricHistogram majorCompactionInputFileCountHisto; -076 private final MetricHistogram majorCompactionInputSizeHisto; -077 private final MetricHistogram majorCompactionOutputFileCountHisto; -078 private final MetricHistogram majorCompactionOutputSizeHisto; -079 private final MutableFastCounter majorCompactedInputBytes; -080 private final MutableFastCounter majorCompactedOutputBytes; -081 -082 // pause monitor metrics -083 private final MutableFastCounter infoPauseThresholdExceeded; -084 private final MutableFastCounter warnPauseThresholdExceeded; -085 private final MetricHistogram pausesWithGc; -086 private final MetricHistogram pausesWithoutGc; -087 -088 public MetricsRegionServerSourceImpl(MetricsRegionServerWrapper rsWrap) { -089this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT, rsWrap); -090 } +040 private final MetricHistogram putBatchHisto; +041 private final MetricHistogram deleteHisto; +042 private final MetricHistogram deleteBatchHisto; +043 private final MetricHistogram checkAndDeleteHisto; +044 private final MetricHistogram checkAndPutHisto; +045 private final MetricHistogram getHisto; +046 private final MetricHistogram incrementHisto; +047 private final MetricHistogram 
appendHisto; +048 private final MetricHistogram replayHisto; +049 private final MetricHistogram scanSizeHisto; +050 private final MetricHistogram scanTimeHisto; +051 +052 private final MutableFastCounter slowPut; +053 private final MutableFastCounter slowDelete; +054 private final MutableFastCounter slowGet; +055 private final MutableFastCounter slowIncrement; +056 private final MutableFastCounter slowAppend; +057 private final MutableFastCounter splitRequest; +058 private final MutableFastCounter splitSuccess; +059 +060 private final MetricHistogram splitTimeHisto; +061 +062 // flush related metrics +063 private final MetricHistogram flushTimeHisto; +064 private final MetricHistogram flushMemstoreSizeHisto; +065 private final MetricHistogram flushOutputSizeHisto; +066 private final MutableFastCounter flushedMemstoreBytes; +067 private final MutableFastCounter flushedOutputBytes; +068 +069 // compaction related metrics +070 private final MetricHistogram compactionTimeHisto; +071 private final MetricHistogram compactionInputFileCountHisto; +072 private final MetricHistogram compactionInputSizeHisto; +073 private final MetricHistogram compactionOutputFileCountHisto; +074 private final MetricHistogram
[16/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a2b2dd19/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html index c9a18a3..c80f6d8 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html @@ -2492,2617 +2492,2627 @@ 2484 return; 2485} 2486 } -2487 errors.reportError(ERROR_CODE.LINGERING_SPLIT_PARENT, "Region " -2488 + descriptiveName + " is a split parent in META, in HDFS, " -2489 + "and not deployed on any region server. This could be transient, " -2490 + "consider to run the catalog janitor first!"); -2491 if (shouldFixSplitParents()) { -2492setShouldRerun(); -2493resetSplitParent(hbi); -2494 } -2495} else if (inMeta !inHdfs !isDeployed) { -2496 errors.reportError(ERROR_CODE.NOT_IN_HDFS_OR_DEPLOYED, "Region " -2497 + descriptiveName + " found in META, but not in HDFS " -2498 + "or deployed on any region server."); -2499 if (shouldFixMeta()) { -2500deleteMetaRegion(hbi); -2501 } -2502} else if (inMeta !inHdfs isDeployed) { -2503 errors.reportError(ERROR_CODE.NOT_IN_HDFS, "Region " + descriptiveName -2504 + " found in META, but not in HDFS, " + -2505 "and deployed on " + Joiner.on(", ").join(hbi.deployedOn)); -2506 // We treat HDFS as ground truth. Any information in meta is transient -2507 // and equivalent data can be regenerated. So, lets unassign and remove -2508 // these problems from META. -2509 if (shouldFixAssignments()) { -2510errors.print("Trying to fix unassigned region..."); -2511undeployRegions(hbi); -2512 } -2513 if (shouldFixMeta()) { -2514// wait for it to complete -2515deleteMetaRegion(hbi); -2516 } -2517} else if (inMeta inHdfs !isDeployed shouldBeDeployed) { -2518 errors.reportError(ERROR_CODE.NOT_DEPLOYED, "Region " + descriptiveName -2519 + " not deployed on any region server."); -2520 tryAssignmentRepair(hbi, "Trying to fix unassigned region..."); -2521} else if (inMeta inHdfs isDeployed !shouldBeDeployed) { -2522 errors.reportError(ERROR_CODE.SHOULD_NOT_BE_DEPLOYED, -2523 "Region " + descriptiveName + " should not be deployed according " + -2524 "to META, but is deployed on " + Joiner.on(", ").join(hbi.deployedOn)); -2525 if (shouldFixAssignments()) { -2526errors.print("Trying to close the region " + descriptiveName); -2527setShouldRerun(); -2528 HBaseFsckRepair.fixMultiAssignment(connection, hbi.metaEntry, hbi.deployedOn); -2529 } -2530} else if (inMeta inHdfs isMultiplyDeployed) { -2531 errors.reportError(ERROR_CODE.MULTI_DEPLOYED, "Region " + descriptiveName -2532 + " is listed in hbase:meta on region server " + hbi.metaEntry.regionServer -2533 + " but is multiply assigned to region servers " + -2534 Joiner.on(", ").join(hbi.deployedOn)); -2535 // If we are trying to fix the errors -2536 if (shouldFixAssignments()) { -2537errors.print("Trying to fix assignment error..."); -2538setShouldRerun(); -2539 HBaseFsckRepair.fixMultiAssignment(connection, hbi.metaEntry, hbi.deployedOn); -2540 } -2541} else if (inMeta inHdfs isDeployed !deploymentMatchesMeta) { -2542 errors.reportError(ERROR_CODE.SERVER_DOES_NOT_MATCH_META, "Region " -2543 + descriptiveName + " listed in hbase:meta on region server " + -2544 hbi.metaEntry.regionServer + " but found on region server " + -2545 hbi.deployedOn.get(0)); 
-2546 // If we are trying to fix the errors -2547 if (shouldFixAssignments()) { -2548errors.print("Trying to fix assignment error..."); -2549setShouldRerun(); -2550 HBaseFsckRepair.fixMultiAssignment(connection, hbi.metaEntry, hbi.deployedOn); -2551 HBaseFsckRepair.waitUntilAssigned(admin, hbi.getHdfsHRI()); -2552 } -2553} else { -2554 errors.reportError(ERROR_CODE.UNKNOWN, "Region " + descriptiveName + -2555 " is in an unforeseen state:" + -2556 " inMeta=" + inMeta + -2557 " inHdfs=" + inHdfs + -2558 " isDeployed=" + isDeployed + -2559 " isMultiplyDeployed=" + isMultiplyDeployed + -2560 " deploymentMatchesMeta=" + deploymentMatchesMeta + -2561 " shouldBeDeployed=" + shouldBeDeployed); -2562} -2563 } -2564 -2565 /** -2566 * Checks tables integrity. Goes
[16/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/21766f4a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionChore.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionChore.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionChore.html index 0865b8f..596b800 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionChore.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionChore.html @@ -30,1795 +30,1790 @@ 022import java.util.ArrayList; 023import java.util.Collection; 024import java.util.Collections; -025import java.util.Comparator; -026import java.util.HashMap; -027import java.util.HashSet; -028import java.util.List; -029import java.util.Map; -030import java.util.Set; -031import java.util.concurrent.CopyOnWriteArrayList; -032import java.util.concurrent.Future; -033import java.util.concurrent.TimeUnit; -034import java.util.concurrent.atomic.AtomicBoolean; -035import java.util.concurrent.locks.Condition; -036import java.util.concurrent.locks.ReentrantLock; -037import java.util.stream.Collectors; -038 -039import org.apache.commons.logging.Log; -040import org.apache.commons.logging.LogFactory; -041import org.apache.hadoop.conf.Configuration; -042import org.apache.hadoop.hbase.HBaseIOException; -043import org.apache.hadoop.hbase.HConstants; -044import org.apache.hadoop.hbase.HRegionInfo; -045import org.apache.hadoop.hbase.PleaseHoldException; -046import org.apache.hadoop.hbase.RegionException; -047import org.apache.hadoop.hbase.RegionStateListener; -048import org.apache.hadoop.hbase.ServerName; -049import org.apache.hadoop.hbase.TableName; -050import org.apache.hadoop.hbase.classification.InterfaceAudience; -051import org.apache.hadoop.hbase.client.TableState; -052import org.apache.hadoop.hbase.exceptions.UnexpectedStateException; -053import org.apache.hadoop.hbase.master.balancer.FavoredStochasticBalancer; -054import org.apache.hadoop.hbase.favored.FavoredNodesManager; -055import org.apache.hadoop.hbase.favored.FavoredNodesPromoter; -056import org.apache.hadoop.hbase.master.AssignmentListener; -057import org.apache.hadoop.hbase.master.LoadBalancer; -058import org.apache.hadoop.hbase.master.MasterServices; -059import org.apache.hadoop.hbase.master.MetricsAssignmentManager; -060import org.apache.hadoop.hbase.master.NoSuchProcedureException; -061import org.apache.hadoop.hbase.master.RegionPlan; -062import org.apache.hadoop.hbase.master.RegionState; -063import org.apache.hadoop.hbase.master.RegionState.State; -064import org.apache.hadoop.hbase.master.ServerListener; -065import org.apache.hadoop.hbase.master.TableStateManager; -066import org.apache.hadoop.hbase.master.assignment.RegionStates.RegionStateNode; -067import org.apache.hadoop.hbase.master.assignment.RegionStates.ServerState; -068import org.apache.hadoop.hbase.master.assignment.RegionStates.ServerStateNode; -069// TODO: why are they here? 
-070import org.apache.hadoop.hbase.master.normalizer.NormalizationPlan.PlanType; -071import org.apache.hadoop.hbase.master.normalizer.RegionNormalizer; -072import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv; -073import org.apache.hadoop.hbase.master.procedure.MasterProcedureScheduler; -074import org.apache.hadoop.hbase.master.procedure.ProcedureSyncWait; -075import org.apache.hadoop.hbase.master.procedure.ServerCrashProcedure; -076import org.apache.hadoop.hbase.procedure2.Procedure; -077import org.apache.hadoop.hbase.procedure2.ProcedureEvent; -078import org.apache.hadoop.hbase.procedure2.ProcedureExecutor; -079import org.apache.hadoop.hbase.procedure2.ProcedureInMemoryChore; -080import org.apache.hadoop.hbase.procedure2.util.StringUtils; -081import org.apache.hadoop.hbase.quotas.QuotaExceededException; -082import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; -083import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition; -084import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode; -085import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionRequest; -086import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionResponse; -087import org.apache.hadoop.hbase.util.Bytes; -088import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; -089import org.apache.hadoop.hbase.util.Pair; -090import org.apache.hadoop.hbase.util.Threads; -091import org.apache.hadoop.hbase.util.VersionInfo; +025import java.util.HashMap; +026import java.util.HashSet; +027import java.util.List; +028import
[16/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2d5075d7/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.DelayedProcedure.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.DelayedProcedure.html b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.DelayedProcedure.html index afd9ccc..904b921 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.DelayedProcedure.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.DelayedProcedure.html @@ -30,1916 +30,1984 @@ 022import org.apache.hadoop.hbase.shaded.com.google.common.base.Preconditions; 023 024import java.io.IOException; -025import java.util.ArrayList; -026import java.util.Arrays; -027import java.util.HashSet; -028import java.util.Iterator; -029import java.util.List; -030import java.util.Map; -031import java.util.Set; -032import java.util.concurrent.atomic.AtomicBoolean; -033import java.util.concurrent.atomic.AtomicInteger; -034import java.util.concurrent.atomic.AtomicLong; -035import java.util.stream.Collectors; -036import java.util.stream.Stream; -037import java.util.concurrent.ConcurrentHashMap; -038import java.util.concurrent.CopyOnWriteArrayList; -039import java.util.concurrent.DelayQueue; -040import java.util.concurrent.TimeUnit; -041 -042import org.apache.commons.logging.Log; -043import org.apache.commons.logging.LogFactory; -044import org.apache.hadoop.conf.Configuration; -045import org.apache.hadoop.hbase.HConstants; -046import org.apache.hadoop.hbase.ProcedureInfo; -047import org.apache.hadoop.hbase.classification.InterfaceAudience; -048import org.apache.hadoop.hbase.classification.InterfaceStability; -049import org.apache.hadoop.hbase.exceptions.IllegalArgumentIOException; -050import org.apache.hadoop.hbase.procedure2.Procedure.LockState; -051import org.apache.hadoop.hbase.procedure2.store.ProcedureStore; -052import org.apache.hadoop.hbase.procedure2.store.ProcedureStore.ProcedureIterator; -053import org.apache.hadoop.hbase.procedure2.util.DelayedUtil; -054import org.apache.hadoop.hbase.procedure2.util.DelayedUtil.DelayedWithTimeout; -055import org.apache.hadoop.hbase.procedure2.util.StringUtils; -056import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.ProcedureState; -057import org.apache.hadoop.hbase.security.User; -058import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; -059import org.apache.hadoop.hbase.util.NonceKey; -060import org.apache.hadoop.hbase.util.Pair; -061import org.apache.hadoop.hbase.util.Threads; -062 -063/** -064 * Thread Pool that executes the submitted procedures. -065 * The executor has a ProcedureStore associated. -066 * Each operation is logged and on restart the pending procedures are resumed. -067 * -068 * Unless the Procedure code throws an error (e.g. invalid user input) -069 * the procedure will complete (at some point in time), On restart the pending -070 * procedures are resumed and the once failed will be rolledback. 
-071 * -072 * The user can add procedures to the executor via submitProcedure(proc) -073 * check for the finished state via isFinished(procId) -074 * and get the result via getResult(procId) -075 */ -076@InterfaceAudience.Private -077@InterfaceStability.Evolving -078public class ProcedureExecutorTEnvironment { -079 private static final Log LOG = LogFactory.getLog(ProcedureExecutor.class); -080 -081 public static final String CHECK_OWNER_SET_CONF_KEY = "hbase.procedure.check.owner.set"; -082 private static final boolean DEFAULT_CHECK_OWNER_SET = false; -083 -084 public static final String WORKER_KEEP_ALIVE_TIME_CONF_KEY = -085 "hbase.procedure.worker.keep.alive.time.msec"; -086 private static final long DEFAULT_WORKER_KEEP_ALIVE_TIME = Long.MAX_VALUE; -087 -088 Testing testing = null; -089 public static class Testing { -090protected boolean killIfSuspended = false; -091protected boolean killBeforeStoreUpdate = false; -092protected boolean toggleKillBeforeStoreUpdate = false; -093 -094protected boolean shouldKillBeforeStoreUpdate() { -095 final boolean kill = this.killBeforeStoreUpdate; -096 if (this.toggleKillBeforeStoreUpdate) { -097this.killBeforeStoreUpdate = !kill; -098LOG.warn("Toggle KILL before store update to: " + this.killBeforeStoreUpdate); -099 } -100 return kill; -101} -102 -103protected boolean shouldKillBeforeStoreUpdate(final boolean isSuspended) { -104 return (isSuspended !killIfSuspended) ? false : shouldKillBeforeStoreUpdate(); -105} -106 } -107 -108 public interface ProcedureExecutorListener { -109void procedureLoaded(long procId); -110void procedureAdded(long procId); -111void procedureFinished(long procId); -112 } -113 -114 /** -115 * Internal cleaner that removes the
[16/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0383a9c2/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/CompactSplit.CompactionRunner.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/CompactSplit.CompactionRunner.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/CompactSplit.CompactionRunner.html index 06b7a03..7dabb5e 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/CompactSplit.CompactionRunner.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/CompactSplit.CompactionRunner.html @@ -216,519 +216,505 @@ 208return queueLists.toString(); 209 } 210 -211 public synchronized void requestRegionsMerge(final Region a, -212 final Region b, final boolean forcible, long masterSystemTime, User user) { -213try { -214 mergePool.execute(new RegionMergeRequest(a, b, this.server, forcible, masterSystemTime,user)); -215 if (LOG.isDebugEnabled()) { -216LOG.debug("Region merge requested for " + a + "," + b + ", forcible=" -217+ forcible + ". " + this); +211 public synchronized boolean requestSplit(final Region r) { +212// don't split regions that are blocking +213if (shouldSplitRegion() ((HRegion)r).getCompactPriority() = Store.PRIORITY_USER) { +214 byte[] midKey = ((HRegion)r).checkSplit(); +215 if (midKey != null) { +216requestSplit(r, midKey); +217return true; 218 } -219} catch (RejectedExecutionException ree) { -220 LOG.warn("Could not execute merge for " + a + "," + b + ", forcible=" -221 + forcible, ree); -222} -223 } -224 -225 public synchronized boolean requestSplit(final Region r) { -226// don't split regions that are blocking -227if (shouldSplitRegion() ((HRegion)r).getCompactPriority() = Store.PRIORITY_USER) { -228 byte[] midKey = ((HRegion)r).checkSplit(); -229 if (midKey != null) { -230requestSplit(r, midKey); -231return true; -232 } -233} -234return false; -235 } -236 -237 public synchronized void requestSplit(final Region r, byte[] midKey) { -238requestSplit(r, midKey, null); -239 } -240 -241 /* -242 * The User parameter allows the split thread to assume the correct user identity -243 */ -244 public synchronized void requestSplit(final Region r, byte[] midKey, User user) { -245if (midKey == null) { -246 LOG.debug("Region " + r.getRegionInfo().getRegionNameAsString() + -247" not splittable because midkey=null"); -248 if (((HRegion)r).shouldForceSplit()) { -249((HRegion)r).clearSplit(); -250 } -251 return; -252} -253try { -254 this.splits.execute(new SplitRequest(r, midKey, this.server, user)); -255 if (LOG.isDebugEnabled()) { -256LOG.debug("Splitting " + r + ", " + this); -257 } -258} catch (RejectedExecutionException ree) { -259 LOG.info("Could not execute split for " + r, ree); -260} -261 } -262 -263 @Override -264 public synchronized ListCompactionRequest requestCompaction(final Region r, final String why) -265 throws IOException { -266return requestCompaction(r, why, null); -267 } -268 -269 @Override -270 public synchronized ListCompactionRequest requestCompaction(final Region r, final String why, -271 ListPairCompactionRequest, Store requests) throws IOException { -272return requestCompaction(r, why, Store.NO_PRIORITY, requests, null); -273 } -274 -275 @Override -276 public synchronized CompactionRequest requestCompaction(final Region r, final Store s, -277 final String why, CompactionRequest request) throws IOException { -278return requestCompaction(r, s, why, Store.NO_PRIORITY, request, null); -279 } -280 -281 @Override -282 public synchronized ListCompactionRequest requestCompaction(final 
Region r, final String why, -283 int p, ListPairCompactionRequest, Store requests, User user) throws IOException { -284return requestCompactionInternal(r, why, p, requests, true, user); -285 } -286 -287 private ListCompactionRequest requestCompactionInternal(final Region r, final String why, -288 int p, ListPairCompactionRequest, Store requests, boolean selectNow, User user) -289 throws IOException { -290// not a special compaction request, so make our own list -291ListCompactionRequest ret = null; -292if (requests == null) { -293 ret = selectNow ? new ArrayListCompactionRequest(r.getStores().size()) : null; -294 for (Store s : r.getStores()) { -295CompactionRequest cr = requestCompactionInternal(r, s, why, p, null, selectNow, user); -296if (selectNow) ret.add(cr); -297 } -298} else { -299 Preconditions.checkArgument(selectNow); // only system requests have selectNow == false -300 ret = new
[16/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f391bcef/devapidocs/org/apache/hadoop/hbase/client/ScannerCallable.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/client/ScannerCallable.html b/devapidocs/org/apache/hadoop/hbase/client/ScannerCallable.html index c0f8071..7eca56f 100644 --- a/devapidocs/org/apache/hadoop/hbase/client/ScannerCallable.html +++ b/devapidocs/org/apache/hadoop/hbase/client/ScannerCallable.html @@ -420,7 +420,7 @@ extends RegionServerCallable -call, getConnection, getExceptionMessageAdditionalDetail, getLocation, getRow, getRpcController, getRpcControllerCellScanner, getStub, getTableName, setLocation, setRpcControllerCellScanner, setStub, sleep, throwable +call, getConnection, getExceptionMessageAdditionalDetail, getLocation, getPriority, getRow, getRpcController, getRpcControllerCellScanner, getStub, getTableName, setLocation, setRpcControllerCellScanner, setStub, sleep, throwable http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f391bcef/devapidocs/org/apache/hadoop/hbase/client/SecureBulkLoadClient.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/client/SecureBulkLoadClient.html b/devapidocs/org/apache/hadoop/hbase/client/SecureBulkLoadClient.html index 8c58413..616e65d 100644 --- a/devapidocs/org/apache/hadoop/hbase/client/SecureBulkLoadClient.html +++ b/devapidocs/org/apache/hadoop/hbase/client/SecureBulkLoadClient.html @@ -110,7 +110,7 @@ var activeTableTab = "activeTableTab"; @InterfaceAudience.Private -public class SecureBulkLoadClient +public class SecureBulkLoadClient extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true; title="class or interface in java.lang">Object Client proxy for SecureBulkLoadProtocol @@ -232,7 +232,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html? table -privateTable table +privateTable table @@ -241,7 +241,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html? rpcControllerFactory -private finalRpcControllerFactory rpcControllerFactory +private finalRpcControllerFactory rpcControllerFactory @@ -258,7 +258,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html? SecureBulkLoadClient -publicSecureBulkLoadClient(org.apache.hadoop.conf.Configurationconf, +publicSecureBulkLoadClient(org.apache.hadoop.conf.Configurationconf, Tabletable) @@ -276,7 +276,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html? prepareBulkLoad -publichttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">StringprepareBulkLoad(Connectionconn) +publichttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">StringprepareBulkLoad(Connectionconn) throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true; title="class or interface in java.io">IOException Throws: @@ -290,7 +290,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html? cleanupBulkLoad -publicvoidcleanupBulkLoad(Connectionconn, +publicvoidcleanupBulkLoad(Connectionconn, http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">StringbulkToken) throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true; title="class or interface in java.io">IOException @@ -305,7 +305,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html? 
secureBulkLoadHFiles -publicbooleansecureBulkLoadHFiles(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService.BlockingInterfaceclient, +publicbooleansecureBulkLoadHFiles(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService.BlockingInterfaceclient, http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true; title="class or interface in java.util">ListPairbyte[],http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">StringfamilyPaths, byte[]regionName, booleanassignSeqNum, @@ -334,7 +334,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html? secureBulkLoadHFiles -publicbooleansecureBulkLoadHFiles(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService.BlockingInterfaceclient, +publicbooleansecureBulkLoadHFiles(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService.BlockingInterfaceclient,
[16/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ca5b0275/devapidocs/src-html/org/apache/hadoop/hbase/CellUtil.EmptyByteBufferCell.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/CellUtil.EmptyByteBufferCell.html b/devapidocs/src-html/org/apache/hadoop/hbase/CellUtil.EmptyByteBufferCell.html index 924a575..18d00df 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/CellUtil.EmptyByteBufferCell.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/CellUtil.EmptyByteBufferCell.html @@ -572,7 +572,7 @@ 564public long heapSize() { 565 long sum = HEAP_SIZE_OVERHEAD + CellUtil.estimatedHeapSizeOf(cell); 566 if (this.tags != null) { -567sum += ClassSize.sizeOf(this.tags, this.tags.length); +567sum += ClassSize.sizeOf(this.tags); 568 } 569 return sum; 570} @@ -771,7 +771,7 @@ 763 long sum = HEAP_SIZE_OVERHEAD + CellUtil.estimatedHeapSizeOf(cell); 764 // this.tags is on heap byte[] 765 if (this.tags != null) { -766sum += ClassSize.sizeOf(this.tags, this.tags.length); +766sum += ClassSize.sizeOf(this.tags); 767 } 768 return sum; 769} @@ -897,7 +897,7 @@ 889public long heapSize() { 890 long sum = ClassSize.REFERENCE + super.heapSize(); 891 if (this.value != null) { -892sum += ClassSize.sizeOf(this.value, this.value.length); +892sum += ClassSize.sizeOf(this.value); 893 } 894 return sum; 895} @@ -997,7 +997,7 @@ 989public long heapSize() { 990 long sum = ClassSize.REFERENCE + super.heapSize(); 991 if (this.value != null) { -992sum += ClassSize.sizeOf(this.value, this.value.length); +992sum += ClassSize.sizeOf(this.value); 993 } 994 return sum; 995} http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ca5b0275/devapidocs/src-html/org/apache/hadoop/hbase/CellUtil.EmptyCell.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/CellUtil.EmptyCell.html b/devapidocs/src-html/org/apache/hadoop/hbase/CellUtil.EmptyCell.html index 924a575..18d00df 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/CellUtil.EmptyCell.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/CellUtil.EmptyCell.html @@ -572,7 +572,7 @@ 564public long heapSize() { 565 long sum = HEAP_SIZE_OVERHEAD + CellUtil.estimatedHeapSizeOf(cell); 566 if (this.tags != null) { -567sum += ClassSize.sizeOf(this.tags, this.tags.length); +567sum += ClassSize.sizeOf(this.tags); 568 } 569 return sum; 570} @@ -771,7 +771,7 @@ 763 long sum = HEAP_SIZE_OVERHEAD + CellUtil.estimatedHeapSizeOf(cell); 764 // this.tags is on heap byte[] 765 if (this.tags != null) { -766sum += ClassSize.sizeOf(this.tags, this.tags.length); +766sum += ClassSize.sizeOf(this.tags); 767 } 768 return sum; 769} @@ -897,7 +897,7 @@ 889public long heapSize() { 890 long sum = ClassSize.REFERENCE + super.heapSize(); 891 if (this.value != null) { -892sum += ClassSize.sizeOf(this.value, this.value.length); +892sum += ClassSize.sizeOf(this.value); 893 } 894 return sum; 895} @@ -997,7 +997,7 @@ 989public long heapSize() { 990 long sum = ClassSize.REFERENCE + super.heapSize(); 991 if (this.value != null) { -992sum += ClassSize.sizeOf(this.value, this.value.length); +992sum += ClassSize.sizeOf(this.value); 993 } 994 return sum; 995} http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ca5b0275/devapidocs/src-html/org/apache/hadoop/hbase/CellUtil.FirstOnRowByteBufferCell.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/CellUtil.FirstOnRowByteBufferCell.html b/devapidocs/src-html/org/apache/hadoop/hbase/CellUtil.FirstOnRowByteBufferCell.html index 924a575..18d00df 100644 --- 
a/devapidocs/src-html/org/apache/hadoop/hbase/CellUtil.FirstOnRowByteBufferCell.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/CellUtil.FirstOnRowByteBufferCell.html @@ -572,7 +572,7 @@ 564public long heapSize() { 565 long sum = HEAP_SIZE_OVERHEAD + CellUtil.estimatedHeapSizeOf(cell); 566 if (this.tags != null) { -567sum += ClassSize.sizeOf(this.tags, this.tags.length); +567sum += ClassSize.sizeOf(this.tags); 568 } 569 return sum; 570} @@ -771,7 +771,7 @@ 763 long sum = HEAP_SIZE_OVERHEAD + CellUtil.estimatedHeapSizeOf(cell); 764 // this.tags is on heap byte[] 765 if (this.tags != null) { -766sum += ClassSize.sizeOf(this.tags, this.tags.length); +766sum += ClassSize.sizeOf(this.tags); 767 } 768 return sum; 769} @@ -897,7 +897,7 @@
[16/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9eba7fcf/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.ExtendedSink.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.ExtendedSink.html b/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.ExtendedSink.html index e1fbce4..873e17f 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.ExtendedSink.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.ExtendedSink.html @@ -1089,497 +1089,498 @@ 1081} 1082 } 1083 MapString, AtomicLong actualReadTableLatency = regionSink.getReadLatencyMap(); -1084 for (String tableName : this.configuredReadTableTimeouts.keySet()) { -1085if (actualReadTableLatency.containsKey(tableName)) { -1086 Long actual = actualReadTableLatency.get(tableName).longValue(); -1087 Long configured = this.configuredReadTableTimeouts.get(tableName); -1088 LOG.info("Read operation for " + tableName + " took " + actual + -1089" ms. The configured read timeout was " + configured + " ms."); -1090 if (actual configured) { -1091LOG.error("Read operation for " + tableName + " exceeded the configured read timeout."); -1092 } -1093} else { -1094 LOG.error("Read operation for " + tableName + " failed!"); -1095} -1096 } -1097 if (this.writeSniffing) { -1098String writeTableStringName = this.writeTableName.getNameAsString(); -1099long actualWriteLatency = regionSink.getWriteLatency().longValue(); -1100LOG.info("Write operation for " + writeTableStringName + " took " + actualWriteLatency + " ms. The configured write timeout was " + -1101 this.configuredWriteTableTimeout + " ms."); -1102// Check that the writeTable write operation latency does not exceed the configured timeout. -1103if (actualWriteLatency this.configuredWriteTableTimeout) { -1104 LOG.error("Write operation for " + writeTableStringName + " exceeded the configured write timeout."); -1105} -1106 } -1107} catch (Exception e) { -1108 LOG.error("Run regionMonitor failed", e); -1109 this.errorCode = ERROR_EXIT_CODE; -1110} - } -1112 this.done = true; -1113} -1114 -1115private String[] generateMonitorTables(String[] monitorTargets) throws IOException { -1116 String[] returnTables = null; -1117 -1118 if (this.useRegExp) { -1119Pattern pattern = null; -1120HTableDescriptor[] tds = null; -1121SetString tmpTables = new TreeSet(); -1122try { -1123 if (LOG.isDebugEnabled()) { -1124 LOG.debug(String.format("reading list of tables")); -1125 } -1126 tds = this.admin.listTables(pattern); -1127 if (tds == null) { -1128tds = new HTableDescriptor[0]; -1129 } -1130 for (String monitorTarget : monitorTargets) { -1131pattern = Pattern.compile(monitorTarget); -1132for (HTableDescriptor td : tds) { -1133 if (pattern.matcher(td.getNameAsString()).matches()) { -1134 tmpTables.add(td.getNameAsString()); -1135 } -1136} -1137 } -1138} catch (IOException e) { -1139 LOG.error("Communicate with admin failed", e); -1140 throw e; -1141} -1142 -1143if (tmpTables.size() 0) { -1144 returnTables = tmpTables.toArray(new String[tmpTables.size()]); -1145} else { -1146 String msg = "No HTable found, tablePattern:" + Arrays.toString(monitorTargets); -1147 LOG.error(msg); -1148 this.errorCode = INIT_ERROR_EXIT_CODE; -1149 throw new TableNotFoundException(msg); -1150} -1151 } else { -1152returnTables = monitorTargets; -1153 } -1154 -1155 return returnTables; -1156} -1157 -1158/* -1159 * canary entry point to monitor all the tables. 
-1160 */ -1161private ListFutureVoid sniff(TaskType taskType, RegionStdOutSink regionSink) throws Exception { -1162 if (LOG.isDebugEnabled()) { -1163LOG.debug(String.format("reading list of tables")); -1164 } -1165 ListFutureVoid taskFutures = new LinkedList(); -1166 for (HTableDescriptor table : admin.listTables()) { -1167if (admin.isTableEnabled(table.getTableName()) -1168 (!table.getTableName().equals(writeTableName))) { -1169 AtomicLong readLatency = regionSink.initializeAndGetReadLatencyForTable(table.getNameAsString()); -1170 taskFutures.addAll(Canary.sniff(admin, sink, table, executor, taskType, this.rawScanEnabled, readLatency)); -1171}
[16/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/17128d27/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ProcedureFuture.WaitForStateCallable.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ProcedureFuture.WaitForStateCallable.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ProcedureFuture.WaitForStateCallable.html index feb42ea..4bd98f4 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ProcedureFuture.WaitForStateCallable.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ProcedureFuture.WaitForStateCallable.html @@ -185,4189 +185,4266 @@ 177import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ShutdownRequest; 178import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SnapshotRequest; 179import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SnapshotResponse; -180import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.StopMasterRequest; -181import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableRequest; -182import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableResponse; -183import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionRequest; -184import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos; -185import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos; -186import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse; -187import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos; -188import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils; -189import org.apache.hadoop.hbase.snapshot.HBaseSnapshotException; -190import org.apache.hadoop.hbase.snapshot.RestoreSnapshotException; -191import org.apache.hadoop.hbase.snapshot.SnapshotCreationException; -192import org.apache.hadoop.hbase.snapshot.UnknownSnapshotException; -193import org.apache.hadoop.hbase.util.Addressing; -194import org.apache.hadoop.hbase.util.Bytes; -195import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; -196import org.apache.hadoop.hbase.util.ForeignExceptionUtil; -197import org.apache.hadoop.hbase.util.Pair; -198import org.apache.hadoop.hbase.zookeeper.MasterAddressTracker; -199import org.apache.hadoop.hbase.zookeeper.MetaTableLocator; -200import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; -201import org.apache.hadoop.ipc.RemoteException; -202import org.apache.hadoop.util.StringUtils; -203import org.apache.zookeeper.KeeperException; -204 -205import com.google.common.annotations.VisibleForTesting; -206import com.google.protobuf.Descriptors; -207import com.google.protobuf.Message; -208import com.google.protobuf.RpcController; -209import java.util.stream.Collectors; -210 -211/** -212 * HBaseAdmin is no longer a client API. It is marked InterfaceAudience.Private indicating that -213 * this is an HBase-internal class as defined in -214 * https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-common/InterfaceClassification.html -215 * There are no guarantees for backwards source / binary compatibility and methods or class can -216 * change or go away without deprecation. -217 * Use {@link Connection#getAdmin()} to obtain an instance of {@link Admin} instead of constructing -218 * an HBaseAdmin directly. 
-219 * -220 * pConnection should be an iunmanaged/i connection obtained via -221 * {@link ConnectionFactory#createConnection(Configuration)} -222 * -223 * @see ConnectionFactory -224 * @see Connection -225 * @see Admin -226 */ -227@InterfaceAudience.Private -228@InterfaceStability.Evolving -229public class HBaseAdmin implements Admin { -230 private static final Log LOG = LogFactory.getLog(HBaseAdmin.class); -231 -232 private static final String ZK_IDENTIFIER_PREFIX = "hbase-admin-on-"; +180import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SplitTableRegionRequest; +181import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SplitTableRegionResponse; +182import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.StopMasterRequest; +183import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableRequest; +184import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableResponse; +185import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionRequest; +186import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos; +187import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos; +188import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse; +189import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos; +190import
[16/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2777c693/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestStore.html -- diff --git a/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestStore.html b/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestStore.html index 7c8cdf7..344a566 100644 --- a/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestStore.html +++ b/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestStore.html @@ -18,7 +18,7 @@ catch(err) { } //--> -var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":9,"i9":10,"i10":9,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10}; +var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":9,"i9":10,"i10":9,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10}; var tabs = {65535:["t0","All Methods"],1:["t1","Static Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]}; var altColor = "altColor"; var rowColor = "rowColor"; @@ -109,7 +109,7 @@ var activeTableTab = "activeTableTab"; -public class TestStore +public class TestStore extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true; title="class or interface in java.lang">Object Test class for the Store @@ -158,18 +158,22 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html? TestStore.MyListT -private static class -TestStore.MyMemStoreCompactor +(package private) static interface +TestStore.MyListHook -private static interface -TestStore.MyScannerHook +private static class +TestStore.MyMemStoreCompactor (package private) class TestStore.MyStore +private class +TestStore.MyStoreHook + + private static class TestStore.MyThread @@ -386,26 +390,26 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html? 
private org.apache.hadoop.hbase.regionserver.Store -init(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">StringmethodName, +init(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">StringmethodName, org.apache.hadoop.conf.Configurationconf, org.apache.hadoop.hbase.HTableDescriptorhtd, org.apache.hadoop.hbase.HColumnDescriptorhcd, -TestStore.MyScannerHookhook) +TestStore.MyStoreHookhook) private org.apache.hadoop.hbase.regionserver.Store -init(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">StringmethodName, +init(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">StringmethodName, org.apache.hadoop.conf.Configurationconf, org.apache.hadoop.hbase.HTableDescriptorhtd, org.apache.hadoop.hbase.HColumnDescriptorhcd, -TestStore.MyScannerHookhook, +TestStore.MyStoreHookhook, booleanswitchToPread) private TestStore.MyStore -initMyStore(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">StringmethodName, +initMyStore(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">StringmethodName, org.apache.hadoop.conf.Configurationconf, - TestStore.MyScannerHookhook) + TestStore.MyStoreHookhook) void @@ -443,90 +447,107 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html? +private void +testFlushBeforeCompletingScan(TestStore.MyListHookhook, + org.apache.hadoop.hbase.filter.Filterfilter) + + +void +testFlushBeforeCompletingScanWithFilter() + + +void +testFlushBeforeCompletingScanWithFilterHint() + + +void +testFlushBeforeCompletingScanWoFilter() + + void testFlushSizeAccounting() Test we do not lose data if we fail a flush and then close. - + void testGet_FromFilesOnly() Getting data from files only - + void testGet_FromMemStoreAndFiles() Getting data from memstore and files - + void testGet_FromMemStoreOnly() Getting data from memstore only - + void testHandleErrorsInFlush() - + void testLowestModificationTime() - + void testMultipleTimestamps() Test to ensure correctness when using Stores with multiple timestamps -
[16/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/90c7dfe4/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.ProcedureBiConsumer.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.ProcedureBiConsumer.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.ProcedureBiConsumer.html index 75db22d..99a09f9 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.ProcedureBiConsumer.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.ProcedureBiConsumer.html @@ -37,2710 +37,2816 @@ 029import java.util.List; 030import java.util.Map; 031import java.util.Optional; -032import java.util.concurrent.CompletableFuture; -033import java.util.concurrent.TimeUnit; -034import java.util.concurrent.atomic.AtomicReference; -035import java.util.function.BiConsumer; -036import java.util.regex.Pattern; -037import java.util.stream.Collectors; -038 -039import com.google.common.annotations.VisibleForTesting; -040 -041import io.netty.util.Timeout; -042import io.netty.util.TimerTask; -043 -044import java.util.stream.Stream; -045 -046import org.apache.commons.io.IOUtils; -047import org.apache.commons.logging.Log; -048import org.apache.commons.logging.LogFactory; -049import org.apache.hadoop.hbase.ClusterStatus; -050import org.apache.hadoop.hbase.HRegionInfo; -051import org.apache.hadoop.hbase.HRegionLocation; -052import org.apache.hadoop.hbase.MetaTableAccessor; -053import org.apache.hadoop.hbase.MetaTableAccessor.QueryType; -054import org.apache.hadoop.hbase.NotServingRegionException; -055import org.apache.hadoop.hbase.ProcedureInfo; -056import org.apache.hadoop.hbase.RegionLoad; -057import org.apache.hadoop.hbase.RegionLocations; -058import org.apache.hadoop.hbase.ServerName; -059import org.apache.hadoop.hbase.NamespaceDescriptor; -060import org.apache.hadoop.hbase.HConstants; -061import org.apache.hadoop.hbase.TableExistsException; -062import org.apache.hadoop.hbase.TableName; -063import org.apache.hadoop.hbase.AsyncMetaTableAccessor; -064import org.apache.hadoop.hbase.TableNotDisabledException; -065import org.apache.hadoop.hbase.TableNotEnabledException; -066import org.apache.hadoop.hbase.TableNotFoundException; -067import org.apache.hadoop.hbase.UnknownRegionException; -068import org.apache.hadoop.hbase.classification.InterfaceAudience; -069import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.AdminRequestCallerBuilder; -070import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.MasterRequestCallerBuilder; -071import org.apache.hadoop.hbase.client.Scan.ReadType; -072import org.apache.hadoop.hbase.client.replication.ReplicationSerDeHelper; -073import org.apache.hadoop.hbase.client.replication.TableCFs; -074import org.apache.hadoop.hbase.exceptions.DeserializationException; -075import org.apache.hadoop.hbase.ipc.HBaseRpcController; -076import org.apache.hadoop.hbase.quotas.QuotaFilter; -077import org.apache.hadoop.hbase.quotas.QuotaSettings; -078import org.apache.hadoop.hbase.quotas.QuotaTableUtil; -079import org.apache.hadoop.hbase.replication.ReplicationException; -080import org.apache.hadoop.hbase.replication.ReplicationPeerConfig; -081import org.apache.hadoop.hbase.replication.ReplicationPeerDescription; -082import org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback; -083import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; -084import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter; -085import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService; -086import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest; -087import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse; -088import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest; -089import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse; -090import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest; -091import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse; -092import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionRequest; -093import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionResponse; -094import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest; -095import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse; -096import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest; -097import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse; -098import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionRequest; -099import
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0821e51a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.TruncateTableProcedureBiConsumer.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.TruncateTableProcedureBiConsumer.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.TruncateTableProcedureBiConsumer.html index 71844ce..75db22d 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.TruncateTableProcedureBiConsumer.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.TruncateTableProcedureBiConsumer.html @@ -105,2564 +105,2642 @@ 097import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse; 098import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionRequest; 099import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionResponse; -100import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription; -101import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema; -102import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType; -103import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest; -104import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse; -105import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest; -106import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse; -107import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest; -108import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionResponse; -109import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceRequest; -110import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceResponse; -111import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest; -112import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceResponse; -113import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceRequest; -114import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceResponse; -115import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotRequest; -116import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotResponse; -117import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableRequest; -118import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableResponse; -119import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest; -120import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableCatalogJanitorResponse; -121import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersRequest; -122import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersResponse; -123import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableRequest; -124import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableResponse; -125import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnRequest; -126import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnResponse; -127import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureRequest; -128import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureResponse; -129import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterStatusRequest; -130import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterStatusResponse; -131import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest; -132import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse; -133import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest; -134import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse; -135import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultRequest; -136import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultResponse; -137import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetSchemaAlterStatusRequest; -138import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetSchemaAlterStatusResponse; -139import
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2d27954a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.Converter.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.Converter.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.Converter.html index 16c0042..71844ce 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.Converter.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.Converter.html @@ -126,2499 +126,2543 @@ 118import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableResponse; 119import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest; 120import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableCatalogJanitorResponse; -121import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableRequest; -122import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableResponse; -123import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnRequest; -124import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnResponse; -125import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureRequest; -126import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureResponse; -127import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterStatusRequest; -128import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterStatusResponse; -129import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest; -130import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse; -131import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest; -132import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse; -133import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultRequest; -134import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultResponse; -135import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetSchemaAlterStatusRequest; -136import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetSchemaAlterStatusResponse; -137import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableDescriptorsRequest; -138import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableDescriptorsResponse; -139import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableNamesRequest; -140import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableNamesResponse; -141import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableRequest; -142import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableResponse; -143import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableRequest; -144import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableResponse; -145import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsBalancerEnabledRequest; -146import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsBalancerEnabledResponse; -147import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledRequest; -148import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledResponse; -149import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsCleanerChoreEnabledRequest; -150import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsCleanerChoreEnabledResponse; -151import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest; -152import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse; -153import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsNormalizerEnabledRequest; -154import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsNormalizerEnabledResponse; -155import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsProcedureDoneRequest; -156import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsProcedureDoneResponse; -157import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneRequest; -158import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneResponse; -159import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest; -160import
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9fb0764b/devapidocs/org/apache/hadoop/hbase/client/Admin.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/client/Admin.html b/devapidocs/org/apache/hadoop/hbase/client/Admin.html index 2ebbffe..773961a 100644 --- a/devapidocs/org/apache/hadoop/hbase/client/Admin.html +++ b/devapidocs/org/apache/hadoop/hbase/client/Admin.html @@ -18,7 +18,7 @@ catch(err) { } //--> -var methods = {"i0":6,"i1":6,"i2":6,"i3":38,"i4":6,"i5":6,"i6":18,"i7":18,"i8":6,"i9":6,"i10":6,"i11":6,"i12":6,"i13":6,"i14":6,"i15":6,"i16":6,"i17":6,"i18":6,"i19":6,"i20":6,"i21":6,"i22":6,"i23":6,"i24":6,"i25":6,"i26":6,"i27":6,"i28":6,"i29":6,"i30":6,"i31":6,"i32":6,"i33":6,"i34":6,"i35":6,"i36":38,"i37":6,"i38":6,"i39":6,"i40":6,"i41":6,"i42":6,"i43":6,"i44":6,"i45":6,"i46":6,"i47":6,"i48":6,"i49":6,"i50":6,"i51":18,"i52":6,"i53":6,"i54":6,"i55":6,"i56":6,"i57":6,"i58":6,"i59":18,"i60":6,"i61":6,"i62":6,"i63":6,"i64":6,"i65":6,"i66":6,"i67":6,"i68":6,"i69":38,"i70":6,"i71":6,"i72":6,"i73":6,"i74":6,"i75":6,"i76":6,"i77":6,"i78":6,"i79":6,"i80":6,"i81":6,"i82":6,"i83":6,"i84":6,"i85":6,"i86":6,"i87":18,"i88":6,"i89":6,"i90":6,"i91":6,"i92":6,"i93":6,"i94":6,"i95":6,"i96":6,"i97":6,"i98":6,"i99":6,"i100":6,"i101":6,"i102":6,"i103":6,"i104":6,"i105":6,"i106":6,"i107":6,"i108":6,"i109":6,"i110":6,"i111":18,"i112":18,"i113":18,"i114":6,"i115":6,"i116":6,"i117":6,"i118":6,"i119":6," i120":6,"i121":6,"i122":6,"i123":6,"i124":6,"i125":6,"i126":6,"i127":6,"i128":6,"i129":6,"i130":6,"i131":6,"i132":6,"i133":6,"i134":6,"i135":6,"i136":6,"i137":38,"i138":6,"i139":6,"i140":38,"i141":6,"i142":6,"i143":6,"i144":6,"i145":6,"i146":6,"i147":6,"i148":6,"i149":6,"i150":6,"i151":18,"i152":18,"i153":6,"i154":6,"i155":6,"i156":6,"i157":6,"i158":6,"i159":6,"i160":6,"i161":6,"i162":6,"i163":6,"i164":6,"i165":6,"i166":6,"i167":6,"i168":6,"i169":6,"i170":6,"i171":6,"i172":6,"i173":6,"i174":6,"i175":6,"i176":6,"i177":6,"i178":6,"i179":6,"i180":6,"i181":6,"i182":6,"i183":6,"i184":6,"i185":18}; +var methods = {"i0":6,"i1":6,"i2":6,"i3":38,"i4":6,"i5":6,"i6":18,"i7":18,"i8":6,"i9":6,"i10":6,"i11":6,"i12":6,"i13":6,"i14":6,"i15":6,"i16":6,"i17":6,"i18":6,"i19":6,"i20":6,"i21":6,"i22":6,"i23":6,"i24":6,"i25":6,"i26":6,"i27":6,"i28":6,"i29":6,"i30":6,"i31":6,"i32":50,"i33":50,"i34":50,"i35":6,"i36":6,"i37":6,"i38":50,"i39":6,"i40":38,"i41":6,"i42":6,"i43":6,"i44":6,"i45":6,"i46":6,"i47":6,"i48":6,"i49":6,"i50":6,"i51":38,"i52":38,"i53":6,"i54":6,"i55":18,"i56":6,"i57":6,"i58":6,"i59":38,"i60":38,"i61":6,"i62":6,"i63":18,"i64":6,"i65":6,"i66":6,"i67":38,"i68":38,"i69":6,"i70":6,"i71":6,"i72":6,"i73":38,"i74":6,"i75":6,"i76":6,"i77":6,"i78":6,"i79":6,"i80":6,"i81":6,"i82":6,"i83":6,"i84":6,"i85":6,"i86":6,"i87":6,"i88":6,"i89":6,"i90":6,"i91":18,"i92":6,"i93":38,"i94":38,"i95":38,"i96":6,"i97":6,"i98":6,"i99":6,"i100":6,"i101":6,"i102":6,"i103":6,"i104":6,"i105":6,"i106":6,"i107":6,"i108":6,"i109":6,"i110":6,"i111":6,"i112":6,"i113":6,"i114":6,"i115":18,"i116":18,"i117":18,"i118" 
:6,"i119":6,"i120":6,"i121":6,"i122":6,"i123":6,"i124":6,"i125":6,"i126":6,"i127":6,"i128":6,"i129":38,"i130":6,"i131":6,"i132":6,"i133":6,"i134":6,"i135":6,"i136":38,"i137":38,"i138":38,"i139":38,"i140":38,"i141":6,"i142":6,"i143":6,"i144":6,"i145":6,"i146":6,"i147":6,"i148":6,"i149":38,"i150":6,"i151":6,"i152":38,"i153":6,"i154":6,"i155":6,"i156":6,"i157":6,"i158":38,"i159":6,"i160":38,"i161":6,"i162":6,"i163":6,"i164":6,"i165":18,"i166":18,"i167":6,"i168":6,"i169":6,"i170":6,"i171":6,"i172":6,"i173":6,"i174":6,"i175":6,"i176":6,"i177":6,"i178":6,"i179":6,"i180":6,"i181":6,"i182":6,"i183":6,"i184":6,"i185":6,"i186":6,"i187":6,"i188":6,"i189":6,"i190":6,"i191":6,"i192":6,"i193":6,"i194":6,"i195":6,"i196":6,"i197":6,"i198":6,"i199":18}; var tabs = {65535:["t0","All Methods"],2:["t2","Instance Methods"],4:["t3","Abstract Methods"],16:["t5","Default Methods"],32:["t6","Deprecated Methods"]}; var altColor = "altColor"; var rowColor = "rowColor"; @@ -363,35 +363,76 @@ extends -void +default void createTable(HTableDescriptordesc) -Creates a new table. +Deprecated. +since 2.0 version and will be removed in 3.0 version. + use createTable(TableDescriptor) + -void +default void createTable(HTableDescriptordesc, byte[][]splitKeys) -Creates a new table with an initial set of empty regions defined by the specified split keys. +Deprecated. +since 2.0 version and will be removed in 3.0 version. + use createTable(TableDescriptor, byte[][]) + -void +default void createTable(HTableDescriptordesc, byte[]startKey, byte[]endKey, intnumRegions) -Creates a new table with the specified number of regions. +Deprecated. +since 2.0 version and will be removed in 3.0 version. + use createTable(TableDescriptor, byte[], byte[], int) +
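The deprecation notes above all point from the HTableDescriptor overloads of createTable to the TableDescriptor-based replacements. A minimal migration sketch, assuming the 2.x builder API; the table name "t1" and family "cf" are placeholders, and the family-adding builder method has been spelled addColumnFamily in some snapshots and setColumnFamily in others:

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class CreateTableMigration {
  // Replacement for the deprecated admin.createTable(new HTableDescriptor(...)) pattern:
  // build an immutable TableDescriptor and hand it to the same Admin.
  static void createExampleTable(Admin admin) throws IOException {
    TableDescriptor desc = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("t1"))                      // placeholder table name
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))  // one family named "cf"
        .build();
    admin.createTable(desc);
  }
}

The split-key overloads migrate the same way: build the descriptor once, then call createTable(desc, splitKeys).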
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b3b50f22/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html index d262744..7464ef7 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html @@ -894,2603 +894,2617 @@ 886// Set master as 'initialized'. 887setInitialized(true); 888 -889status.setStatus("Assign meta replicas"); -890metaBootstrap.assignMetaReplicas(); -891 -892status.setStatus("Starting quota manager"); -893initQuotaManager(); -894if (QuotaUtil.isQuotaEnabled(conf)) { -895 // Create the quota snapshot notifier -896 spaceQuotaSnapshotNotifier = createQuotaSnapshotNotifier(); -897 spaceQuotaSnapshotNotifier.initialize(getClusterConnection()); -898 this.quotaObserverChore = new QuotaObserverChore(this, getMasterMetrics()); -899 // Start the chore to read the region FS space reports and act on them -900 getChoreService().scheduleChore(quotaObserverChore); -901 -902 this.snapshotQuotaChore = new SnapshotQuotaObserverChore(this, getMasterMetrics()); -903 // Start the chore to read snapshots and add their usage to table/NS quotas -904 getChoreService().scheduleChore(snapshotQuotaChore); -905} -906 -907// clear the dead servers with same host name and port of online server because we are not -908// removing dead server with same hostname and port of rs which is trying to check in before -909// master initialization. See HBASE-5916. -910 this.serverManager.clearDeadServersWithSameHostNameAndPortOfOnlineServer(); -911 -912// Check and set the znode ACLs if needed in case we are overtaking a non-secure configuration -913status.setStatus("Checking ZNode ACLs"); -914zooKeeper.checkAndSetZNodeAcls(); -915 -916status.setStatus("Initializing MOB Cleaner"); -917initMobCleaner(); -918 -919status.setStatus("Calling postStartMaster coprocessors"); -920if (this.cpHost != null) { -921 // don't let cp initialization errors kill the master -922 try { -923this.cpHost.postStartMaster(); -924 } catch (IOException ioe) { -925LOG.error("Coprocessor postStartMaster() hook failed", ioe); -926 } -927} -928 -929zombieDetector.interrupt(); -930 } -931 -932 /** -933 * Adds the {@code MasterSpaceQuotaObserver} to the list of configured Master observers to -934 * automatically remove space quotas for a table when that table is deleted. -935 */ -936 @VisibleForTesting -937 public void updateConfigurationForSpaceQuotaObserver(Configuration conf) { -938// We're configured to not delete quotas on table deletion, so we don't need to add the obs. -939if (!conf.getBoolean( -940 MasterSpaceQuotaObserver.REMOVE_QUOTA_ON_TABLE_DELETE, -941 MasterSpaceQuotaObserver.REMOVE_QUOTA_ON_TABLE_DELETE_DEFAULT)) { -942 return; -943} -944String[] masterCoprocs = conf.getStrings(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY); -945final int length = null == masterCoprocs ? 
0 : masterCoprocs.length; -946String[] updatedCoprocs = new String[length + 1]; -947if (length > 0) { -948 System.arraycopy(masterCoprocs, 0, updatedCoprocs, 0, masterCoprocs.length); -949} -950updatedCoprocs[length] = MasterSpaceQuotaObserver.class.getName(); -951 conf.setStrings(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, updatedCoprocs); -952 } -953 -954 private void initMobCleaner() { -955this.expiredMobFileCleanerChore = new ExpiredMobFileCleanerChore(this); -956 getChoreService().scheduleChore(expiredMobFileCleanerChore); -957 -958int mobCompactionPeriod = conf.getInt(MobConstants.MOB_COMPACTION_CHORE_PERIOD, -959 MobConstants.DEFAULT_MOB_COMPACTION_CHORE_PERIOD); -960if (mobCompactionPeriod > 0) { -961 this.mobCompactChore = new MobCompactionChore(this, mobCompactionPeriod); -962 getChoreService().scheduleChore(mobCompactChore); -963} else { -964 LOG -965.info("The period is " + mobCompactionPeriod + " seconds, MobCompactionChore is disabled"); -966} -967this.mobCompactThread = new MasterMobCompactionThread(this); -968 } -969 -970 /** -971 * Create a {@link MasterMetaBootstrap} instance. -972 */ -973 MasterMetaBootstrap createMetaBootstrap(final HMaster master, final MonitoredTask status) { -974// We put this out here in a method so can do a Mockito.spy and stub it out -975// w/ a mocked up MasterMetaBootstrap. -976return new MasterMetaBootstrap(master, status); -977 } -978 -979 /** -980 * Create a {@link
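updateConfigurationForSpaceQuotaObserver above appends a single observer class to the comma-separated master coprocessor list. A standalone sketch of that append pattern using only the stock Hadoop Configuration API; the key string is the value behind CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY as used above, and the main method exists only to demonstrate the call:

import org.apache.hadoop.conf.Configuration;

public class CoprocessorConfigUtil {

  // Mirrors the array-grow-and-append logic of updateConfigurationForSpaceQuotaObserver.
  static void appendCoprocessor(Configuration conf, String key, String className) {
    String[] current = conf.getStrings(key);            // null when the key is unset
    int length = current == null ? 0 : current.length;
    String[] updated = new String[length + 1];
    if (length > 0) {
      System.arraycopy(current, 0, updated, 0, length); // keep already-configured observers
    }
    updated[length] = className;                        // append the new observer last
    conf.setStrings(key, updated);
  }

  public static void main(String[] args) {
    Configuration conf = new Configuration();
    appendCoprocessor(conf, "hbase.coprocessor.master.classes",
        "org.apache.hadoop.hbase.quotas.MasterSpaceQuotaObserver");
    System.out.println(conf.get("hbase.coprocessor.master.classes"));
  }
}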
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ca9f6925/devapidocs/org/apache/hadoop/hbase/client/class-use/AsyncHBaseAdmin.NamespaceProcedureBiConsumer.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/client/class-use/AsyncHBaseAdmin.NamespaceProcedureBiConsumer.html b/devapidocs/org/apache/hadoop/hbase/client/class-use/AsyncHBaseAdmin.NamespaceProcedureBiConsumer.html deleted file mode 100644 index e4b2f30..000 --- a/devapidocs/org/apache/hadoop/hbase/client/class-use/AsyncHBaseAdmin.NamespaceProcedureBiConsumer.html +++ /dev/null @@ -1,175 +0,0 @@
-Uses of Class org.apache.hadoop.hbase.client.AsyncHBaseAdmin.NamespaceProcedureBiConsumer (Apache HBase 3.0.0-SNAPSHOT API)
-Packages that use AsyncHBaseAdmin.NamespaceProcedureBiConsumer: org.apache.hadoop.hbase.client (Provides HBase Client)
-Subclasses of AsyncHBaseAdmin.NamespaceProcedureBiConsumer in org.apache.hadoop.hbase.client:
-private class AsyncHBaseAdmin.CreateNamespaceProcedureBiConsumer
-private class AsyncHBaseAdmin.DeleteNamespaceProcedureBiConsumer
-private class AsyncHBaseAdmin.ModifyNamespaceProcedureBiConsumer

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ca9f6925/devapidocs/org/apache/hadoop/hbase/client/class-use/AsyncHBaseAdmin.ProcedureBiConsumer.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/client/class-use/AsyncHBaseAdmin.ProcedureBiConsumer.html b/devapidocs/org/apache/hadoop/hbase/client/class-use/AsyncHBaseAdmin.ProcedureBiConsumer.html deleted file mode 100644 index 1fb4fcd..000 --- a/devapidocs/org/apache/hadoop/hbase/client/class-use/AsyncHBaseAdmin.ProcedureBiConsumer.html +++ /dev/null @@ -1,235 +0,0 @@
-Uses of Class org.apache.hadoop.hbase.client.AsyncHBaseAdmin.ProcedureBiConsumer (Apache HBase 3.0.0-SNAPSHOT API)
-Packages that use AsyncHBaseAdmin.ProcedureBiConsumer: org.apache.hadoop.hbase.client (Provides HBase Client)
-Subclasses of AsyncHBaseAdmin.ProcedureBiConsumer in org.apache.hadoop.hbase.client:
-private class AsyncHBaseAdmin.AddColumnFamilyProcedureBiConsumer
-private class AsyncHBaseAdmin.CreateNamespaceProcedureBiConsumer
-private class AsyncHBaseAdmin.CreateTableProcedureBiConsumer
-private
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8e3b63ca/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.CostFromRegionLoadFunction.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.CostFromRegionLoadFunction.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.CostFromRegionLoadFunction.html index c895448..545d4da 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.CostFromRegionLoadFunction.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.CostFromRegionLoadFunction.html @@ -1294,425 +1294,426 @@ 1286 } 1287 1288 // We normalize locality to be a score between 0 and 1.0 representing how good it -1289 // is compared to how good it could be -1290 locality /= bestLocality; -1291} -1292 -1293@Override -1294protected void regionMoved(int region, int oldServer, int newServer) { -1295 int oldEntity = type == LocalityType.SERVER ? oldServer : cluster.serverIndexToRackIndex[oldServer]; -1296 int newEntity = type == LocalityType.SERVER ? newServer : cluster.serverIndexToRackIndex[newServer]; -1297 if (this.services == null) { -1298return; -1299 } -1300 double localityDelta = getWeightedLocality(region, newEntity) - getWeightedLocality(region, oldEntity); -1301 double normalizedDelta = localityDelta / bestLocality; -1302 locality += normalizedDelta; -1303} -1304 -1305@Override -1306double cost() { -1307 return 1 - locality; -1308} -1309 -1310private int getMostLocalEntityForRegion(int region) { -1311 return cluster.getOrComputeRegionsToMostLocalEntities(type)[region]; -1312} -1313 -1314private double getWeightedLocality(int region, int entity) { -1315 return cluster.getOrComputeWeightedLocality(region, entity, type); -1316} -1317 -1318 } -1319 -1320 static class ServerLocalityCostFunction extends LocalityBasedCostFunction { -1321 -1322private static final String LOCALITY_COST_KEY = "hbase.master.balancer.stochastic.localityCost"; -1323private static final float DEFAULT_LOCALITY_COST = 25; -1324 -1325 ServerLocalityCostFunction(Configuration conf, MasterServices srv) { -1326 super( -1327 conf, -1328 srv, -1329 LocalityType.SERVER, -1330 LOCALITY_COST_KEY, -1331 DEFAULT_LOCALITY_COST -1332 ); -1333} -1334 -1335@Override -1336int regionIndexToEntityIndex(int region) { -1337 return cluster.regionIndexToServerIndex[region]; -1338} -1339 } -1340 -1341 static class RackLocalityCostFunction extends LocalityBasedCostFunction { -1342 -1343private static final String RACK_LOCALITY_COST_KEY = "hbase.master.balancer.stochastic.rackLocalityCost"; -1344private static final float DEFAULT_RACK_LOCALITY_COST = 15; -1345 -1346public RackLocalityCostFunction(Configuration conf, MasterServices services) { -1347 super( -1348 conf, -1349 services, -1350 LocalityType.RACK, -1351 RACK_LOCALITY_COST_KEY, -1352 DEFAULT_RACK_LOCALITY_COST -1353 ); -1354} -1355 -1356@Override -1357int regionIndexToEntityIndex(int region) { -1358 return cluster.getRackForRegion(region); -1359} -1360 } -1361 -1362 /** -1363 * Base class the allows writing costs functions from rolling average of some -1364 * number from RegionLoad. 
-1365 */ -1366 abstract static class CostFromRegionLoadFunction extends CostFunction { -1367 -1368private ClusterStatus clusterStatus = null; -1369private Map<String, Deque<BalancerRegionLoad>> loads = null; -1370private double[] stats = null; -1371 CostFromRegionLoadFunction(Configuration conf) { -1372 super(conf); -1373} -1374 -1375void setClusterStatus(ClusterStatus status) { -1376 this.clusterStatus = status; -1377} -1378 -1379void setLoads(Map<String, Deque<BalancerRegionLoad>> l) { -1380 this.loads = l; -1381} -1382 -1383@Override -1384double cost() { -1385 if (clusterStatus == null || loads == null) { -1386return 0; -1387 } -1388 -1389 if (stats == null || stats.length != cluster.numServers) { -1390stats = new double[cluster.numServers]; -1391 } -1392 -1393 for (int i = 0; i < stats.length; i++) { -1394//Cost this server has from RegionLoad -1395long cost = 0; -1396 -1397// for every region on this server get the rl -1398for(int regionIndex:cluster.regionsPerServer[i]) { -1399 Collection<BalancerRegionLoad> regionLoadList = cluster.regionLoads[regionIndex]; -1400 -1401 // Now if we found a region load get the type of cost that was requested. -1402
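CostFromRegionLoadFunction above caches a per-server stats array and derives a cost from rolling averages of RegionLoad numbers. A self-contained sketch of that rolling-average-to-cost idea, not the HBase class itself: each server keeps a bounded window of samples, and the spread of the window averages is mapped onto the [0, 1] scale every cost function must return (the normalization formula is an assumption, chosen to be 0 when balanced and 1 when one server carries all load):

import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Deque;
import java.util.List;

/** Rolling-average cost in the spirit of CostFromRegionLoadFunction: keep a bounded
 *  window of load samples per server and map the imbalance of the averages to [0, 1]. */
public class RollingLoadCost {
  private final int window;
  private final List<Deque<Double>> samples = new ArrayList<>();

  public RollingLoadCost(int numServers, int window) {
    this.window = window;
    for (int i = 0; i < numServers; i++) {
      samples.add(new ArrayDeque<>());
    }
  }

  /** Record a load reading for one server, evicting the oldest sample past the window. */
  public void record(int server, double load) {
    Deque<Double> q = samples.get(server);
    if (q.size() >= window) {
      q.removeFirst();
    }
    q.addLast(load);
  }

  /** 0 = the rolling averages are perfectly balanced, 1 = one server carries everything. */
  public double cost() {
    int n = samples.size();
    if (n <= 1) {
      return 0; // a single server is trivially balanced
    }
    double total = 0;
    double max = 0;
    for (Deque<Double> q : samples) {
      double avg = q.stream().mapToDouble(Double::doubleValue).average().orElse(0);
      total += avg;
      max = Math.max(max, avg);
    }
    if (total == 0) {
      return 0; // no load anywhere: nothing to balance
    }
    // max/total ranges from 1/n (balanced) to 1 (all on one server); rescale to [0, 1].
    double balanced = 1.0 / n;
    return (max / total - balanced) / (1 - balanced);
  }
}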
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/aecb1286/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.AddColumnFamilyProcedureBiConsumer.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.AddColumnFamilyProcedureBiConsumer.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.AddColumnFamilyProcedureBiConsumer.html index 01496d6..dc12c09 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.AddColumnFamilyProcedureBiConsumer.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.AddColumnFamilyProcedureBiConsumer.html @@ -48,2406 +48,2267 @@ 040 041import io.netty.util.Timeout; 042import io.netty.util.TimerTask; -043import java.util.stream.Stream; -044import org.apache.commons.io.IOUtils; -045import org.apache.commons.logging.Log; -046import org.apache.commons.logging.LogFactory; -047import org.apache.hadoop.hbase.HRegionInfo; -048import org.apache.hadoop.hbase.HRegionLocation; -049import org.apache.hadoop.hbase.MetaTableAccessor; -050import org.apache.hadoop.hbase.MetaTableAccessor.QueryType; -051import org.apache.hadoop.hbase.NotServingRegionException; -052import org.apache.hadoop.hbase.ProcedureInfo; -053import org.apache.hadoop.hbase.RegionLocations; -054import org.apache.hadoop.hbase.ServerName; -055import org.apache.hadoop.hbase.NamespaceDescriptor; -056import org.apache.hadoop.hbase.HConstants; -057import org.apache.hadoop.hbase.TableExistsException; -058import org.apache.hadoop.hbase.TableName; -059import org.apache.hadoop.hbase.AsyncMetaTableAccessor; -060import org.apache.hadoop.hbase.TableNotDisabledException; -061import org.apache.hadoop.hbase.TableNotEnabledException; -062import org.apache.hadoop.hbase.TableNotFoundException; -063import org.apache.hadoop.hbase.UnknownRegionException; -064import org.apache.hadoop.hbase.classification.InterfaceAudience; -065import org.apache.hadoop.hbase.classification.InterfaceStability; -066import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.AdminRequestCallerBuilder; -067import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.MasterRequestCallerBuilder; -068import org.apache.hadoop.hbase.client.Scan.ReadType; -069import org.apache.hadoop.hbase.client.replication.ReplicationSerDeHelper; -070import org.apache.hadoop.hbase.client.replication.TableCFs; -071import org.apache.hadoop.hbase.exceptions.DeserializationException; -072import org.apache.hadoop.hbase.ipc.HBaseRpcController; -073import org.apache.hadoop.hbase.quotas.QuotaFilter; -074import org.apache.hadoop.hbase.quotas.QuotaSettings; -075import org.apache.hadoop.hbase.quotas.QuotaTableUtil; -076import org.apache.hadoop.hbase.replication.ReplicationException; -077import org.apache.hadoop.hbase.replication.ReplicationPeerConfig; -078import org.apache.hadoop.hbase.replication.ReplicationPeerDescription; -079import org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback; -080import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; -081import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter; -082import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService; -083import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest; -084import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse; -085import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest; -086import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse; -087import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest; -088import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse; -089import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionRequest; -090import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionResponse; -091import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionRequest; -092import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionResponse; -093import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription; -094import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema; -095import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest; -096import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse; -097import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest; -098import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse; -099import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest; -100import
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a719cd00/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RandomCandidateGenerator.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RandomCandidateGenerator.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RandomCandidateGenerator.html index 6de986f..c895448 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RandomCandidateGenerator.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RandomCandidateGenerator.html @@ -26,1592 +26,1693 @@ 018package org.apache.hadoop.hbase.master.balancer; 019 020import java.util.ArrayDeque; -021import java.util.Arrays; -022import java.util.Collection; -023import java.util.Deque; -024import java.util.HashMap; -025import java.util.LinkedList; -026import java.util.List; -027import java.util.Map; -028import java.util.Map.Entry; -029import java.util.Random; -030 -031import org.apache.commons.logging.Log; -032import org.apache.commons.logging.LogFactory; -033import org.apache.hadoop.conf.Configuration; -034import org.apache.hadoop.hbase.ClusterStatus; -035import org.apache.hadoop.hbase.HBaseInterfaceAudience; -036import org.apache.hadoop.hbase.HConstants; -037import org.apache.hadoop.hbase.HRegionInfo; -038import org.apache.hadoop.hbase.RegionLoad; -039import org.apache.hadoop.hbase.ServerLoad; -040import org.apache.hadoop.hbase.ServerName; -041import org.apache.hadoop.hbase.TableName; -042import org.apache.hadoop.hbase.classification.InterfaceAudience; -043import org.apache.hadoop.hbase.master.MasterServices; -044import org.apache.hadoop.hbase.master.RegionPlan; -045import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.Action; -046import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.Action.Type; -047import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.AssignRegionAction; -048import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.MoveRegionAction; -049import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.SwapRegionsAction; -050import org.apache.hadoop.hbase.util.Bytes; -051import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; -052 -053import com.google.common.collect.Lists; -054 -055/** -056 * <p>This is a best effort load balancer. Given a Cost function F(C) =&gt; x It will -057 * randomly try and mutate the cluster to Cprime. If F(Cprime) &lt; F(C) then the -058 * new cluster state becomes the plan. It includes costs functions to compute the cost of:</p> -059 * <ul> -060 * <li>Region Load</li> -061 * <li>Table Load</li> -062 * <li>Data Locality</li> -063 * <li>Memstore Sizes</li> -064 * <li>Storefile Sizes</li> -065 * </ul> -066 * -067 * -068 * <p>Every cost function returns a number between 0 and 1 inclusive; where 0 is the lowest cost -069 * best solution, and 1 is the highest possible cost and the worst solution.
The computed costs are -070 * scaled by their respective multipliers:</p> +021import java.util.ArrayList; +022import java.util.Arrays; +023import java.util.Collection; +024import java.util.Collections; +025import java.util.Deque; +026import java.util.HashMap; +027import java.util.LinkedList; +028import java.util.List; +029import java.util.Map; +030import java.util.Map.Entry; +031import java.util.Random; +032 +033import org.apache.commons.logging.Log; +034import org.apache.commons.logging.LogFactory; +035import org.apache.hadoop.conf.Configuration; +036import org.apache.hadoop.hbase.ClusterStatus; +037import org.apache.hadoop.hbase.HBaseInterfaceAudience; +038import org.apache.hadoop.hbase.HConstants; +039import org.apache.hadoop.hbase.HRegionInfo; +040import org.apache.hadoop.hbase.RegionLoad; +041import org.apache.hadoop.hbase.ServerLoad; +042import org.apache.hadoop.hbase.ServerName; +043import org.apache.hadoop.hbase.TableName; +044import org.apache.hadoop.hbase.classification.InterfaceAudience; +045import org.apache.hadoop.hbase.master.MasterServices; +046import org.apache.hadoop.hbase.master.RegionPlan; +047import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.Action; +048import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.Action.Type; +049import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.AssignRegionAction; +050import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.LocalityType; +051import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.MoveRegionAction; +052import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.SwapRegionsAction; +053import org.apache.hadoop.hbase.util.Bytes; +054import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; +055 +056import com.google.common.base.Optional; +057import
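The class comment above describes the whole algorithm: randomly mutate the cluster C into Cprime and adopt it only when F(Cprime) < F(C). A toy, self-contained sketch of that accept-if-cheaper loop over a plain region-to-server array; this is illustration only, with a single max-skew cost function standing in for HBase's weighted sum of normalized cost functions:

import java.util.Arrays;
import java.util.Random;

/** Toy version of the StochasticLoadBalancer idea: mutate a region->server map at random
 *  and keep the mutation only when the cost function goes down. */
public class GreedyBalancerSketch {
  public static void main(String[] args) {
    int regions = 20, servers = 4, steps = 10_000;
    Random rnd = new Random(42);
    int[] assignment = new int[regions];          // all regions start on server 0 (worst case)

    double best = cost(assignment, servers);
    for (int i = 0; i < steps; i++) {
      int region = rnd.nextInt(regions);
      int old = assignment[region];
      assignment[region] = rnd.nextInt(servers);  // random mutation: move one region
      double candidate = cost(assignment, servers);
      if (candidate < best) {
        best = candidate;                         // F(Cprime) < F(C): keep the new plan
      } else {
        assignment[region] = old;                 // otherwise roll the move back
      }
    }
    System.out.println("final cost=" + best + " assignment=" + Arrays.toString(assignment));
  }

  /** Cost in [0,1]: 0 when regions are spread evenly, 1 when one server holds them all. */
  static double cost(int[] assignment, int servers) {
    int[] counts = new int[servers];
    for (int s : assignment) {
      counts[s]++;
    }
    int max = 0;
    for (int c : counts) {
      max = Math.max(max, c);
    }
    double balanced = (double) assignment.length / servers;
    return (max - balanced) / (assignment.length - balanced);
  }
}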
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/476c54ed/devapidocs/src-html/org/apache/hadoop/hbase/quotas/SnapshotQuotaObserverChore.StoreFileReference.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/quotas/SnapshotQuotaObserverChore.StoreFileReference.html b/devapidocs/src-html/org/apache/hadoop/hbase/quotas/SnapshotQuotaObserverChore.StoreFileReference.html new file mode 100644 index 000..b7ca7d9 --- /dev/null +++ b/devapidocs/src-html/org/apache/hadoop/hbase/quotas/SnapshotQuotaObserverChore.StoreFileReference.html @@ -0,0 +1,615 @@ +http://www.w3.org/TR/html4/loose.dtd;> + + +Source code + + + + +001/* +002 * Licensed to the Apache Software Foundation (ASF) under one or more +003 * contributor license agreements. See the NOTICE file distributed with +004 * this work for additional information regarding copyright ownership. +005 * The ASF licenses this file to you under the Apache License, Version 2.0 +006 * (the "License"); you may not use this file except in compliance with +007 * the License. You may obtain a copy of the License at +008 * +009 * http://www.apache.org/licenses/LICENSE-2.0 +010 * +011 * Unless required by applicable law or agreed to in writing, software +012 * distributed under the License is distributed on an "AS IS" BASIS, +013 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +014 * See the License for the specific language governing permissions and +015 * limitations under the License. +016 */ +017package org.apache.hadoop.hbase.quotas; +018 +019import java.io.IOException; +020import java.util.ArrayList; +021import java.util.Arrays; +022import java.util.Collection; +023import java.util.Collections; +024import java.util.HashSet; +025import java.util.List; +026import java.util.Map; +027import java.util.Map.Entry; +028import java.util.Objects; +029import java.util.Set; +030import java.util.concurrent.TimeUnit; +031import java.util.function.Predicate; +032import java.util.stream.Collectors; +033 +034import org.apache.commons.lang.builder.HashCodeBuilder; +035import org.apache.commons.logging.Log; +036import org.apache.commons.logging.LogFactory; +037import org.apache.hadoop.conf.Configuration; +038import org.apache.hadoop.fs.FileStatus; +039import org.apache.hadoop.fs.FileSystem; +040import org.apache.hadoop.fs.Path; +041import org.apache.hadoop.hbase.HRegionInfo; +042import org.apache.hadoop.hbase.ScheduledChore; +043import org.apache.hadoop.hbase.Stoppable; +044import org.apache.hadoop.hbase.TableName; +045import org.apache.hadoop.hbase.classification.InterfaceAudience; +046import org.apache.hadoop.hbase.client.Admin; +047import org.apache.hadoop.hbase.client.Connection; +048import org.apache.hadoop.hbase.client.Table; +049import org.apache.hadoop.hbase.master.HMaster; +050import org.apache.hadoop.hbase.master.MetricsMaster; +051import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription; +052import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotRegionManifest; +053import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.FamilyFiles; +054import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotRegionManifest.StoreFile; +055import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils; +056import org.apache.hadoop.hbase.snapshot.SnapshotManifest; +057import org.apache.hadoop.hbase.util.FSUtils; +058import org.apache.hadoop.hbase.util.HFileArchiveUtil; +059import org.apache.hadoop.util.StringUtils; 
+060 +061import com.google.common.collect.HashMultimap; +062import com.google.common.collect.Multimap; +063 +064/** +065 * A Master-invoked {@code Chore} that computes the size of each snapshot which was created from +066 * a table which has a space quota. +067 */ +068@InterfaceAudience.Private +069public class SnapshotQuotaObserverChore extends ScheduledChore { +070 private static final Log LOG = LogFactory.getLog(SnapshotQuotaObserverChore.class); +071 static final String SNAPSHOT_QUOTA_CHORE_PERIOD_KEY = +072 "hbase.master.quotas.snapshot.chore.period"; +073 static final int SNAPSHOT_QUOTA_CHORE_PERIOD_DEFAULT = 1000 * 60 * 5; // 5 minutes in millis +074 +075 static final String SNAPSHOT_QUOTA_CHORE_DELAY_KEY = +076 "hbase.master.quotas.snapshot.chore.delay"; +077 static final long SNAPSHOT_QUOTA_CHORE_DELAY_DEFAULT = 1000L * 60L; // 1 minute in millis +078 +079 static final String SNAPSHOT_QUOTA_CHORE_TIMEUNIT_KEY = +080 "hbase.master.quotas.snapshot.chore.timeunit"; +081 static final String SNAPSHOT_QUOTA_CHORE_TIMEUNIT_DEFAULT = TimeUnit.MILLISECONDS.name(); +082 +083 private final Connection conn; +084 private final Configuration conf; +085 private final MetricsMaster metrics; +086 private final FileSystem fs; +087 +088 public SnapshotQuotaObserverChore(HMaster
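The chore's schedule comes from the three hbase.master.quotas.snapshot.chore.* keys defined above. A minimal sketch of overriding them programmatically; in a deployment the same keys would normally live in hbase-site.xml, and the values below are arbitrary examples:

import org.apache.hadoop.conf.Configuration;

public class SnapshotChoreTuning {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Compute snapshot sizes every 10 minutes instead of the 5-minute default.
    conf.setInt("hbase.master.quotas.snapshot.chore.period", 10 * 60 * 1000);
    // Wait 2 minutes after master startup before the first run (default is 1 minute).
    conf.setLong("hbase.master.quotas.snapshot.chore.delay", 2 * 60 * 1000L);
    // The period and delay above are interpreted in this time unit.
    conf.set("hbase.master.quotas.snapshot.chore.timeunit", "MILLISECONDS");
    System.out.println(conf.get("hbase.master.quotas.snapshot.chore.period"));
  }
}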
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/77a552c4/devapidocs/org/apache/hadoop/hbase/client/HTableWrapper.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/client/HTableWrapper.html b/devapidocs/org/apache/hadoop/hbase/client/HTableWrapper.html index b33bdbd..09eba46 100644 --- a/devapidocs/org/apache/hadoop/hbase/client/HTableWrapper.html +++ b/devapidocs/org/apache/hadoop/hbase/client/HTableWrapper.html @@ -50,7 +50,7 @@ var activeTableTab = "activeTableTab"; PrevClass -NextClass +NextClass Frames @@ -1816,7 +1816,7 @@ publicint PrevClass -NextClass +NextClass Frames

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/77a552c4/devapidocs/org/apache/hadoop/hbase/client/ImmutableHColumnDescriptor.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/client/ImmutableHColumnDescriptor.html b/devapidocs/org/apache/hadoop/hbase/client/ImmutableHColumnDescriptor.html new file mode 100644 index 000..c800c26 --- /dev/null +++ b/devapidocs/org/apache/hadoop/hbase/client/ImmutableHColumnDescriptor.html @@ -0,0 +1,339 @@
+ImmutableHColumnDescriptor (Apache HBase 3.0.0-SNAPSHOT API)
+org.apache.hadoop.hbase.client
+Class ImmutableHColumnDescriptor
+java.lang.Object
+  org.apache.hadoop.hbase.HColumnDescriptor
+    org.apache.hadoop.hbase.client.ImmutableHColumnDescriptor
+All Implemented Interfaces: Comparable<HColumnDescriptor>, ColumnFamilyDescriptor
+Deprecated.
+@Deprecated @InterfaceAudience.Private
+public class ImmutableHColumnDescriptor extends HColumnDescriptor
+Read-only column descriptor.
+Field Summary
+Fields inherited from class org.apache.hadoop.hbase.HColumnDescriptor: BLOCKCACHE, BLOCKSIZE, BLOOMFILTER, CACHE_BLOOMS_ON_WRITE, CACHE_DATA_IN_L1, CACHE_DATA_ON_WRITE, CACHE_INDEX_ON_WRITE, COMPRESS_TAGS, COMPRESSION, COMPRESSION_COMPACT, DATA_BLOCK_ENCODING, DEFAULT_BLOCKCACHE, DEFAULT_BLOCKSIZE, DEFAULT_BLOOMFILTER, DEFAULT_CACHE_BLOOMS_ON_WRITE, DEFAULT_CACHE_DATA_IN_L1, DEFAULT_CACHE_DATA_ON_WRITE, DEFAULT_CACHE_INDEX_ON_WRITE, DEFAULT_COMPRESS_TAGS, DEFAULT_COMPRESSION, DEFAULT_DATA_BLOCK_ENCODING, DEFAULT_DFS_REPLICATION, DEFAULT_ENCODE_ON_DISK, DEFAULT_EVICT_BLOCKS_ON_CLOSE, DEFAULT_IN_MEMORY, DEFAULT_KEEP_DELETED, DEFAULT_MIN_VERSIONS, DEFAULT_MOB_COMPACT_PARTITION_POLICY, DEFAULT_MOB_THRESHOLD, DEFAULT_PREFETCH_BLOCKS_ON_OPEN, DEFAULT_REPLICATION_SCOPE, DEFAULT_TTL, DEFAULT_VERSIONS, delegatee, DFS_REPLICATION, ENCODE_ON_DISK, ENCRYPTION, ENCRYPTION_KEY, EVICT_BLOCKS_ON_CLOSE, FOREVER, IN_MEMORY_COMPACTION, IS_MOB, IS_MOB_BYTES, KEEP_DELETED_CELLS, LENGTH, MIN_VERSIONS, MOB_COMPACT_PARTITION_POLICY, MOB_COMPACT_PARTITION_POLICY_BYTES, MOB_THRESHOLD, MOB_THRESHOLD_BYTES, PREFETCH_BLOCKS_ON_OPEN, REPLICATION_SCOPE, REPLICATION_SCOPE_BYTES, STORAGE_POLICY, TTL
+Fields inherited from interface org.apache.hadoop.hbase.client.ColumnFamilyDescriptor: COMPARATOR
+Constructor Summary
+ImmutableHColumnDescriptor(ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor desc) Deprecated.
+ImmutableHColumnDescriptor(HColumnDescriptor desc) Deprecated.
+Method Summary
+All Methods / Instance Methods / Concrete Methods / Deprecated Methods
+Modifier and
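ImmutableHColumnDescriptor is documented above as a read-only column descriptor layered over a modifiable delegate. A generic sketch of that read-only-wrapper idea; the class and method names are illustrative, not the HBase API, and the real class instead blocks writes by refusing to expose its delegate for modification:

import java.util.HashMap;
import java.util.Map;

/** Read-only wrapper pattern: expose getters, reject every mutation loudly. */
public class ImmutableSettings {
  private final Map<String, String> values;

  public ImmutableSettings(Map<String, String> source) {
    this.values = new HashMap<>(source); // defensive copy so the source cannot leak changes in
  }

  public String get(String key) {
    return values.get(key);
  }

  public void set(String key, String value) {
    // Failing fast beats silently diverging from the wrapped descriptor.
    throw new UnsupportedOperationException("read-only descriptor");
  }
}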
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b44796ef/devapidocs/org/apache/hadoop/hbase/backup/impl/BackupCommands.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/backup/impl/BackupCommands.html b/devapidocs/org/apache/hadoop/hbase/backup/impl/BackupCommands.html index 1e51efa..503160a 100644 --- a/devapidocs/org/apache/hadoop/hbase/backup/impl/BackupCommands.html +++ b/devapidocs/org/apache/hadoop/hbase/backup/impl/BackupCommands.html @@ -4,7 +4,7 @@ -BackupCommands (Apache HBase 2.0.0-SNAPSHOT API) +BackupCommands (Apache HBase 3.0.0-SNAPSHOT API) @@ -12,7 +12,7 @@ -var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":10,"i65":10,"i66":10,"i67":10,"i68":10,"i69":10,"i70":10,"i71":10,"i72":10,"i73":10,"i74":10,"i75":10,"i76":10,"i77":10,"i78":10,"i79":10,"i80":10,"i81":10,"i82":10,"i83":10,"i84":10,"i85":10,"i86":10,"i87":10,"i88":10,"i89":10,"i90":10,"i91":10,"i92":10}; +var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":10,"i65":10,"i66":10,"i67":10,"i68":10,"i69":10,"i70":10,"i71":10,"i72":10,"i73":10,"i74":10,"i75":10,"i76":10,"i77":10,"i78":10,"i79":10,"i80":10,"i81":10,"i82":10,"i83":10,"i84":10,"i85":10,"i86":10,"i87":10,"i88":10,"i89":10,"i90":10,"i91":10,"i92":10,"i93":10}; var tabs = {65535:["t0","All Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]}; var altColor = "altColor"; var rowColor = "rowColor"; @@ -119,7 +119,7 @@ var activeTableTab = "activeTableTab"; @InterfaceAudience.Private -public class MasterRpcServices +public class MasterRpcServices extends RSRpcServices implements org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MasterService.BlockingInterface, org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStatusService.BlockingInterface, org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockService.BlockingInterface Implements the master RPC services. 
@@ -306,38 +306,43 @@ implements org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.Master org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableRequestrequest) +org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsResponse +dispatchMergingRegions(org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcControllercontroller, + org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsRequestrequest) + + org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersResponse drainRegionServers(org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcControllercontroller, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersRequestrequest) - + org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableCatalogJanitorResponse enableCatalogJanitor(org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcControllerc, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableCatalogJanitorRequestreq) - + org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerResponse enableReplicationPeer(org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcControllercontroller, org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerRequestrequest) - + org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableResponse enableTable(org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcControllercontroller, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableRequestrequest) - + org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceResponse execMasterService(org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcControllercontroller,
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dab57116/devapidocs/org/apache/hadoop/hbase/ipc/class-use/NettyRpcServer.MessageDecoder.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/ipc/class-use/NettyRpcServer.MessageDecoder.html b/devapidocs/org/apache/hadoop/hbase/ipc/class-use/NettyRpcServer.MessageDecoder.html deleted file mode 100644 index 10a0873..000 --- a/devapidocs/org/apache/hadoop/hbase/ipc/class-use/NettyRpcServer.MessageDecoder.html +++ /dev/null @@ -1,125 +0,0 @@
-Uses of Class org.apache.hadoop.hbase.ipc.NettyRpcServer.MessageDecoder (Apache HBase 2.0.0-SNAPSHOT API)
-No usage of org.apache.hadoop.hbase.ipc.NettyRpcServer.MessageDecoder

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dab57116/devapidocs/org/apache/hadoop/hbase/ipc/class-use/NettyRpcServer.MessageEncoder.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/ipc/class-use/NettyRpcServer.MessageEncoder.html b/devapidocs/org/apache/hadoop/hbase/ipc/class-use/NettyRpcServer.MessageEncoder.html deleted file mode 100644 index 7b4a91d..000 --- a/devapidocs/org/apache/hadoop/hbase/ipc/class-use/NettyRpcServer.MessageEncoder.html +++ /dev/null @@ -1,125 +0,0 @@
-Uses of Class org.apache.hadoop.hbase.ipc.NettyRpcServer.MessageEncoder (Apache HBase 2.0.0-SNAPSHOT API)
-No usage of org.apache.hadoop.hbase.ipc.NettyRpcServer.MessageEncoder

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dab57116/devapidocs/org/apache/hadoop/hbase/ipc/class-use/NettyRpcServer.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/ipc/class-use/NettyRpcServer.html b/devapidocs/org/apache/hadoop/hbase/ipc/class-use/NettyRpcServer.html index 7c8bddc..6c61471 100644 --- a/devapidocs/org/apache/hadoop/hbase/ipc/class-use/NettyRpcServer.html +++ b/devapidocs/org/apache/hadoop/hbase/ipc/class-use/NettyRpcServer.html @@ -97,6 +97,19 @@ Uses of NettyRpcServer in org.apache.hadoop.hbase.ipc + +Fields in org.apache.hadoop.hbase.ipc declared as NettyRpcServer + +Modifier and Type +Field and Description + + + +private NettyRpcServer
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c635e71b/devapidocs/src-html/org/apache/hadoop/hbase/zookeeper/ZKUtil.ZKUtilOp.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/zookeeper/ZKUtil.ZKUtilOp.html b/devapidocs/src-html/org/apache/hadoop/hbase/zookeeper/ZKUtil.ZKUtilOp.html index bc14b2e..547dec5 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/zookeeper/ZKUtil.ZKUtilOp.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/zookeeper/ZKUtil.ZKUtilOp.html @@ -139,2012 +139,2013 @@ 131int retry = conf.getInt("zookeeper.recovery.retry", 3); 132int retryIntervalMillis = 133 conf.getInt("zookeeper.recovery.retry.intervalmill", 1000); -134zkDumpConnectionTimeOut = conf.getInt("zookeeper.dump.connection.timeout", -1351000); -136return new RecoverableZooKeeper(ensemble, timeout, watcher, -137retry, retryIntervalMillis, identifier); -138 } -139 -140 /** -141 * Log in the current zookeeper server process using the given configuration -142 * keys for the credential file and login principal. -143 * -144 * <p><strong>This is only applicable when running on secure hbase</strong> -145 * On regular HBase (without security features), this will safely be ignored. -146 * </p> -147 * -148 * @param conf The configuration data to use -149 * @param keytabFileKey Property key used to configure the path to the credential file -150 * @param userNameKey Property key used to configure the login principal -151 * @param hostname Current hostname to use in any credentials -152 * @throws IOException underlying exception from SecurityUtil.login() call -153 */ -154 public static void loginServer(Configuration conf, String keytabFileKey, -155 String userNameKey, String hostname) throws IOException { -156login(conf, keytabFileKey, userNameKey, hostname, -157 ZooKeeperSaslServer.LOGIN_CONTEXT_NAME_KEY, -158 JaasConfiguration.SERVER_KEYTAB_KERBEROS_CONFIG_NAME); -159 } -160 -161 /** -162 * Log in the current zookeeper client using the given configuration -163 * keys for the credential file and login principal. -164 * -165 * <p><strong>This is only applicable when running on secure hbase</strong> -166 * On regular HBase (without security features), this will safely be ignored. -167 * </p> -168 * -169 * @param conf The configuration data to use -170 * @param keytabFileKey Property key used to configure the path to the credential file -171 * @param userNameKey Property key used to configure the login principal -172 * @param hostname Current hostname to use in any credentials -173 * @throws IOException underlying exception from SecurityUtil.login() call -174 */ -175 public static void loginClient(Configuration conf, String keytabFileKey, -176 String userNameKey, String hostname) throws IOException { -177login(conf, keytabFileKey, userNameKey, hostname, -178 ZooKeeperSaslClient.LOGIN_CONTEXT_NAME_KEY, -179 JaasConfiguration.CLIENT_KEYTAB_KERBEROS_CONFIG_NAME); -180 } -181 -182 /** -183 * Log in the current process using the given configuration keys for the -184 * credential file and login principal. -185 * -186 * <p><strong>This is only applicable when running on secure hbase</strong> -187 * On regular HBase (without security features), this will safely be ignored.
-188 * /p -189 * -190 * @param conf The configuration data to use -191 * @param keytabFileKey Property key used to configure the path to the credential file -192 * @param userNameKey Property key used to configure the login principal -193 * @param hostname Current hostname to use in any credentials -194 * @param loginContextProperty property name to expose the entry name -195 * @param loginContextName jaas entry name -196 * @throws IOException underlying exception from SecurityUtil.login() call -197 */ -198 private static void login(Configuration conf, String keytabFileKey, -199 String userNameKey, String hostname, -200 String loginContextProperty, String loginContextName) -201 throws IOException { -202if (!isSecureZooKeeper(conf)) -203 return; -204 -205// User has specified a jaas.conf, keep this one as the good one. -206// HBASE_OPTS="-Djava.security.auth.login.config=jaas.conf" -207if (System.getProperty("java.security.auth.login.config") != null) -208 return; -209 -210// No keytab specified, no auth -211String keytabFilename = conf.get(keytabFileKey); -212if (keytabFilename == null) { -213 LOG.warn("no keytab specified for: " + keytabFileKey); -214 return; -215} -216 -217String principalConfig = conf.get(userNameKey, System.getProperty("user.name")); -218String principalName = SecurityUtil.getServerPrincipal(principalConfig, hostname); -219 -220// Initialize the "jaas.conf" for
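The listing above shows the keytab/principal login helpers: they return early on non-secure clusters or when a jaas.conf is already supplied, and otherwise resolve the principal against the hostname. A minimal usage sketch, assuming the two configuration key names (they are common HBase keys, but not confirmed by this listing) and a placeholder hostname:

```java
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.zookeeper.ZKUtil;

public class SecureZkLoginExample {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    // Per the listing, this is safely a no-op on non-secure clusters and when
    // -Djava.security.auth.login.config points at a user-supplied jaas.conf.
    ZKUtil.loginClient(conf,
        "hbase.zookeeper.client.keytab.file",        // assumed keytab-path key
        "hbase.zookeeper.client.kerberos.principal", // assumed principal key
        "host.example.com");                         // hostname substituted into the principal
  }
}
```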
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8b4cf63f/devapidocs/org/apache/hadoop/hbase/snapshot/SnapshotManifestV2.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/snapshot/SnapshotManifestV2.html b/devapidocs/org/apache/hadoop/hbase/snapshot/SnapshotManifestV2.html index 9c7a5ad..3b4be74 100644 --- a/devapidocs/org/apache/hadoop/hbase/snapshot/SnapshotManifestV2.html +++ b/devapidocs/org/apache/hadoop/hbase/snapshot/SnapshotManifestV2.html @@ -214,11 +214,11 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html? (package private) static http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true; title="class or interface in java.util">Listorg.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotRegionManifest -loadRegionManifests(org.apache.hadoop.conf.Configurationconf, +loadRegionManifests(org.apache.hadoop.conf.Configurationconf, http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/Executor.html?is-external=true; title="class or interface in java.util.concurrent">Executorexecutor, org.apache.hadoop.fs.FileSystemfs, org.apache.hadoop.fs.PathsnapshotDir, - org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescriptiondesc, + org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescriptiondesc, intmanifestSizeLimit) @@ -303,7 +303,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html? Method Detail - + @@ -313,7 +313,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html? http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/Executor.html?is-external=true; title="class or interface in java.util.concurrent">Executorexecutor, org.apache.hadoop.fs.FileSystemfs, org.apache.hadoop.fs.PathsnapshotDir, - org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescriptiondesc, + org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescriptiondesc, intmanifestSizeLimit) throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true; title="class or interface in java.io">IOException http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8b4cf63f/devapidocs/org/apache/hadoop/hbase/snapshot/SnapshotReferenceUtil.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/snapshot/SnapshotReferenceUtil.html b/devapidocs/org/apache/hadoop/hbase/snapshot/SnapshotReferenceUtil.html index 0c18a04..3e138ba 100644 --- a/devapidocs/org/apache/hadoop/hbase/snapshot/SnapshotReferenceUtil.html +++ b/devapidocs/org/apache/hadoop/hbase/snapshot/SnapshotReferenceUtil.html @@ -218,19 +218,19 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html? private static http://docs.oracle.com/javase/8/docs/api/java/util/Set.html?is-external=true; title="class or interface in java.util">Sethttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">String -getHFileNames(org.apache.hadoop.conf.Configurationconf, +getHFileNames(org.apache.hadoop.conf.Configurationconf, org.apache.hadoop.fs.FileSystemfs, org.apache.hadoop.fs.PathsnapshotDir, - org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescriptionsnapshotDesc) + org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescriptionsnapshotDesc) Returns the store file names in the snapshot. 
static void -verifySnapshot(org.apache.hadoop.conf.Configurationconf, +verifySnapshot(org.apache.hadoop.conf.Configurationconf, org.apache.hadoop.fs.FileSystemfs, org.apache.hadoop.fs.PathsnapshotDir, - org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescriptionsnapshotDesc) +
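These hunks migrate the snapshot descriptor parameter from HBaseProtos.SnapshotDescription to SnapshotProtos.SnapshotDescription. A hedged sketch of calling the migrated verifySnapshot signature; the builder field (setName) and the thrown IOException are assumptions from the protobuf convention, not shown in the diff:

```java
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos;
import org.apache.hadoop.hbase.snapshot.SnapshotReferenceUtil;

public class VerifySnapshotExample {
  public static void verify(Configuration conf, FileSystem fs, Path snapshotDir)
      throws IOException {
    // Descriptor now comes from SnapshotProtos, not HBaseProtos.
    SnapshotProtos.SnapshotDescription desc = SnapshotProtos.SnapshotDescription
        .newBuilder()
        .setName("my_snapshot") // hypothetical snapshot name
        .build();
    // Checks the snapshot's referenced files under snapshotDir against the manifest.
    SnapshotReferenceUtil.verifySnapshot(conf, fs, snapshotDir, desc);
  }
}
```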
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f55ebeaa/devapidocs/src-html/org/apache/hadoop/hbase/ipc/ServerRpcConnection.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/ipc/ServerRpcConnection.html b/devapidocs/src-html/org/apache/hadoop/hbase/ipc/ServerRpcConnection.html new file mode 100644 index 000..50c4422 --- /dev/null +++ b/devapidocs/src-html/org/apache/hadoop/hbase/ipc/ServerRpcConnection.html @@ -0,0 +1,924 @@ +http://www.w3.org/TR/html4/loose.dtd;> + + +Source code + + + + +001/** +002 * Licensed to the Apache Software Foundation (ASF) under one +003 * or more contributor license agreements. See the NOTICE file +004 * distributed with this work for additional information +005 * regarding copyright ownership. The ASF licenses this file +006 * to you under the Apache License, Version 2.0 (the +007 * "License"); you may not use this file except in compliance +008 * with the License. You may obtain a copy of the License at +009 * +010 * http://www.apache.org/licenses/LICENSE-2.0 +011 * +012 * Unless required by applicable law or agreed to in writing, software +013 * distributed under the License is distributed on an "AS IS" BASIS, +014 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +015 * See the License for the specific language governing permissions and +016 * limitations under the License. +017 */ +018package org.apache.hadoop.hbase.ipc; +019 +020import java.io.ByteArrayInputStream; +021import java.io.ByteArrayOutputStream; +022import java.io.Closeable; +023import java.io.DataOutputStream; +024import java.io.IOException; +025import java.net.InetAddress; +026import java.net.InetSocketAddress; +027import java.nio.ByteBuffer; +028import java.nio.channels.Channels; +029import java.nio.channels.ReadableByteChannel; +030import java.security.GeneralSecurityException; +031import java.security.PrivilegedExceptionAction; +032import java.util.Properties; +033 +034import javax.security.sasl.Sasl; +035import javax.security.sasl.SaslException; +036import javax.security.sasl.SaslServer; +037 +038import org.apache.commons.crypto.cipher.CryptoCipherFactory; +039import org.apache.commons.crypto.random.CryptoRandom; +040import org.apache.commons.crypto.random.CryptoRandomFactory; +041import org.apache.hadoop.hbase.CellScanner; +042import org.apache.hadoop.hbase.DoNotRetryIOException; +043import org.apache.hadoop.hbase.client.VersionInfoUtil; +044import org.apache.hadoop.hbase.codec.Codec; +045import org.apache.hadoop.hbase.io.ByteBufferOutputStream; +046import org.apache.hadoop.hbase.io.crypto.aes.CryptoAES; +047import org.apache.hadoop.hbase.ipc.RpcServer.CallCleanup; +048import org.apache.hadoop.hbase.nio.ByteBuff; +049import org.apache.hadoop.hbase.nio.SingleByteBuff; +050import org.apache.hadoop.hbase.security.AccessDeniedException; +051import org.apache.hadoop.hbase.security.AuthMethod; +052import org.apache.hadoop.hbase.security.HBaseSaslRpcServer; +053import org.apache.hadoop.hbase.security.HBaseSaslRpcServer.SaslDigestCallbackHandler; +054import org.apache.hadoop.hbase.security.HBaseSaslRpcServer.SaslGssCallbackHandler; +055import org.apache.hadoop.hbase.security.SaslStatus; +056import org.apache.hadoop.hbase.security.SaslUtil; +057import org.apache.hadoop.hbase.security.User; +058import org.apache.hadoop.hbase.shaded.com.google.protobuf.BlockingService; +059import org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteInput; +060import org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString; +061import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream; +062import org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.MethodDescriptor; +063import org.apache.hadoop.hbase.shaded.com.google.protobuf.Message; +064import org.apache.hadoop.hbase.shaded.com.google.protobuf.TextFormat; +065import org.apache.hadoop.hbase.shaded.com.google.protobuf.UnsafeByteOperations; +066import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; +067import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.VersionInfo; +068import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos; +069import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.ConnectionHeader; +070import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.RequestHeader; +071import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformation; +072import org.apache.hadoop.hbase.util.Bytes; +073import org.apache.hadoop.io.BytesWritable; +074import org.apache.hadoop.io.Writable; +075import org.apache.hadoop.io.WritableUtils; +076import org.apache.hadoop.io.compress.CompressionCodec; +077import org.apache.hadoop.security.UserGroupInformation; +078import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod; +079import org.apache.hadoop.security.authorize.AuthorizationException;
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8e0a5167/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.html index 7e37ca0..79c65e3 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.html @@ -70,1527 +70,1525 @@ 062import com.google.common.collect.Sets; 063 064/** -065 * The base class for load balancers. It provides the the functions used to by -066 * {@link org.apache.hadoop.hbase.master.AssignmentManager} to assign regions -067 * in the edge cases. It doesn't provide an implementation of the -068 * actual balancing algorithm. -069 * -070 */ -071public abstract class BaseLoadBalancer implements LoadBalancer { -072 protected static final int MIN_SERVER_BALANCE = 2; -073 private volatile boolean stopped = false; +065 * The base class for load balancers. It provides functions used by +066 * {@link org.apache.hadoop.hbase.master.AssignmentManager} to assign regions in the edge cases. +067 * It doesn't provide an implementation of the actual balancing algorithm. +068 */ +069public abstract class BaseLoadBalancer implements LoadBalancer { +070 protected static final int MIN_SERVER_BALANCE = 2; +071 private volatile boolean stopped = false; +072 +073 private static final List<HRegionInfo> EMPTY_REGION_LIST = new ArrayList<>(0); 074 -075 private static final List<HRegionInfo> EMPTY_REGION_LIST = new ArrayList<>(0); -076 -077 static final Predicate<ServerLoad> IDLE_SERVER_PREDICATOR -078= load -> load.getNumberOfRegions() == 0; +075 static final Predicate<ServerLoad> IDLE_SERVER_PREDICATOR +076= load -> load.getNumberOfRegions() == 0; +077 +078 protected final RegionLocationFinder regionFinder = new RegionLocationFinder(); 079 -080 protected final RegionLocationFinder regionFinder = new RegionLocationFinder(); -081 -082 private static class DefaultRackManager extends RackManager { -083@Override -084public String getRack(ServerName server) { -085 return UNKNOWN_RACK; -086} -087 } -088 -089 /** -090 * The constructor that uses the basic MetricsBalancer -091 */ -092 protected BaseLoadBalancer() { -093metricsBalancer = new MetricsBalancer(); -094 } -095 -096 /** -097 * This Constructor accepts an instance of MetricsBalancer, -098 * which will be used instead of creating a new one -099 */ -100 protected BaseLoadBalancer(MetricsBalancer metricsBalancer) { -101this.metricsBalancer = (metricsBalancer != null) ? metricsBalancer : new MetricsBalancer(); -102 } -103 -104 /** -105 * An efficient array based implementation similar to ClusterState for keeping -106 * the status of the cluster in terms of region assignment and distribution. -107 * LoadBalancers, such as StochasticLoadBalancer uses this Cluster object because of -108 * hundreds of thousands of hashmap manipulations are very costly, which is why this -109 * class uses mostly indexes and arrays. -110 * -111 * Cluster tracks a list of unassigned regions, region assignments, and the server -112 * topology in terms of server names, hostnames and racks. -113 */ -114 protected static class Cluster { -115ServerName[] servers; -116String[] hosts; // ServerName uniquely identifies a region server.
multiple RS can run on the same host -117String[] racks; -118boolean multiServersPerHost = false; // whether or not any host has more than one server -119 -120ArrayList<String> tables; -121HRegionInfo[] regions; -122Deque<BalancerRegionLoad>[] regionLoads; -123private RegionLocationFinder regionFinder; +080 private static class DefaultRackManager extends RackManager { +081@Override +082public String getRack(ServerName server) { +083 return UNKNOWN_RACK; +084} +085 } +086 +087 /** +088 * The constructor that uses the basic MetricsBalancer +089 */ +090 protected BaseLoadBalancer() { +091metricsBalancer = new MetricsBalancer(); +092 } +093 +094 /** +095 * This Constructor accepts an instance of MetricsBalancer, +096 * which will be used instead of creating a new one +097 */ +098 protected BaseLoadBalancer(MetricsBalancer metricsBalancer) { +099this.metricsBalancer = (metricsBalancer != null) ? metricsBalancer : new MetricsBalancer(); +100 } +101 +102 /** +103 * An efficient array based implementation similar to ClusterState for keeping +104 * the status of the cluster in terms of region assignment and distribution. +105 * LoadBalancers, such as StochasticLoadBalancer uses this Cluster object because of +106 * hundreds of thousands
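The Cluster javadoc above explains why the balancer avoids hashmaps: with hundreds of thousands of regions, it is cheaper to map entities to int indices once and then work on parallel arrays. An illustrative sketch of that idea (not HBase's actual Cluster code):

```java
import java.util.HashMap;
import java.util.Map;

public class TinyCluster {
  final String[] servers;       // server index -> server name
  final int[] regionToServer;   // region index -> server index
  final int[] regionsPerServer; // server index -> number of hosted regions
  private final Map<String, Integer> serverIndex = new HashMap<>();

  TinyCluster(String[] servers, int numRegions) {
    this.servers = servers;
    this.regionToServer = new int[numRegions];
    this.regionsPerServer = new int[servers.length];
    for (int i = 0; i < servers.length; i++) {
      serverIndex.put(servers[i], i); // name -> index lookup happens once
    }
  }

  // A candidate balancer move is O(1): two counters and one array write,
  // instead of repeated hashmap manipulations per region.
  void move(int region, int toServer) {
    regionsPerServer[regionToServer[region]]--;
    regionToServer[region] = toServer;
    regionsPerServer[toServer]++;
  }
}
```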
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/709b8fcc/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ProcedureFuture.WaitForStateCallable.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ProcedureFuture.WaitForStateCallable.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ProcedureFuture.WaitForStateCallable.html index f2c44db..6cf2fc8 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ProcedureFuture.WaitForStateCallable.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ProcedureFuture.WaitForStateCallable.html @@ -2581,7 +2581,7 @@ 2573try { 2574 // Restore snapshot 2575 get( -2576 internalRestoreSnapshotAsync(snapshotName, tableName, false), +2576 internalRestoreSnapshotAsync(snapshotName, tableName), 2577syncWaitTimeout, 2578TimeUnit.MILLISECONDS); 2579} catch (IOException e) { @@ -2590,7 +2590,7 @@ 2582 if (takeFailSafeSnapshot) { 2583try { 2584 get( -2585 internalRestoreSnapshotAsync(failSafeSnapshotSnapshotName, tableName, false), +2585 internalRestoreSnapshotAsync(failSafeSnapshotSnapshotName, tableName), 2586syncWaitTimeout, 2587TimeUnit.MILLISECONDS); 2588 String msg = "Restore snapshot=" + snapshotName + @@ -2633,7 +2633,7 @@ 2625 throw new TableNotDisabledException(tableName); 2626} 2627 -2628return internalRestoreSnapshotAsync(snapshotName, tableName, false); +2628return internalRestoreSnapshotAsync(snapshotName, tableName); 2629 } 2630 2631 @Override @@ -2643,1621 +2643,1614 @@ 2635 } 2636 2637 @Override -2638 public void cloneSnapshot(String snapshotName, TableName tableName, boolean restoreAcl) +2638 public void cloneSnapshot(final String snapshotName, final TableName tableName) 2639 throws IOException, TableExistsException, RestoreSnapshotException { 2640if (tableExists(tableName)) { 2641 throw new TableExistsException(tableName); 2642} 2643get( -2644 internalRestoreSnapshotAsync(snapshotName, tableName, restoreAcl), +2644 internalRestoreSnapshotAsync(snapshotName, tableName), 2645 Integer.MAX_VALUE, 2646 TimeUnit.MILLISECONDS); 2647 } 2648 2649 @Override -2650 public void cloneSnapshot(final String snapshotName, final TableName tableName) -2651 throws IOException, TableExistsException, RestoreSnapshotException { -2652cloneSnapshot(snapshotName, tableName, false); -2653 } -2654 -2655 @Override -2656 public FutureVoid cloneSnapshotAsync(final String snapshotName, final TableName tableName) -2657 throws IOException, TableExistsException { -2658if (tableExists(tableName)) { -2659 throw new TableExistsException(tableName); -2660} -2661return internalRestoreSnapshotAsync(snapshotName, tableName, false); -2662 } -2663 -2664 @Override -2665 public byte[] execProcedureWithRet(String signature, String instance, MapString, String props) -2666 throws IOException { -2667ProcedureDescription desc = ProtobufUtil.buildProcedureDescription(signature, instance, props); -2668final ExecProcedureRequest request = -2669 ExecProcedureRequest.newBuilder().setProcedure(desc).build(); -2670// run the procedure on the master -2671ExecProcedureResponse response = executeCallable( -2672 new MasterCallableExecProcedureResponse(getConnection(), getRpcControllerFactory()) { -2673@Override -2674protected ExecProcedureResponse rpcCall() throws Exception { -2675 return master.execProcedureWithRet(getRpcController(), request); -2676} -2677 }); -2678 -2679return response.hasReturnData() ? 
response.getReturnData().toByteArray() : null; -2680 } -2681 -2682 @Override -2683 public void execProcedure(String signature, String instance, MapString, String props) -2684 throws IOException { -2685ProcedureDescription desc = ProtobufUtil.buildProcedureDescription(signature, instance, props); -2686final ExecProcedureRequest request = -2687 ExecProcedureRequest.newBuilder().setProcedure(desc).build(); -2688// run the procedure on the master -2689ExecProcedureResponse response = executeCallable(new MasterCallableExecProcedureResponse( -2690getConnection(), getRpcControllerFactory()) { -2691 @Override -2692 protected ExecProcedureResponse rpcCall() throws Exception { -2693return master.execProcedure(getRpcController(), request); -2694 } -2695}); -2696 -2697long start = EnvironmentEdgeManager.currentTime(); -2698long max = response.getExpectedTimeout(); -2699long maxPauseTime = max / this.numRetries; -2700int tries = 0; -2701LOG.debug("Waiting a max of " + max + " ms for procedure '" +
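This HBaseAdmin diff drops the boolean restoreAcl argument from internalRestoreSnapshotAsync and from the public cloneSnapshot overload, so callers use the two-argument form. A hedged usage sketch of the public path; obtaining the Admin and the chosen names are assumed boilerplate, not from the diff:

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;

public class CloneSnapshotExample {
  public static void clone(Admin admin) throws Exception {
    TableName target = TableName.valueOf("restored_table"); // hypothetical table name
    // The listing throws TableExistsException if the target already exists.
    if (!admin.tableExists(target)) {
      // Blocks until the clone procedure completes (Integer.MAX_VALUE ms in the listing).
      admin.cloneSnapshot("my_snapshot", target);           // hypothetical snapshot name
    }
  }
}
```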
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1241ee85/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.ImplData.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.ImplData.html b/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.ImplData.html index e9107e6..1f0030b 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.ImplData.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.ImplData.html @@ -69,15 +69,15 @@ 061 requiredArguments = { 062@org.jamon.annotations.Argument(name = "master", type = "HMaster")}, 063 optionalArguments = { -064@org.jamon.annotations.Argument(name = "serverManager", type = "ServerManager"), -065@org.jamon.annotations.Argument(name = "metaLocation", type = "ServerName"), -066@org.jamon.annotations.Argument(name = "deadServers", type = "SetServerName"), +064@org.jamon.annotations.Argument(name = "catalogJanitorEnabled", type = "boolean"), +065@org.jamon.annotations.Argument(name = "filter", type = "String"), +066@org.jamon.annotations.Argument(name = "serverManager", type = "ServerManager"), 067@org.jamon.annotations.Argument(name = "assignmentManager", type = "AssignmentManager"), -068@org.jamon.annotations.Argument(name = "frags", type = "MapString,Integer"), +068@org.jamon.annotations.Argument(name = "deadServers", type = "SetServerName"), 069@org.jamon.annotations.Argument(name = "servers", type = "ListServerName"), -070@org.jamon.annotations.Argument(name = "catalogJanitorEnabled", type = "boolean"), -071@org.jamon.annotations.Argument(name = "filter", type = "String"), -072@org.jamon.annotations.Argument(name = "format", type = "String")}) +070@org.jamon.annotations.Argument(name = "format", type = "String"), +071@org.jamon.annotations.Argument(name = "frags", type = "MapString,Integer"), +072@org.jamon.annotations.Argument(name = "metaLocation", type = "ServerName")}) 073public class MasterStatusTmpl 074 extends org.jamon.AbstractTemplateProxy 075{ @@ -118,57 +118,57 @@ 110 return m_master; 111} 112private HMaster m_master; -113// 28, 1 -114public void setServerManager(ServerManager serverManager) +113// 25, 1 +114public void setCatalogJanitorEnabled(boolean catalogJanitorEnabled) 115{ -116 // 28, 1 -117 m_serverManager = serverManager; -118 m_serverManager__IsNotDefault = true; +116 // 25, 1 +117 m_catalogJanitorEnabled = catalogJanitorEnabled; +118 m_catalogJanitorEnabled__IsNotDefault = true; 119} -120public ServerManager getServerManager() +120public boolean getCatalogJanitorEnabled() 121{ -122 return m_serverManager; +122 return m_catalogJanitorEnabled; 123} -124private ServerManager m_serverManager; -125public boolean getServerManager__IsNotDefault() +124private boolean m_catalogJanitorEnabled; +125public boolean getCatalogJanitorEnabled__IsNotDefault() 126{ -127 return m_serverManager__IsNotDefault; +127 return m_catalogJanitorEnabled__IsNotDefault; 128} -129private boolean m_serverManager__IsNotDefault; -130// 22, 1 -131public void setMetaLocation(ServerName metaLocation) +129private boolean m_catalogJanitorEnabled__IsNotDefault; +130// 26, 1 +131public void setFilter(String filter) 132{ -133 // 22, 1 -134 m_metaLocation = metaLocation; -135 m_metaLocation__IsNotDefault = true; +133 // 26, 1 +134 m_filter = filter; +135 m_filter__IsNotDefault = true; 136} -137public ServerName getMetaLocation() +137public String getFilter() 138{ -139 return m_metaLocation; +139 return m_filter; 140} 
-141private ServerName m_metaLocation; -142public boolean getMetaLocation__IsNotDefault() +141private String m_filter; +142public boolean getFilter__IsNotDefault() 143{ -144 return m_metaLocation__IsNotDefault; +144 return m_filter__IsNotDefault; 145} -146private boolean m_metaLocation__IsNotDefault; -147// 24, 1 -148public void setDeadServers(SetServerName deadServers) +146private boolean m_filter__IsNotDefault; +147// 28, 1 +148public void setServerManager(ServerManager serverManager) 149{ -150 // 24, 1 -151 m_deadServers = deadServers; -152 m_deadServers__IsNotDefault = true; +150 // 28, 1 +151 m_serverManager = serverManager; +152 m_serverManager__IsNotDefault = true; 153} -154public SetServerName getDeadServers() +154public ServerManager getServerManager() 155{ -156 return m_deadServers; +156 return m_serverManager; 157} -158private SetServerName m_deadServers; -159
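The MasterStatusTmpl hunks only reorder Jamon's generated optional-argument accessors, but they make the pattern visible: every optional template argument stores a value plus an __IsNotDefault flag so the template can distinguish "explicitly set" from "left at default". An illustrative generic sketch of that pattern (not Jamon's generated code):

```java
public class OptionalArg<T> {
  private T value;
  private boolean isNotDefault;

  public void set(T value) {
    this.value = value;
    this.isNotDefault = true; // mirrors m_filter__IsNotDefault = true in the hunks
  }

  public T get() {
    return value;
  }

  // Lets the caller tell an explicit null/false apart from "never set".
  public boolean isNotDefault() {
    return isNotDefault;
  }
}
```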
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dd7176bf/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.EnableTableFuture.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.EnableTableFuture.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.EnableTableFuture.html index 8b22aa1..f2c44db 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.EnableTableFuture.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.EnableTableFuture.html @@ -100,4135 +100,4164 @@ 092import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter; 093import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos; 094import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService; -095import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest; -096import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse; -097import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest; -098import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest; -099import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest; -100import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse; -101import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterRequest; -102import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterResponse; -103import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerRequest; -104import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationRequest; -105import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos; -106import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceRequest; -107import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceResponse; -108import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos; -109import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NameStringPair; -110import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription; -111import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType; -112import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema; -113import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos; -114import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos; -115import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest; -116import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse; -117import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest; -118import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse; -119import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest; -120import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest; -121import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceResponse; -122import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableRequest; -123import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableResponse; -124import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnRequest; -125import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnResponse; -126import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceRequest; -127import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceResponse; -128import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotRequest; -129import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableRequest; -130import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableResponse; -131import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableRequest; -132import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableResponse; -133import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersRequest; -134import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableRequest; -135import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableResponse; -136import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureRequest; -137import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureResponse; -138import
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7ef4c5a9/devapidocs/src-html/org/apache/hadoop/hbase/ipc/ServerCall.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/ipc/ServerCall.html b/devapidocs/src-html/org/apache/hadoop/hbase/ipc/ServerCall.html new file mode 100644 index 000..6ad1e1b --- /dev/null +++ b/devapidocs/src-html/org/apache/hadoop/hbase/ipc/ServerCall.html @@ -0,0 +1,599 @@ +http://www.w3.org/TR/html4/loose.dtd;> + + +Source code + + + + +001/** +002 * Licensed to the Apache Software Foundation (ASF) under one +003 * or more contributor license agreements. See the NOTICE file +004 * distributed with this work for additional information +005 * regarding copyright ownership. The ASF licenses this file +006 * to you under the Apache License, Version 2.0 (the +007 * "License"); you may not use this file except in compliance +008 * with the License. You may obtain a copy of the License at +009 * +010 * http://www.apache.org/licenses/LICENSE-2.0 +011 * +012 * Unless required by applicable law or agreed to in writing, software +013 * distributed under the License is distributed on an "AS IS" BASIS, +014 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +015 * See the License for the specific language governing permissions and +016 * limitations under the License. +017 */ +018package org.apache.hadoop.hbase.ipc; +019 +020import java.io.IOException; +021import java.net.InetAddress; +022import java.nio.ByteBuffer; +023import java.util.ArrayList; +024import java.util.List; +025 +026import org.apache.hadoop.hbase.CellScanner; +027import org.apache.hadoop.hbase.DoNotRetryIOException; +028import org.apache.hadoop.hbase.classification.InterfaceAudience; +029import org.apache.hadoop.hbase.exceptions.RegionMovedException; +030import org.apache.hadoop.hbase.io.ByteBufferListOutputStream; +031import org.apache.hadoop.hbase.io.ByteBufferPool; +032import org.apache.hadoop.hbase.ipc.RpcServer.CallCleanup; +033import org.apache.hadoop.hbase.ipc.RpcServer.Connection; +034import org.apache.hadoop.hbase.security.User; +035import org.apache.hadoop.hbase.shaded.com.google.protobuf.BlockingService; +036import org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream; +037import org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.MethodDescriptor; +038import org.apache.hadoop.hbase.shaded.com.google.protobuf.Message; +039import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; +040import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.VersionInfo; +041import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.CellBlockMeta; +042import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.ExceptionResponse; +043import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.RequestHeader; +044import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.ResponseHeader; +045import org.apache.hadoop.hbase.util.ByteBufferUtils; +046import org.apache.hadoop.hbase.util.Bytes; +047import org.apache.hadoop.util.StringUtils; +048import org.apache.htrace.TraceInfo; +049 +050/** +051 * Datastructure that holds all necessary to a method invocation and then afterward, carries +052 * the result. 
+053 */ +054@InterfaceAudience.Private +055abstract class ServerCall implements RpcCall { +056 +057 protected final int id; // the client's call id +058 protected final BlockingService service; +059 protected final MethodDescriptor md; +060 protected final RequestHeader header; +061 protected Message param; // the parameter passed +062 // Optional cell data passed outside of protobufs. +063 protected final CellScanner cellScanner; +064 protected final Connection connection; // connection to client +065 protected final long receiveTime; // the time received when response is null +066 // the time served when response is not null +067 protected final int timeout; +068 protected long startTime; +069 protected final long deadline;// the deadline to handle this call, if exceed we can drop it. +070 +071 protected final ByteBufferPool reservoir; +072 +073 protected final CellBlockBuilder cellBlockBuilder; +074 +075 /** +076 * Chain of buffers to send as response. +077 */ +078 protected BufferChain response; +079 +080 protected final long size; // size of current call +081 protected boolean isError; +082 protected final TraceInfo tinfo; +083 protected ByteBufferListOutputStream cellBlockStream = null; +084 protected CallCleanup reqCleanup = null; +085 +086 protected User user; +087 protected final InetAddress remoteAddress; +088 protected RpcCallback rpcCallback; +089 +090 private long responseCellSize = 0; +091 private long responseBlockSize = 0; +092 //
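The ServerCall fields above carry per-call bookkeeping, including a receive time, a timeout, and "the deadline to handle this call, if exceed we can drop it." A hedged sketch of that deadline idea under assumed semantics (names and the zero-means-no-deadline rule are illustrative, not ServerCall's):

```java
public class CallDeadline {
  final long receiveTime = System.currentTimeMillis();
  final int timeoutMillis;

  CallDeadline(int timeoutMillis) {
    this.timeoutMillis = timeoutMillis;
  }

  // Assumed convention: a timeout of 0 means "no deadline".
  boolean expired(long now) {
    return timeoutMillis > 0 && now > receiveTime + timeoutMillis;
  }
}
```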
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/31df4674/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.DeleteTableProcedureBiConsumer.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.DeleteTableProcedureBiConsumer.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.DeleteTableProcedureBiConsumer.html index f3f7a46..8750fa2 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.DeleteTableProcedureBiConsumer.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.DeleteTableProcedureBiConsumer.html @@ -56,2015 +56,2125 @@ 048import org.apache.hadoop.hbase.MetaTableAccessor; 049import org.apache.hadoop.hbase.MetaTableAccessor.QueryType; 050import org.apache.hadoop.hbase.NotServingRegionException; -051import org.apache.hadoop.hbase.RegionLocations; -052import org.apache.hadoop.hbase.ServerName; -053import org.apache.hadoop.hbase.NamespaceDescriptor; -054import org.apache.hadoop.hbase.HConstants; -055import org.apache.hadoop.hbase.TableExistsException; -056import org.apache.hadoop.hbase.TableName; -057import org.apache.hadoop.hbase.AsyncMetaTableAccessor; -058import org.apache.hadoop.hbase.TableNotDisabledException; -059import org.apache.hadoop.hbase.TableNotFoundException; -060import org.apache.hadoop.hbase.UnknownRegionException; -061import org.apache.hadoop.hbase.classification.InterfaceAudience; -062import org.apache.hadoop.hbase.classification.InterfaceStability; -063import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.AdminRequestCallerBuilder; -064import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.MasterRequestCallerBuilder; -065import org.apache.hadoop.hbase.client.Scan.ReadType; -066import org.apache.hadoop.hbase.client.replication.ReplicationSerDeHelper; -067import org.apache.hadoop.hbase.client.replication.TableCFs; -068import org.apache.hadoop.hbase.exceptions.DeserializationException; -069import org.apache.hadoop.hbase.ipc.HBaseRpcController; -070import org.apache.hadoop.hbase.quotas.QuotaFilter; -071import org.apache.hadoop.hbase.quotas.QuotaSettings; -072import org.apache.hadoop.hbase.quotas.QuotaTableUtil; -073import org.apache.hadoop.hbase.replication.ReplicationException; -074import org.apache.hadoop.hbase.replication.ReplicationPeerConfig; -075import org.apache.hadoop.hbase.replication.ReplicationPeerDescription; -076import org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback; -077import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; -078import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter; -079import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService; -080import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest; -081import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse; -082import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionRequest; -083import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionResponse; -084import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos; -085import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema; -086import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest; -087import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse; -088import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest; 
-089import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionResponse; -090import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceRequest; -091import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceResponse; -092import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest; -093import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceResponse; -094import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceRequest; -095import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceResponse; -096import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotRequest; -097import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotResponse; -098import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableRequest; -099import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableResponse; -100import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableRequest; -101import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableResponse; -102import
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6f2e75f2/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.NamespaceProcedureBiConsumer.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.NamespaceProcedureBiConsumer.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.NamespaceProcedureBiConsumer.html index 6c52543..f3f7a46 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.NamespaceProcedureBiConsumer.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.NamespaceProcedureBiConsumer.html @@ -31,1797 +31,2040 @@ 023import java.util.ArrayList; 024import java.util.Arrays; 025import java.util.Collection; -026import java.util.HashMap; -027import java.util.LinkedList; -028import java.util.List; -029import java.util.Map; -030import java.util.Optional; -031import java.util.concurrent.CompletableFuture; -032import java.util.concurrent.TimeUnit; -033import java.util.concurrent.atomic.AtomicReference; -034import java.util.function.BiConsumer; -035import java.util.regex.Pattern; -036import java.util.stream.Collectors; -037 -038import com.google.common.annotations.VisibleForTesting; -039 -040import io.netty.util.Timeout; -041import io.netty.util.TimerTask; -042import org.apache.commons.logging.Log; -043import org.apache.commons.logging.LogFactory; -044import org.apache.hadoop.hbase.HColumnDescriptor; -045import org.apache.hadoop.hbase.HRegionInfo; -046import org.apache.hadoop.hbase.HRegionLocation; -047import org.apache.hadoop.hbase.MetaTableAccessor; -048import org.apache.hadoop.hbase.MetaTableAccessor.QueryType; -049import org.apache.hadoop.hbase.NotServingRegionException; -050import org.apache.hadoop.hbase.RegionLocations; -051import org.apache.hadoop.hbase.ServerName; -052import org.apache.hadoop.hbase.NamespaceDescriptor; -053import org.apache.hadoop.hbase.HConstants; -054import org.apache.hadoop.hbase.TableExistsException; -055import org.apache.hadoop.hbase.TableName; -056import org.apache.hadoop.hbase.AsyncMetaTableAccessor; -057import org.apache.hadoop.hbase.TableNotFoundException; -058import org.apache.hadoop.hbase.UnknownRegionException; -059import org.apache.hadoop.hbase.classification.InterfaceAudience; -060import org.apache.hadoop.hbase.classification.InterfaceStability; -061import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.AdminRequestCallerBuilder; -062import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.MasterRequestCallerBuilder; -063import org.apache.hadoop.hbase.client.Scan.ReadType; -064import org.apache.hadoop.hbase.client.replication.ReplicationSerDeHelper; -065import org.apache.hadoop.hbase.client.replication.TableCFs; -066import org.apache.hadoop.hbase.exceptions.DeserializationException; -067import org.apache.hadoop.hbase.ipc.HBaseRpcController; -068import org.apache.hadoop.hbase.quotas.QuotaFilter; -069import org.apache.hadoop.hbase.quotas.QuotaSettings; -070import org.apache.hadoop.hbase.quotas.QuotaTableUtil; -071import org.apache.hadoop.hbase.replication.ReplicationException; -072import org.apache.hadoop.hbase.replication.ReplicationPeerConfig; -073import org.apache.hadoop.hbase.replication.ReplicationPeerDescription; -074import org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback; -075import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; -076import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter; -077import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService; 
-078import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest; -079import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse; -080import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionRequest; -081import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionResponse; -082import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos; -083import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema; -084import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest; -085import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse; -086import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest; -087import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionResponse; -088import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceRequest; -089import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceResponse; -090import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest; -091import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceResponse; -092import
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/efd0601e/devapidocs/org/apache/hadoop/hbase/client/AsyncAdmin.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/client/AsyncAdmin.html b/devapidocs/org/apache/hadoop/hbase/client/AsyncAdmin.html index 518b752..ba71fe3 100644 --- a/devapidocs/org/apache/hadoop/hbase/client/AsyncAdmin.html +++ b/devapidocs/org/apache/hadoop/hbase/client/AsyncAdmin.html @@ -105,9 +105,12 @@ var activeTableTab = "activeTableTab"; -@InterfaceAudience.Public -public interface AsyncAdmin -The asynchronous administrative API for HBase. +@InterfaceAudience.Private +public interface AsyncAdmin +The asynchronous administrative API for HBase. + + This feature is still under development, so marked as IA.Private. Will change to public when + done. Use it with caution. @@ -206,20 +209,20 @@ public interface http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true; title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true; title="class or interface in java.lang">Void -createTable(HTableDescriptordesc) +createTable(TableDescriptordesc) Creates a new table. http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true; title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true; title="class or interface in java.lang">Void -createTable(HTableDescriptordesc, +createTable(TableDescriptordesc, byte[][]splitKeys) Creates a new table with an initial set of empty regions defined by the specified split keys. http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true; title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true; title="class or interface in java.lang">Void -createTable(HTableDescriptordesc, +createTable(TableDescriptordesc, byte[]startKey, byte[]endKey, intnumRegions) @@ -246,13 +249,13 @@ public interface -http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true; title="class or interface in java.util.concurrent">CompletableFutureHTableDescriptor[] +http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true; title="class or interface in java.util.concurrent">CompletableFutureTableDescriptor[] deleteTables(http://docs.oracle.com/javase/8/docs/api/java/util/regex/Pattern.html?is-external=true; title="class or interface in java.util.regex">Patternpattern) Delete tables matching the passed in pattern and wait on completion. -http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true; title="class or interface in java.util.concurrent">CompletableFutureHTableDescriptor[] +http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true; title="class or interface in java.util.concurrent">CompletableFutureTableDescriptor[] deleteTables(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">Stringregex) Deletes tables matching the passed in pattern and wait on completion. 
@@ -270,13 +273,13 @@ public interface -http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true; title="class or interface in java.util.concurrent">CompletableFutureHTableDescriptor[] +http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true; title="class or interface in java.util.concurrent">CompletableFutureTableDescriptor[] disableTables(http://docs.oracle.com/javase/8/docs/api/java/util/regex/Pattern.html?is-external=true; title="class or interface in java.util.regex">Patternpattern) Disable tables matching the passed in pattern. -http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true; title="class or interface in java.util.concurrent">CompletableFutureHTableDescriptor[] +http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true; title="class or interface in java.util.concurrent">CompletableFutureTableDescriptor[] disableTables(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">Stringregex) Disable tables matching the passed in pattern. @@ -294,13 +297,13 @@ public interface -http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true; title="class or interface in
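The AsyncAdmin page above shows every operation returning a CompletableFuture and the table methods migrating from HTableDescriptor to TableDescriptor. A hedged sketch of consuming that style of API; how the AsyncAdmin instance and the TableDescriptor are obtained is assumed boilerplate, only createTable(TableDescriptor) comes from this page:

```java
import java.util.concurrent.CompletableFuture;

import org.apache.hadoop.hbase.client.AsyncAdmin;
import org.apache.hadoop.hbase.client.TableDescriptor;

public class AsyncCreateTableExample {
  public static CompletableFuture<Void> create(AsyncAdmin admin, TableDescriptor desc) {
    // No blocking get(): completion is observed on the returned future.
    return admin.createTable(desc)
        .whenComplete((ignored, error) -> {
          if (error != null) {
            System.err.println("create failed: " + error); // no retry in this sketch
          } else {
            System.out.println("table created");
          }
        });
  }
}
```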
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/10601a30/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.ModifyNamespaceProcedureBiConsumer.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.ModifyNamespaceProcedureBiConsumer.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.ModifyNamespaceProcedureBiConsumer.html index be839b7..72853dd 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.ModifyNamespaceProcedureBiConsumer.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.ModifyNamespaceProcedureBiConsumer.html @@ -45,1639 +45,1784 @@ 037 038import com.google.common.annotations.VisibleForTesting; 039 -040import org.apache.commons.logging.Log; -041import org.apache.commons.logging.LogFactory; -042import org.apache.hadoop.hbase.HColumnDescriptor; -043import org.apache.hadoop.hbase.HRegionInfo; -044import org.apache.hadoop.hbase.HRegionLocation; -045import org.apache.hadoop.hbase.HTableDescriptor; -046import org.apache.hadoop.hbase.MetaTableAccessor; -047import org.apache.hadoop.hbase.MetaTableAccessor.QueryType; -048import org.apache.hadoop.hbase.NotServingRegionException; -049import org.apache.hadoop.hbase.RegionLocations; -050import org.apache.hadoop.hbase.ServerName; -051import org.apache.hadoop.hbase.NamespaceDescriptor; -052import org.apache.hadoop.hbase.HConstants; -053import org.apache.hadoop.hbase.TableName; -054import org.apache.hadoop.hbase.AsyncMetaTableAccessor; -055import org.apache.hadoop.hbase.TableNotFoundException; -056import org.apache.hadoop.hbase.UnknownRegionException; -057import org.apache.hadoop.hbase.classification.InterfaceAudience; -058import org.apache.hadoop.hbase.classification.InterfaceStability; -059import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.AdminRequestCallerBuilder; -060import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.MasterRequestCallerBuilder; -061import org.apache.hadoop.hbase.client.Scan.ReadType; -062import org.apache.hadoop.hbase.client.replication.ReplicationSerDeHelper; -063import org.apache.hadoop.hbase.client.replication.TableCFs; -064import org.apache.hadoop.hbase.exceptions.DeserializationException; -065import org.apache.hadoop.hbase.ipc.HBaseRpcController; -066import org.apache.hadoop.hbase.quotas.QuotaFilter; -067import org.apache.hadoop.hbase.quotas.QuotaSettings; -068import org.apache.hadoop.hbase.quotas.QuotaTableUtil; -069import org.apache.hadoop.hbase.replication.ReplicationException; -070import org.apache.hadoop.hbase.replication.ReplicationPeerConfig; -071import org.apache.hadoop.hbase.replication.ReplicationPeerDescription; -072import org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback; -073import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; -074import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter; -075import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService; -076import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest; -077import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse; -078import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionRequest; -079import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionResponse; -080import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema; -081import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest; 
-082import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse; -083import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest; -084import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionResponse; -085import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceRequest; -086import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceResponse; -087import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest; -088import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceResponse; -089import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceRequest; -090import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceResponse; -091import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableRequest; -092import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableResponse; -093import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableRequest; -094import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableResponse; -095import
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/662ea7dc/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.TableOperator.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.TableOperator.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.TableOperator.html
index ac4a9b3..be839b7 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.TableOperator.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.TableOperator.html
@@ -30,212 +30,212 @@
 022import java.io.IOException;
 023import java.util.ArrayList;
 024import java.util.Arrays;
-025import java.util.LinkedList;
-026import java.util.List;
-027import java.util.Optional;
-028import java.util.concurrent.CompletableFuture;
-029import java.util.concurrent.TimeUnit;
-030import java.util.concurrent.atomic.AtomicReference;
-031import java.util.function.BiConsumer;
-032import java.util.regex.Pattern;
-033
-034import com.google.common.annotations.VisibleForTesting;
-035import org.apache.commons.logging.Log;
-036import org.apache.commons.logging.LogFactory;
-037import org.apache.hadoop.hbase.HColumnDescriptor;
-038import org.apache.hadoop.hbase.HRegionInfo;
-039import org.apache.hadoop.hbase.HRegionLocation;
-040import org.apache.hadoop.hbase.HTableDescriptor;
-041import org.apache.hadoop.hbase.MetaTableAccessor;
-042import org.apache.hadoop.hbase.MetaTableAccessor.QueryType;
-043import org.apache.hadoop.hbase.NotServingRegionException;
-044import org.apache.hadoop.hbase.RegionLocations;
-045import org.apache.hadoop.hbase.ServerName;
-046import org.apache.hadoop.hbase.NamespaceDescriptor;
-047import org.apache.hadoop.hbase.HConstants;
-048import org.apache.hadoop.hbase.TableName;
-049import org.apache.hadoop.hbase.AsyncMetaTableAccessor;
-050import org.apache.hadoop.hbase.TableNotFoundException;
-051import org.apache.hadoop.hbase.UnknownRegionException;
-052import org.apache.hadoop.hbase.classification.InterfaceAudience;
-053import org.apache.hadoop.hbase.classification.InterfaceStability;
-054import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.AdminRequestCallerBuilder;
-055import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.MasterRequestCallerBuilder;
-056import org.apache.hadoop.hbase.client.Scan.ReadType;
-057import org.apache.hadoop.hbase.exceptions.DeserializationException;
-058import org.apache.hadoop.hbase.ipc.HBaseRpcController;
-059import org.apache.hadoop.hbase.quotas.QuotaFilter;
-060import org.apache.hadoop.hbase.quotas.QuotaSettings;
-061import org.apache.hadoop.hbase.quotas.QuotaTableUtil;
-062import org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback;
-063import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-064import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
-065import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
-066import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest;
-067import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse;
-068import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionRequest;
-069import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionResponse;
-070import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema;
-071import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest;
-072import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse;
-073import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest;
-074import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionResponse;
-075import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceRequest;
-076import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceResponse;
-077import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest;
-078import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceResponse;
-079import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceRequest;
-080import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceResponse;
-081import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableRequest;
-082import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableResponse;
-083import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableRequest;
-084import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableResponse;
-085import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnRequest;
-086import
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6b4bae59/devapidocs/org/apache/hadoop/hbase/regionserver/StoreFile.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/StoreFile.html b/devapidocs/org/apache/hadoop/hbase/regionserver/StoreFile.html
index ee03734..7ff5c4d 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/StoreFile.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/StoreFile.html
@@ -18,7 +18,7 @@ catch(err) { } //-->
-var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":9,"i17":10,"i18":9,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":9,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10};
+var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":9,"i15":10,"i16":9,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":9,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10};
 var tabs = {65535:["t0","All Methods"],1:["t1","Static Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -110,7 +110,7 @@ var activeTableTab = "activeTableTab";
 @InterfaceAudience.LimitedPrivate(value="Coprocessor")
-public class StoreFile
+public class StoreFile
 extends Object
 A Store data file. Stores usually have one or more of these files. They are produced by flushing the memstore to disk. To
@@ -118,7 +118,7 @@ extends Object
 and append data. Be sure to add any metadata before calling close on the Writer (Use the appendMetadata convenience methods). On close, a StoreFile is sitting in the Filesystem. To refer to it, create a StoreFile instance
-passing filesystem and path. To read, call createReader().
+passing filesystem and path. To read, call initReader().
 StoreFiles may also reference store files in another Store. The reason for this weird pattern where you use a different instance for the
@@ -189,9 +189,17 @@
+private boolean
+compactedAway
 private Comparator<Cell>
 comparator
+private static boolean
+DEFAULT_STORE_FILE_READER_NO_READAHEAD
 static byte[]
 DELETE_FAMILY_COUNT
@@ -273,19 +281,35 @@
+private boolean
+noReadahead
+private boolean
+primaryReplica
 private StoreFileReader
 reader
+private AtomicInteger
+refCount
 private long
 sequenceid
 static byte[]
 SKIP_RESET_SEQ_ID
 Key for skipping resetting sequence id in metadata.
+static String
+STORE_FILE_READER_NO_READAHEAD
 static byte[]
 TIMERANGE_KEY
@@ -312,21 +336,46 @@
 StoreFile(FileSystem fs, Path p, Configuration conf, CacheConfig cacheConf, BloomType cfBloomType)
-Constructor, loads a reader and its indices, etc.
+Deprecated. Now we specify whether the StoreFile is for the primary replica when constructing, so please use
+StoreFile(FileSystem, Path, Configuration, CacheConfig, BloomType, boolean) directly.
+
+StoreFile(FileSystem fs, Path p, Configuration conf, CacheConfig cacheConf, BloomType cfBloomType, boolean primaryReplica)
+Constructor, loads a reader and its indices, etc.
 StoreFile(FileSystem fs, StoreFileInfo fileInfo, Configuration conf, CacheConfig cacheConf, BloomType cfBloomType)
-Constructor, loads a reader and its indices, etc.
+Deprecated. Now we specify whether the StoreFile is for the primary replica when constructing, so please use
+StoreFile(FileSystem, StoreFileInfo, Configuration,
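The hunks above change the documented read path (createReader() becomes initReader()) and deprecate the five-argument constructors in favor of a form that states up front whether the file serves a primary region replica. A minimal sketch of that lifecycle, assuming the six-argument constructor and initReader() shown in this diff plus a CacheConfig(Configuration) constructor (an illustration, not code from the patch):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.regionserver.StoreFile;

public class StoreFileReadSketch {
  // Opens an existing store file for reading, following the updated Javadoc:
  // construct the StoreFile, then call initReader() before any reads.
  static StoreFile openForRead(FileSystem fs, Path path, Configuration conf) throws Exception {
    CacheConfig cacheConf = new CacheConfig(conf); // assumed constructor
    // The trailing boolean (new in this diff) marks the file as belonging to
    // a primary region replica; the five-argument form is deprecated.
    StoreFile sf = new StoreFile(fs, path, conf, cacheConf, BloomType.NONE, true);
    sf.initReader();
    return sf;
  }
}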
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2fcc2ae0/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.PeriodicMemstoreFlusher.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.PeriodicMemstoreFlusher.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.PeriodicMemstoreFlusher.html
index 1934610..a2eb716 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.PeriodicMemstoreFlusher.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.PeriodicMemstoreFlusher.html
@@ -1480,7 +1480,7 @@
 1472    startServiceThreads();
 1473    startHeapMemoryManager();
 1474    // Call it after starting HeapMemoryManager.
-1475    initializeMemStoreChunkCreator();
+1475    initializeMemStoreChunkPool();
 1476    LOG.info("Serving as " + this.serverName +
 1477        ", RpcServer on " + rpcServices.isa +
 1478        ", sessionid=0x" +
@@ -1500,7 +1500,7 @@
 1492    }
 1493  }
 1494
-1495  protected void initializeMemStoreChunkCreator() {
+1495  private void initializeMemStoreChunkPool() {
 1496    if (MemStoreLAB.isEnabled(conf)) {
 1497      // MSLAB is enabled. So initialize MemStoreChunkPool
 1498      // By this time, the MemstoreFlusher is already initialized. We can get the global limits from
@@ -1514,2158 +1514,2162 @@
 1506      float initialCountPercentage = conf.getFloat(MemStoreLAB.CHUNK_POOL_INITIALSIZE_KEY,
 1507          MemStoreLAB.POOL_INITIAL_SIZE_DEFAULT);
 1508      int chunkSize = conf.getInt(MemStoreLAB.CHUNK_SIZE_KEY, MemStoreLAB.CHUNK_SIZE_DEFAULT);
-1509      // init the chunkCreator
-1510      ChunkCreator chunkCreator =
-1511          ChunkCreator.initialize(chunkSize, offheap, globalMemStoreSize, poolSizePercentage,
-1512              initialCountPercentage, this.hMemManager);
-1513    }
-1514  }
-1515
-1516  private void startHeapMemoryManager() {
-1517    this.hMemManager = HeapMemoryManager.create(this.conf, this.cacheFlusher, this,
-1518        this.regionServerAccounting);
-1519    if (this.hMemManager != null) {
-1520      this.hMemManager.start(getChoreService());
-1521    }
-1522  }
-1523
-1524  private void createMyEphemeralNode() throws KeeperException, IOException {
-1525    RegionServerInfo.Builder rsInfo = RegionServerInfo.newBuilder();
-1526    rsInfo.setInfoPort(infoServer != null ? infoServer.getPort() : -1);
-1527    rsInfo.setVersionInfo(ProtobufUtil.getVersionInfo());
-1528    byte[] data = ProtobufUtil.prependPBMagic(rsInfo.build().toByteArray());
-1529    ZKUtil.createEphemeralNodeAndWatch(this.zooKeeper,
-1530        getMyEphemeralNodePath(), data);
-1531  }
-1532
-1533  private void deleteMyEphemeralNode() throws KeeperException {
-1534    ZKUtil.deleteNode(this.zooKeeper, getMyEphemeralNodePath());
-1535  }
-1536
-1537  @Override
-1538  public RegionServerAccounting getRegionServerAccounting() {
-1539    return regionServerAccounting;
-1540  }
-1541
-1542  /*
-1543   * @param r Region to get RegionLoad for.
-1544   * @param regionLoadBldr the RegionLoad.Builder, can be null
-1545   * @param regionSpecifier the RegionSpecifier.Builder, can be null
-1546   * @return RegionLoad instance.
-1547   *
-1548   * @throws IOException
-1549   */
-1550  RegionLoad createRegionLoad(final Region r, RegionLoad.Builder regionLoadBldr,
-1551      RegionSpecifier.Builder regionSpecifier) throws IOException {
-1552    byte[] name = r.getRegionInfo().getRegionName();
-1553    int stores = 0;
-1554    int storefiles = 0;
-1555    int storeUncompressedSizeMB = 0;
-1556    int storefileSizeMB = 0;
-1557    int memstoreSizeMB = (int) (r.getMemstoreSize() / 1024 / 1024);
-1558    int storefileIndexSizeMB = 0;
-1559    int rootIndexSizeKB = 0;
-1560    int totalStaticIndexSizeKB = 0;
-1561    int totalStaticBloomSizeKB = 0;
-1562    long totalCompactingKVs = 0;
-1563    long currentCompactedKVs = 0;
-1564    List<Store> storeList = r.getStores();
-1565    stores += storeList.size();
-1566    for (Store store : storeList) {
-1567      storefiles += store.getStorefilesCount();
-1568      storeUncompressedSizeMB += (int) (store.getStoreSizeUncompressed() / 1024 / 1024);
-1569      storefileSizeMB += (int) (store.getStorefilesSize() / 1024 / 1024);
-1570      storefileIndexSizeMB += (int) (store.getStorefilesIndexSize() / 1024 / 1024);
-1571      CompactionProgress progress = store.getCompactionProgress();
-1572      if (progress != null) {
-1573        totalCompactingKVs += progress.totalCompactingKVs;
-1574        currentCompactedKVs += progress.currentCompactedKVs;
-1575      }
-1576      rootIndexSizeKB += (int) (store.getStorefilesIndexSize() / 1024);
-1577      totalStaticIndexSizeKB += (int) (store.getTotalStaticIndexSize() / 1024);
-1578      totalStaticBloomSizeKB += (int) (store.getTotalStaticBloomSize() / 1024);
-1579
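Stripped of diff markup, the createRegionLoad hunk above follows one pattern: walk the region's stores and accumulate counters, converting byte totals to whole megabytes or kilobytes by integer division. A condensed, self-contained sketch of that aggregation, where StoreMetrics is a hypothetical stand-in for the real Store interface:

import java.util.List;

public class RegionLoadSketch {
  // Hypothetical subset of the metrics the real Store interface exposes.
  interface StoreMetrics {
    int getStorefilesCount();
    long getStoreSizeUncompressed();
    long getStorefilesSize();
    long getStorefilesIndexSize();
  }

  static void aggregate(List<StoreMetrics> storeList) {
    int stores = storeList.size();
    int storefiles = 0;
    int storeUncompressedSizeMB = 0;
    int storefileSizeMB = 0;
    int storefileIndexSizeMB = 0;
    for (StoreMetrics store : storeList) {
      storefiles += store.getStorefilesCount();
      // Integer division by 1024 twice reports sizes in whole megabytes,
      // so sub-megabyte remainders are deliberately dropped.
      storeUncompressedSizeMB += (int) (store.getStoreSizeUncompressed() / 1024 / 1024);
      storefileSizeMB += (int) (store.getStorefilesSize() / 1024 / 1024);
      storefileIndexSizeMB += (int) (store.getStorefilesIndexSize() / 1024 / 1024);
    }
    System.out.printf("stores=%d storefiles=%d storefileSizeMB=%d%n",
        stores, storefiles, storefileSizeMB);
  }
}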
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e4348f53/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncAdmin.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncAdmin.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncAdmin.html
index 4c688fb..e763538 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncAdmin.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncAdmin.html
@@ -25,455 +25,471 @@
 017 */
 018package org.apache.hadoop.hbase.client;
 019
-020import java.util.concurrent.CompletableFuture;
-021import java.util.regex.Pattern;
-022
-023import org.apache.hadoop.hbase.HColumnDescriptor;
-024import org.apache.hadoop.hbase.HRegionInfo;
-025import org.apache.hadoop.hbase.HTableDescriptor;
-026import org.apache.hadoop.hbase.ServerName;
-027import org.apache.hadoop.hbase.NamespaceDescriptor;
-028import org.apache.hadoop.hbase.TableName;
-029import org.apache.hadoop.hbase.classification.InterfaceAudience;
-030import org.apache.hadoop.hbase.util.Pair;
-031
-032/**
-033 * The asynchronous administrative API for HBase.
-034 */
-035@InterfaceAudience.Public
-036public interface AsyncAdmin {
-037
-038  /**
-039   * @return Async Connection used by this object.
-040   */
-041  AsyncConnectionImpl getConnection();
-042
-043  /**
-044   * @param tableName Table to check.
-045   * @return True if table exists already. The return value will be wrapped by a
-046   *         {@link CompletableFuture}.
-047   */
-048  CompletableFuture<Boolean> tableExists(final TableName tableName);
-049
-050  /**
-051   * List all the userspace tables.
-052   * @return - returns an array of HTableDescriptors wrapped by a {@link CompletableFuture}.
-053   * @see #listTables(Pattern, boolean)
-054   */
-055  CompletableFuture<HTableDescriptor[]> listTables();
-056
-057  /**
-058   * List all the tables matching the given pattern.
-059   * @param regex The regular expression to match against
-060   * @param includeSysTables False to match only against userspace tables
-061   * @return - returns an array of HTableDescriptors wrapped by a {@link CompletableFuture}.
-062   * @see #listTables(Pattern, boolean)
-063   */
-064  CompletableFuture<HTableDescriptor[]> listTables(String regex, boolean includeSysTables);
-065
-066  /**
-067   * List all the tables matching the given pattern.
-068   * @param pattern The compiled regular expression to match against
-069   * @param includeSysTables False to match only against userspace tables
-070   * @return - returns an array of HTableDescriptors wrapped by a {@link CompletableFuture}.
-071   */
-072  CompletableFuture<HTableDescriptor[]> listTables(Pattern pattern, boolean includeSysTables);
-073
-074  /**
-075   * List all of the names of userspace tables.
-076   * @return TableName[] an array of table names wrapped by a {@link CompletableFuture}.
-077   * @see #listTableNames(Pattern, boolean)
-078   */
-079  CompletableFuture<TableName[]> listTableNames();
-080
-081  /**
-082   * List all of the names of userspace tables.
-083   * @param regex The regular expression to match against
-084   * @param includeSysTables False to match only against userspace tables
-085   * @return TableName[] an array of table names wrapped by a {@link CompletableFuture}.
-086   * @see #listTableNames(Pattern, boolean)
-087   */
-088  CompletableFuture<TableName[]> listTableNames(final String regex, final boolean includeSysTables);
-089
-090  /**
-091   * List all of the names of userspace tables.
-092   * @param pattern The regular expression to match against
-093   * @param includeSysTables False to match only against userspace tables
-094   * @return TableName[] an array of table names wrapped by a {@link CompletableFuture}.
-095   */
-096  CompletableFuture<TableName[]> listTableNames(final Pattern pattern,
-097      final boolean includeSysTables);
-098
-099  /**
-100   * Method for getting the tableDescriptor
-101   * @param tableName as a {@link TableName}
-102   * @return the tableDescriptor wrapped by a {@link CompletableFuture}.
-103   */
-104  CompletableFuture<HTableDescriptor> getTableDescriptor(final TableName tableName);
-105
-106  /**
-107   * Creates a new table.
-108   * @param desc table descriptor for table
-109   */
-110  CompletableFuture<Void> createTable(HTableDescriptor desc);
-111
-112  /**
-113   * Creates a new table with the specified number of regions. The start key specified will become
-114   * the end key of the first region of the table, and the end key specified will become the start
-115   * key of the last region of the table (the first region has a null start key and the last region
-116   * has a null end key). BigInteger math will be used to divide the key range specified into enough
-117   * segments to make the required number of total regions.
-118   * @param desc table descriptor for
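The createTable Javadoc above describes the split computation in prose: the given start key ends the first region, the given end key starts the last, and BigInteger arithmetic cuts the span between them into equal segments. A sketch of that computation follows; the overload taking (desc, startKey, endKey, numRegions) is truncated in this hunk, so its exact signature, like the usage comment at the end, is an assumption based on the synchronous Admin API.

import java.math.BigInteger;

public class SplitKeySketch {
  // Computes the numRegions - 1 region boundaries described in the Javadoc:
  // startKey ends the first region, endKey starts the last, and the span
  // between them is divided into numRegions - 2 equal segments.
  static byte[][] splitKeys(byte[] startKey, byte[] endKey, int numRegions) {
    BigInteger lo = new BigInteger(1, startKey); // keys treated as unsigned
    BigInteger hi = new BigInteger(1, endKey);
    int middleRegions = numRegions - 2;
    byte[][] splits = new byte[numRegions - 1][];
    splits[0] = startKey;
    for (int i = 1; i < middleRegions; i++) {
      // NOTE: real code must pad or trim toByteArray() output to a fixed key width.
      splits[i] = lo.add(hi.subtract(lo)
          .multiply(BigInteger.valueOf(i))
          .divide(BigInteger.valueOf(middleRegions))).toByteArray();
    }
    splits[numRegions - 2] = endKey;
    return splits;
  }

  // Hypothetical usage, assuming an AsyncAdmin `admin` and an HTableDescriptor
  // `desc` are already in hand; createTable returns a CompletableFuture<Void>:
  //   admin.createTable(desc).whenComplete((v, err) -> { /* handle result */ });
}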
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e57d1b63/apidocs/org/apache/hadoop/hbase/class-use/TableNotDisabledException.html
--
diff --git a/apidocs/org/apache/hadoop/hbase/class-use/TableNotDisabledException.html b/apidocs/org/apache/hadoop/hbase/class-use/TableNotDisabledException.html
deleted file mode 100644
index ca655f7..000
--- a/apidocs/org/apache/hadoop/hbase/class-use/TableNotDisabledException.html
+++ /dev/null
@@ -1,125 +0,0 @@
-Uses of Class org.apache.hadoop.hbase.TableNotDisabledException (Apache HBase 2.0.0-SNAPSHOT API)
-No usage of org.apache.hadoop.hbase.TableNotDisabledException
-Copyright © 2007–2017 The Apache Software Foundation. All rights reserved.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e57d1b63/apidocs/org/apache/hadoop/hbase/class-use/TableNotEnabledException.html
--
diff --git a/apidocs/org/apache/hadoop/hbase/class-use/TableNotEnabledException.html b/apidocs/org/apache/hadoop/hbase/class-use/TableNotEnabledException.html
deleted file mode 100644
index 791e909..000
--- a/apidocs/org/apache/hadoop/hbase/class-use/TableNotEnabledException.html
+++ /dev/null
@@ -1,125 +0,0 @@
-Uses of Class org.apache.hadoop.hbase.TableNotEnabledException (Apache HBase 2.0.0-SNAPSHOT API)
-No usage of org.apache.hadoop.hbase.TableNotEnabledException
-Copyright © 2007–2017 The Apache Software Foundation. All rights reserved.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e57d1b63/apidocs/org/apache/hadoop/hbase/class-use/TableNotFoundException.html
--
diff --git a/apidocs/org/apache/hadoop/hbase/class-use/TableNotFoundException.html b/apidocs/org/apache/hadoop/hbase/class-use/TableNotFoundException.html
deleted file mode 100644
index e442be7..000
--- a/apidocs/org/apache/hadoop/hbase/class-use/TableNotFoundException.html
+++ /dev/null
@@ -1,224 +0,0 @@
-Uses of Class org.apache.hadoop.hbase.TableNotFoundException (Apache HBase 2.0.0-SNAPSHOT API)