[22/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/346adc37/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.RegionServerMetricsWrapperRunnable.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.RegionServerMetricsWrapperRunnable.html b/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.RegionServerMetricsWrapperRunnable.html
index 51ab56b..4fd71ab 100644
[regenerated Javadoc; anchor-id churn only, touching the class header, the private long fields lastRan and lastRequestCount, the constructor, and run(). The page documents a public class implementing java.lang.Runnable, described as "the runnable that will be executed on the executor every PERIOD number of seconds" to cache region server metrics.]
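The page above documents a Runnable scheduled on an executor every PERIOD seconds, snapshotting counters into the lastRan / lastRequestCount fields so it can derive rates between runs. A minimal sketch of that pattern, with hypothetical names (MetricsSnapshotRunnable, a 5-second period) standing in for the HBase implementation:

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;

public class MetricsSnapshotRunnable implements Runnable {
  private static final long PERIOD_SECONDS = 5; // assumed period
  private final AtomicLong totalRequestCount; // incremented by the server
  private long lastRan;          // wall-clock millis of the previous run
  private long lastRequestCount; // counter value seen at the previous run
  private volatile double requestsPerSecond;

  public MetricsSnapshotRunnable(AtomicLong totalRequestCount) {
    this.totalRequestCount = totalRequestCount;
  }

  @Override
  public void run() {
    long now = System.currentTimeMillis();
    long count = totalRequestCount.get();
    if (lastRan != 0 && now > lastRan) {
      // Rate over the window since the previous run.
      requestsPerSecond = (count - lastRequestCount) * 1000.0 / (now - lastRan);
    }
    lastRan = now;
    lastRequestCount = count;
  }

  public double getRequestsPerSecond() {
    return requestsPerSecond;
  }

  public static void main(String[] args) {
    ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor();
    executor.scheduleWithFixedDelay(new MetricsSnapshotRunnable(new AtomicLong()),
        PERIOD_SECONDS, PERIOD_SECONDS, TimeUnit.SECONDS);
  }
}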
[22/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2aec596e/devapidocs/org/apache/hadoop/hbase/class-use/ServerName.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/class-use/ServerName.html b/devapidocs/org/apache/hadoop/hbase/class-use/ServerName.html
index 6fab848..dda7356 100644
[regenerated "Use of ServerName" index; rows were reordered, no API change. The reshuffled entries cover the private ServerName fields of FastFailInterceptorContext, AsyncRequestFutureImpl.SingleServerRequestRunnable, the AsyncRpcRetryingCallerFactory builders, AsyncAdminRequestRetryingCaller, and AsyncServerRequestRpcRetryingCaller; listDrainingRegionServers() on AsyncAdmin ("List region servers marked as draining to not get additional regions assigned to them"), AsyncHBaseAdmin, RawAsyncHBaseAdmin, and HBaseAdmin; MetaCache.cacheLocation(TableName, ServerName, HRegionLocation) ("Put a newly discovered HRegionLocation into the cache") and the private ConnectionImplementation.cacheLocation; ClusterConnection.clearCaches(ServerName sn) ("Clear any caches that pertain to server name sn") and ConnectionImplementation.clearCaches; and AsyncAdmin.clearCompactionQueues(ServerName, Set<String> queues).]
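Among the rows above, MetaCache.cacheLocation "puts a newly discovered HRegionLocation into the cache" and clearCaches drops everything pertaining to one server. A toy sketch of such a per-table location cache, using plain Strings in place of the HBase key types (an illustration, not the MetaCache code):

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

public class LocationCache {
  // table name -> region start key -> hosting server
  private final ConcurrentMap<String, ConcurrentMap<String, String>> cache =
      new ConcurrentHashMap<>();

  /** Put a newly discovered region location into the cache. */
  public void cacheLocation(String tableName, String startKey, String serverName) {
    cache.computeIfAbsent(tableName, t -> new ConcurrentHashMap<>())
        .put(startKey, serverName);
  }

  public String getCachedLocation(String tableName, String startKey) {
    Map<String, String> byKey = cache.get(tableName);
    return byKey == null ? null : byKey.get(startKey);
  }

  /** Clear any cached locations that pertain to server sn. */
  public void clearCaches(String sn) {
    for (ConcurrentMap<String, String> byKey : cache.values()) {
      byKey.values().removeIf(sn::equals);
    }
  }
}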
[22/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1837997e/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.TableOperator.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.TableOperator.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.TableOperator.html
index 35d5549..7f42873 100644
[regenerated source listing; the hunk shown is import churn only, shifting the block of shaded protobuf imports (AdminProtos SplitRegion/StopServer/UpdateConfiguration request-response pairs, HBaseProtos.ProcedureDescription, TableSchema, and RegionSpecifierType, and the MasterProtos pairs from AbortProcedure and DrainRegionServers through GetNamespaceDescriptor) by a few lines.]
[22/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a2b2dd19/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.FlushRegionEntry.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.FlushRegionEntry.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.FlushRegionEntry.html
index f355960..13d9b4a 100644
[regenerated source listing, lines shifted by two. Recoverable context: requestFlush(Region, boolean) and requestDelayedFlush(Region, long, boolean) both synchronize on regionsInQueue and, if the region is not already queued, create a FlushRegionEntry (requeued with the delay in the delayed case) and add it to both regionsInQueue and flushQueue; interruptIfNecessary, start, isAlive, and join manage the pool of FlushHandler threads; flushRegion(FlushRegionEntry) delays the flush of a non-meta region with too many store files, requesting a split or a system compaction and warning "has too many store files; delaying flush up to blockingWaitTime ms", until isMaximumWait(blockingWaitTime) forces it to proceed.]
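The requestFlush / requestDelayedFlush pair above implements a queue-at-most-once-per-region scheme: a map dedupes requests while a delay queue orders when each FlushRegionEntry becomes runnable. A self-contained sketch of that scheme, simplified to String region names (computeIfAbsent replaces the listing's synchronized (regionsInQueue) block):

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.DelayQueue;
import java.util.concurrent.Delayed;
import java.util.concurrent.TimeUnit;

public class FlushQueue {
  static final class FlushEntry implements Delayed {
    final String region;
    private long whenReadyMillis;

    FlushEntry(String region) { this.region = region; }

    void requeue(long delayMillis) {
      whenReadyMillis = System.currentTimeMillis() + delayMillis;
    }

    @Override public long getDelay(TimeUnit unit) {
      return unit.convert(whenReadyMillis - System.currentTimeMillis(),
          TimeUnit.MILLISECONDS);
    }

    @Override public int compareTo(Delayed other) {
      return Long.compare(getDelay(TimeUnit.MILLISECONDS),
          other.getDelay(TimeUnit.MILLISECONDS));
    }
  }

  private final Map<String, FlushEntry> regionsInQueue = new ConcurrentHashMap<>();
  private final DelayQueue<FlushEntry> flushQueue = new DelayQueue<>();

  /** No delay: the entry will come out of the queue near immediately. */
  public void requestFlush(String region) {
    requestDelayedFlush(region, 0);
  }

  /** At most one queued entry per region; repeat requests are no-ops. */
  public void requestDelayedFlush(String region, long delayMillis) {
    regionsInQueue.computeIfAbsent(region, r -> {
      FlushEntry fqe = new FlushEntry(r);
      fqe.requeue(delayMillis);
      flushQueue.add(fqe);
      return fqe;
    });
  }

  public int getFlushQueueSize() {
    return flushQueue.size();
  }
}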
[22/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/21766f4a/devapidocs/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.html b/devapidocs/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.html
index 5e68510..e70dc7f 100644
[regenerated Javadoc. One method was added, growing the method index from 29 to 30 entries: void interruptedExceptionNoThrow(InterruptedException ie, boolean throwLater), "Log the InterruptedException and interrupt current thread". The javadoc of interruptedException(InterruptedException) was also rewritten; the old text read: "Handles InterruptedExceptions in client calls. This may be temporary but for now this gives one place to deal with these. TODO: Currently, this method does nothing. Is this ever expected to happen? Do we abort or can we let it run? Maybe this should be logged as WARN? It shouldn't happen?" The rest of the method summary (isAborted, isBaseZnodeAclSetup, isClientReadable, isSuperUserId, keeperException, prefix, process, reconnectAfterExpiration, registerListener, registerListenerFirst, setZnodeAclsRecursive, sync, toString, unregisterAllListeners, unregisterListener) is unchanged apart from row renumbering.]
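The new interruptedExceptionNoThrow method formalizes a common pattern: when InterruptedException is caught but cannot be rethrown at that point, log it and restore the thread's interrupt status. A sketch of that convention using the same commons-logging API the surrounding code uses (the helper class name is hypothetical):

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

public final class InterruptUtil {
  private static final Log LOG = LogFactory.getLog(InterruptUtil.class);

  private InterruptUtil() {}

  /** Log the InterruptedException and interrupt the current thread. */
  public static void interruptedExceptionNoThrow(InterruptedException ie,
      boolean throwLater) {
    LOG.debug("Interrupted, throwLater=" + throwLater, ie);
    // Catching InterruptedException clears the interrupt flag; re-assert it so
    // callers further up the stack can still observe the interruption.
    Thread.currentThread().interrupt();
  }
}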
[22/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2d5075d7/devapidocs/src-html/org/apache/hadoop/hbase/master/procedure/ProcedureSyncWait.Predicate.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/procedure/ProcedureSyncWait.Predicate.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/procedure/ProcedureSyncWait.Predicate.html
index 7af3762..9837a0e 100644
[regenerated source listing; the ProcedureInfo import was dropped and the remaining lines shifted by one. The listed class, ProcedureSyncWait, is "a helper to synchronously wait on conditions", slated for removal once the AssignmentManager is replaced with a Procedure version. It declares the interface Predicate<T> { T evaluate() throws IOException; }, a private ProcedureFuture implementing Future<byte[]> over a ProcedureExecutor<MasterProcedureEnv> and a procId, and the static helpers submitProcedure, submitAndWaitProcedure, waitForProcedureToCompleteIOE, and waitForProcedureToComplete.]
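The core of the listing is ProcedureFuture: an adapter that exposes a submitted procedure id as a blocking java.util.concurrent.Future<byte[]>. A de-garbled sketch mirroring that adapter (the HTML extraction stripped the generics); the abstract waitForProcedureToComplete hook stands in for the ProcedureExecutor<MasterProcedureEnv> dependency:

import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

abstract class ProcedureFuture implements Future<byte[]> {
  private final long procId;
  private boolean hasResult = false;
  private byte[] result = null;

  ProcedureFuture(long procId) {
    this.procId = procId;
  }

  /** Block until the procedure finishes or timeoutMillis elapses. */
  protected abstract byte[] waitForProcedureToComplete(long procId,
      long timeoutMillis) throws Exception;

  @Override public boolean cancel(boolean mayInterruptIfRunning) { return false; }
  @Override public boolean isCancelled() { return false; }
  @Override public boolean isDone() { return hasResult; }

  @Override public byte[] get() throws InterruptedException, ExecutionException {
    if (hasResult) return result;
    try {
      return waitForProcedureToComplete(procId, Long.MAX_VALUE);
    } catch (Exception e) {
      throw new ExecutionException(e);
    }
  }

  @Override public byte[] get(long timeout, TimeUnit unit)
      throws InterruptedException, ExecutionException, TimeoutException {
    if (hasResult) return result;
    try {
      result = waitForProcedureToComplete(procId, unit.toMillis(timeout));
      hasResult = true;
      return result;
    } catch (Exception e) {
      throw new ExecutionException(e);
    }
  }
}

The original additionally unwraps a TimeoutIOException from the timed wait into java.util.concurrent.TimeoutException; this sketch folds that case into ExecutionException for brevity.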
[22/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0383a9c2/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterCoprocessorHost.CoprocessorOperationWithResult.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterCoprocessorHost.CoprocessorOperationWithResult.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterCoprocessorHost.CoprocessorOperationWithResult.html
index e690c2d..ec75aa9 100644
[regenerated source listing. The hook pair preDispatchMerge/postDispatchMerge(HRegionInfo regionInfoA, HRegionInfo regionInfoB) was removed in favor of the existing preMergeRegions/postMergeRegions(HRegionInfo[] regionsToMerge); the surrounding hooks (preBalance, postBalance(List<RegionPlan>), preSetSplitOrMergeEnabled, postSetSplitOrMergeEnabled) shifted up but are otherwise unchanged. Every hook follows the same dispatch shape: execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() { call(MasterObserver oserver, ObserverContext<MasterCoprocessorEnvironment> ctx) { oserver.<hook>(ctx, args); } }).]
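Every pre/post hook in the host follows the dispatch shape visible above: allocate a one-off operation object only when coprocessors are registered, then have execOperation invoke it against each observer in order. A minimal sketch of that pattern with simplified stand-in types (Observer for MasterObserver, String[] for HRegionInfo[]):

import java.io.IOException;
import java.util.List;
import java.util.concurrent.CopyOnWriteArrayList;

public class ObserverHost {
  public interface Observer {
    void preMergeRegions(String[] regionsToMerge) throws IOException;
  }

  /** Stand-in for CoprocessorOperation: one hook invocation on one observer. */
  private interface Operation {
    void call(Observer observer) throws IOException;
  }

  private final List<Observer> coprocessors = new CopyOnWriteArrayList<>();

  public void register(Observer observer) {
    coprocessors.add(observer);
  }

  public void preMergeRegions(final String[] regionsToMerge) throws IOException {
    // As in the listing: pass null when there is nothing to invoke.
    execOperation(coprocessors.isEmpty() ? null
        : observer -> observer.preMergeRegions(regionsToMerge));
  }

  private void execOperation(Operation op) throws IOException {
    if (op == null) {
      return;
    }
    for (Observer observer : coprocessors) {
      op.call(observer);
    }
  }
}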
[22/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f391bcef/devapidocs/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.html b/devapidocs/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.html
index 2888fc4..110fd9e 100644
[regenerated Javadoc. Two static factories were added, growing the method index from 42 to 44 entries: ColumnFamilyDescriptor of(byte[] name) and ColumnFamilyDescriptor of(String name), listed ahead of parseFrom(byte[] pbBytes). The fluent setters are unchanged apart from row renumbering: removeConfiguration, setBlockCacheEnabled, setBlocksize, setBloomFilterType, setCacheBloomsOnWrite, setCacheDataInL1, setCacheDataOnWrite, setCacheIndexesOnWrite, setCompactionCompressionType, setCompressionType, setCompressTags, setConfiguration, setDataBlockEncoding, setDFSReplication, setEncryptionKey, setEncryptionType, setEvictBlocksOnClose, setInMemory, setInMemoryCompaction, setKeepDeletedCells, setMaxVersions, setMinVersions, setMobCompactPartitionPolicy, setMobEnabled, setMobThreshold, setPrefetchBlocksOnOpen, setScope, setStoragePolicy, setTimeToLive(int and String), and setValue(byte[], byte[]).]
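The new of() factories cover the common case where a family needs only defaults, while the builder remains for tuned families. A hedged usage sketch against the client API shown on this page (method names taken from the table above; the family name and values are illustrative):

import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class FamilyExample {
  public static void main(String[] args) {
    // of(String): a descriptor with all defaults for the named family.
    ColumnFamilyDescriptor defaults = ColumnFamilyDescriptorBuilder.of("info");

    // Builder form: customize, then build the immutable descriptor.
    ColumnFamilyDescriptor tuned = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)
        .setBlocksize(64 * 1024)
        .setInMemory(true)
        .build();

    System.out.println(defaults.getNameAsString() + " / " + tuned.getNameAsString());
  }
}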
[22/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ca5b0275/devapidocs/org/apache/hadoop/hbase/security/access/AccessControlLists.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/security/access/AccessControlLists.html b/devapidocs/org/apache/hadoop/hbase/security/access/AccessControlLists.html
index cba8014..a95a076 100644
[regenerated Javadoc. Every signature using Guava's ListMultimap<String, TablePermission> (getNamespacePermissions, getPermissions, getTablePermissions, loadAll(Configuration) "Load all permissions from the region server holding _acl_, primarily intended for testing purposes", loadAll(Region) "Loads all of the permission grants stored in a region of the _acl_ table", and a private parser) switched from com.google.common.collect.ListMultimap to the relocated org.apache.hadoop.hbase.shaded.com.google.common.collect.ListMultimap. No behavioral change.]
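Concretely, the relocation is just an import change for code inside HBase; shown side by side (the two lines would not coexist in one file):

// Before: the third-party Guava type appeared in internal signatures.
import com.google.common.collect.ListMultimap;

// After: the copy relocated under HBase's shaded namespace, insulating
// downstream classpaths from Guava version conflicts.
import org.apache.hadoop.hbase.shaded.com.google.common.collect.ListMultimap;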
[22/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9eba7fcf/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockImpl.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockImpl.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockImpl.html
index 504e470..38667c0 100644
[regenerated source listing, lines shifted by four. Recoverable context: delete(Delete) and put(Put) both checkResources(), start and close a region operation, and route the mutation through doBatchMutate so that all edits for a row across column families happen atomically; prepareDeleteTimestamps walks each family's cells and, for delete markers stamped HConstants.LATEST_TIMESTAMP, counts versions per qualifier, issues a Get with setMaxVersions(count) (optionally letting a coprocessor's prePrepareTimeStampForDeleteVersion intercept), and updateDeleteLatestVersionTimeStamp pins the marker to the timestamp of the covered stored cell, falling back to the server's byteNow when fewer than count versions exist.]
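The interesting step above is pinning a LATEST_TIMESTAMP delete marker to a concrete timestamp: count how many versions of the qualifier the delete covers, fetch that many, and reuse the covered cell's timestamp. A simplified sketch of that resolution over an in-memory version list (illustrative types; the real code operates on Cells and a Get):

import java.util.Arrays;
import java.util.List;

public final class DeleteTimestampResolver {
  /** Sentinel meaning "stamp me at resolution time", as HConstants.LATEST_TIMESTAMP. */
  public static final long LATEST_TIMESTAMP = Long.MAX_VALUE;

  private DeleteTimestampResolver() {}

  /**
   * @param storedNewestFirst timestamps stored for the qualifier, newest first,
   *        at most count entries (as a Get with setMaxVersions(count) returns)
   * @param count how many versions the delete marker covers
   * @param now the server clock, used when there is nothing to delete
   */
  public static long resolve(List<Long> storedNewestFirst, int count, long now) {
    if (storedNewestFirst.size() < count) {
      return now; // nothing to delete at that depth; stamp with "now"
    }
    if (storedNewestFirst.size() > count) {
      throw new IllegalStateException("Unexpected size: " + storedNewestFirst.size());
    }
    // Pin the marker to the count-th newest stored version's timestamp.
    return storedNewestFirst.get(count - 1);
  }

  public static void main(String[] args) {
    System.out.println(resolve(Arrays.asList(900L, 850L), 2, 1000L)); // prints 850
  }
}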
[22/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/17128d27/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.DisableTableFuture.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.DisableTableFuture.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.DisableTableFuture.html
index feb42ea..4bd98f4 100644
[regenerated source listing. Two imports were added (MasterProtos.SplitTableRegionRequest and SplitTableRegionResponse), shifting the rest of the import block. The class javadoc reads: HBaseAdmin is no longer a client API; it is marked InterfaceAudience.Private, an HBase-internal class with no guarantees of backwards source or binary compatibility, whose methods can change or go away without deprecation. Use Connection#getAdmin() to obtain an Admin instead of constructing an HBaseAdmin directly, and the Connection should be an unmanaged one obtained via ConnectionFactory#createConnection(Configuration). The class remains @InterfaceAudience.Private @InterfaceStability.Evolving with the ZK_IDENTIFIER_PREFIX constant "hbase-admin-on-".]
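The javadoc's guidance translates to a short usage pattern; a hedged sketch using only the public client API it names (the table name is illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class AdminExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // An unmanaged connection from ConnectionFactory, then an Admin from it;
    // never construct HBaseAdmin directly.
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      System.out.println("t1 exists: " + admin.tableExists(TableName.valueOf("t1")));
    }
  }
}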
[22/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2777c693/hbase-annotations/index.html
--
diff --git a/hbase-annotations/index.html b/hbase-annotations/index.html
index 7d7b876..7f97d2d 100644
[regenerated Maven site pages for Apache HBase - Annotations, version 3.0.0-SNAPSHOT (index.html, integration.html, issue-tracking.html, license.html): the only substantive change is the "Last Published" date moving from 2017-07-11 to 2017-07-12; license.html additionally drops the inlined text of the Apache License, Version 2.0 in favor of the link to http://www.apache.org/licenses/.]
[22/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/90c7dfe4/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.EnableTableProcedureBiConsumer.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.EnableTableProcedureBiConsumer.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.EnableTableProcedureBiConsumer.html
index 75db22d..99a09f9 100644
[regenerated source listing; the hunk is import reflow only, covering java.util.concurrent and java.util.stream, Guava's VisibleForTesting, Netty's Timeout and TimerTask, and the org.apache.hadoop.hbase client, quotas, replication, and shaded-protobuf imports.]
[22/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0821e51a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.ModifyColumnFamilyProcedureBiConsumer.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.ModifyColumnFamilyProcedureBiConsumer.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.ModifyColumnFamilyProcedureBiConsumer.html index 71844ce..75db22d 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.ModifyColumnFamilyProcedureBiConsumer.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.ModifyColumnFamilyProcedureBiConsumer.html @@ -105,2564 +105,2642 @@ 097import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse; 098import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionRequest; 099import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionResponse; -100import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription; -101import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema; -102import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType; -103import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest; -104import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse; -105import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest; -106import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse; -107import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest; -108import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionResponse; -109import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceRequest; -110import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceResponse; -111import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest; -112import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceResponse; -113import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceRequest; -114import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceResponse; -115import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotRequest; -116import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotResponse; -117import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableRequest; -118import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableResponse; -119import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest; -120import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableCatalogJanitorResponse; -121import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersRequest; -122import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersResponse; -123import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableRequest; -124import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableResponse; -125import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnRequest; -126import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnResponse; -127import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureRequest; -128import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureResponse; -129import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterStatusRequest; -130import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterStatusResponse; -131import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest; -132import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse; -133import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest; -134import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse; -135import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultRequest; -136import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultResponse; -137import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetSchemaAlterStatusRequest; -138import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetSchemaAlterStatusResponse; -139import
[22/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2d27954a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.TableFuture.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.TableFuture.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.TableFuture.html
index f5bc73a..feb42ea 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.TableFuture.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.TableFuture.html
@@ -4044,345 +4044,330 @@
 4036
 4037  @Override
 4038  public void drainRegionServers(List<ServerName> servers) throws IOException {
-4039    final List<HBaseProtos.ServerName> pbServers = new ArrayList<>(servers.size());
-4040    for (ServerName server : servers) {
-4041      // Parse to ServerName to do simple validation.
-4042      ServerName.parseServerName(server.toString());
-4043      pbServers.add(ProtobufUtil.toServerName(server));
-4044    }
-4045
-4046    executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
-4047      @Override
-4048      public Void rpcCall() throws ServiceException {
-4049        DrainRegionServersRequest req =
-4050            DrainRegionServersRequest.newBuilder().addAllServerName(pbServers).build();
-4051        master.drainRegionServers(getRpcController(), req);
-4052        return null;
-4053      }
-4054    });
-4055  }
-4056
-4057  @Override
-4058  public List<ServerName> listDrainingRegionServers() throws IOException {
-4059    return executeCallable(new MasterCallable<List<ServerName>>(getConnection(),
-4060        getRpcControllerFactory()) {
-4061      @Override
-4062      public List<ServerName> rpcCall() throws ServiceException {
-4063        ListDrainingRegionServersRequest req = ListDrainingRegionServersRequest.newBuilder().build();
-4064        List<ServerName> servers = new ArrayList<>();
-4065        for (HBaseProtos.ServerName server : master.listDrainingRegionServers(null, req)
-4066            .getServerNameList()) {
-4067          servers.add(ProtobufUtil.toServerName(server));
-4068        }
-4069        return servers;
-4070      }
-4071    });
-4072  }
-4073
-4074  @Override
-4075  public void removeDrainFromRegionServers(List<ServerName> servers) throws IOException {
-4076    final List<HBaseProtos.ServerName> pbServers = new ArrayList<>(servers.size());
-4077    for (ServerName server : servers) {
-4078      pbServers.add(ProtobufUtil.toServerName(server));
-4079    }
-4080
-4081    executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
-4082      @Override
-4083      public Void rpcCall() throws ServiceException {
-4084        RemoveDrainFromRegionServersRequest req = RemoveDrainFromRegionServersRequest.newBuilder()
-4085            .addAllServerName(pbServers).build();
-4086        master.removeDrainFromRegionServers(getRpcController(), req);
-4087        return null;
+4039    executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
+4040      @Override
+4041      public Void rpcCall() throws ServiceException {
+4042        master.drainRegionServers(getRpcController(),
+4043            RequestConverter.buildDrainRegionServersRequest(servers));
+4044        return null;
+4045      }
+4046    });
+4047  }
+4048
+4049  @Override
+4050  public List<ServerName> listDrainingRegionServers() throws IOException {
+4051    return executeCallable(new MasterCallable<List<ServerName>>(getConnection(),
+4052        getRpcControllerFactory()) {
+4053      @Override
+4054      public List<ServerName> rpcCall() throws ServiceException {
+4055        ListDrainingRegionServersRequest req = ListDrainingRegionServersRequest.newBuilder().build();
+4056        List<ServerName> servers = new ArrayList<>();
+4057        for (HBaseProtos.ServerName server : master.listDrainingRegionServers(null, req)
+4058            .getServerNameList()) {
+4059          servers.add(ProtobufUtil.toServerName(server));
+4060        }
+4061        return servers;
+4062      }
+4063    });
+4064  }
+4065
+4066  @Override
+4067  public void removeDrainFromRegionServers(List<ServerName> servers) throws IOException {
+4068    executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
+4069      @Override
+4070      public Void rpcCall() throws ServiceException {
+4071        master.removeDrainFromRegionServers(getRpcController(), RequestConverter.buildRemoveDrainFromRegionServersRequest(servers));
+4072        return null;
+4073      }
+4074    });
+4075  }
+4076
+4077  @Override
+4078  public List<TableCFs> listReplicatedTableCFs() throws IOException {
+4079    List<TableCFs> replicatedTableCFs = new ArrayList<>();
+4080    HTableDescriptor[] tables = listTables();
+4081    for (HTableDescriptor table : tables) {
+4082      HColumnDescriptor[] columns =
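Editor's note: the refactor above replaces the inline protobuf-building loop with a single RequestConverter call. For readers following along, a minimal sketch of what such a builder does, using only the shaded protobuf types already named in this diff (the class name DrainRequestSketch and the method name buildDrainRequest are illustrative, not HBase API):

import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersRequest;

final class DrainRequestSketch {
  // Mirrors the loop removed from HBaseAdmin: convert each ServerName to its
  // protobuf form, then attach the whole list to the request builder.
  static DrainRegionServersRequest buildDrainRequest(List<ServerName> servers) {
    List<HBaseProtos.ServerName> pbServers = new ArrayList<>(servers.size());
    for (ServerName server : servers) {
      pbServers.add(ProtobufUtil.toServerName(server));
    }
    return DrainRegionServersRequest.newBuilder().addAllServerName(pbServers).build();
  }
}

Centralizing the conversion in RequestConverter keeps the admin method bodies down to the single RPC call seen on the + side of the hunk.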
[22/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9fb0764b/devapidocs/org/apache/hadoop/hbase/class-use/HColumnDescriptor.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/class-use/HColumnDescriptor.html b/devapidocs/org/apache/hadoop/hbase/class-use/HColumnDescriptor.html index 9b3e33f..ff07afc 100644 --- a/devapidocs/org/apache/hadoop/hbase/class-use/HColumnDescriptor.html +++ b/devapidocs/org/apache/hadoop/hbase/class-use/HColumnDescriptor.html @@ -122,19 +122,11 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods. -org.apache.hadoop.hbase.regionserver - - - org.apache.hadoop.hbase.replication Multi Cluster Replication - -org.apache.hadoop.hbase.security - - org.apache.hadoop.hbase.security.access @@ -150,10 +142,6 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods. service. - -org.apache.hadoop.hbase.tool - - @@ -823,39 +811,6 @@ service. Uses of HColumnDescriptor in org.apache.hadoop.hbase.mapreduce - -Fields in org.apache.hadoop.hbase.mapreduce with type parameters of type HColumnDescriptor - -Modifier and Type -Field and Description - - - -(package private) static http://docs.oracle.com/javase/8/docs/api/java/util/function/Function.html?is-external=true; title="class or interface in java.util.function">FunctionHColumnDescriptor,http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">String -HFileOutputFormat2.blockSizeDetails -Serialize column family to block size map to configuration. - - - -(package private) static http://docs.oracle.com/javase/8/docs/api/java/util/function/Function.html?is-external=true; title="class or interface in java.util.function">FunctionHColumnDescriptor,http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">String -HFileOutputFormat2.bloomTypeDetails -Serialize column family to bloom type map to configuration. - - - -(package private) static http://docs.oracle.com/javase/8/docs/api/java/util/function/Function.html?is-external=true; title="class or interface in java.util.function">FunctionHColumnDescriptor,http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">String -HFileOutputFormat2.compressionDetails -Serialize column family to compression algorithm map to configuration. - - - -(package private) static http://docs.oracle.com/javase/8/docs/api/java/util/function/Function.html?is-external=true; title="class or interface in java.util.function">FunctionHColumnDescriptor,http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">String -HFileOutputFormat2.dataBlockEncodingDetails -Serialize column family to data block encoding map to configuration. - - - - Methods in org.apache.hadoop.hbase.mapreduce with parameters of type HColumnDescriptor @@ -887,20 +842,6 @@ service. 
- -Method parameters in org.apache.hadoop.hbase.mapreduce with type arguments of type HColumnDescriptor - -Modifier and Type -Method and Description - - - -(package private) static http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">String -HFileOutputFormat2.serializeColumnFamilyAttribute(http://docs.oracle.com/javase/8/docs/api/java/util/function/Function.html?is-external=true; title="class or interface in java.util.function">FunctionHColumnDescriptor,http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">Stringfn, - http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true; title="class or interface in java.util">ListHTableDescriptorallTables) - - - @@ -1186,24 +1127,6 @@ service. static StoreFileWriter -MobUtils.createWriter(org.apache.hadoop.conf.Configurationconf, -org.apache.hadoop.fs.FileSystemfs, -HColumnDescriptorfamily, -org.apache.hadoop.fs.Pathpath, -longmaxKeyCount, -Compression.Algorithmcompression, -CacheConfigcacheConfig, -Encryption.ContextcryptoContext, -ChecksumTypechecksumType, -intbytesPerChecksum, -intblocksize, -BloomTypebloomType, -booleanisCompaction) -Creates a writer for the mob file in temp directory. - - - -static StoreFileWriter MobUtils.createWriter(org.apache.hadoop.conf.Configurationconf, org.apache.hadoop.fs.FileSystemfs, HColumnDescriptorfamily, @@ -1218,7 +1141,7 @@ service. Creates a writer for the mob file in temp directory. - + static StoreFileWriter MobUtils.createWriter(org.apache.hadoop.conf.Configurationconf,
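Editor's note: the fields and the serializeColumnFamilyAttribute signature above show HFileOutputFormat2's pattern of serializing per-family attributes through a Function<HColumnDescriptor,String>. A JDK-only sketch of that pattern under stated assumptions: Family is a hypothetical stand-in for HColumnDescriptor, and the "family=value&family=value" join format is illustrative, not the exact wire format:

import java.util.Arrays;
import java.util.List;
import java.util.function.Function;
import java.util.stream.Collectors;

final class FamilyAttributeSketch {
  // Hypothetical stand-in for HColumnDescriptor; real code reads attributes
  // straight off the descriptor.
  static final class Family {
    final String name;
    final String compression;
    Family(String name, String compression) { this.name = name; this.compression = compression; }
  }

  // Map each column family through the supplied attribute extractor and join
  // the results into one configuration string, as the field descriptions above
  // ("Serialize column family to ... map to configuration") suggest.
  static String serializeFamilyAttribute(Function<Family, String> fn, List<Family> families) {
    return families.stream()
        .map(f -> f.name + "=" + fn.apply(f))
        .collect(Collectors.joining("&"));
  }

  public static void main(String[] args) {
    List<Family> fams = Arrays.asList(new Family("cf1", "GZ"), new Family("cf2", "SNAPPY"));
    System.out.println(serializeFamilyAttribute(f -> f.compression, fams));
    // prints: cf1=GZ&cf2=SNAPPY
  }
}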
[22/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b3b50f22/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.StatisticsThread.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.StatisticsThread.html b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.StatisticsThread.html
index 43db01d..79dc4e0 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.StatisticsThread.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.StatisticsThread.html
@@ -235,7 +235,7 @@
 227  public BucketCache(String ioEngineName, long capacity, int blockSize, int[] bucketSizes,
 228      int writerThreadNum, int writerQLen, String persistencePath, int ioErrorsTolerationDuration)
 229      throws FileNotFoundException, IOException {
-230    this.ioEngine = getIOEngineFromName(ioEngineName, capacity);
+230    this.ioEngine = getIOEngineFromName(ioEngineName, capacity, persistencePath);
 231    this.writerThreads = new WriterThread[writerThreadNum];
 232    long blockNumCapacity = capacity / blockSize;
 233    if (blockNumCapacity >= Integer.MAX_VALUE) {
@@ -317,1229 +317,1230 @@
 309   * Get the IOEngine from the IO engine name
 310   * @param ioEngineName
 311   * @param capacity
-312   * @return the IOEngine
-313   * @throws IOException
-314   */
-315  private IOEngine getIOEngineFromName(String ioEngineName, long capacity)
-316      throws IOException {
-317    if (ioEngineName.startsWith("file:") || ioEngineName.startsWith("files:")) {
-318      // In order to make the usage simple, we only need the prefix 'files:' in
-319      // document whether one or multiple file(s), but also support 'file:' for
-320      // the compatibility
-321      String[] filePaths = ioEngineName.substring(ioEngineName.indexOf(":") + 1)
-322          .split(FileIOEngine.FILE_DELIMITER);
-323      return new FileIOEngine(capacity, filePaths);
-324    } else if (ioEngineName.startsWith("offheap")) {
-325      return new ByteBufferIOEngine(capacity, true);
-326    } else if (ioEngineName.startsWith("heap")) {
-327      return new ByteBufferIOEngine(capacity, false);
-328    } else if (ioEngineName.startsWith("mmap:")) {
-329      return new FileMmapEngine(ioEngineName.substring(5), capacity);
-330    } else {
-331      throw new IllegalArgumentException(
-332          "Don't understand io engine name for cache - prefix with file:, heap or offheap");
-333    }
-334  }
-335
-336  /**
-337   * Cache the block with the specified name and buffer.
-338   * @param cacheKey block's cache key
-339   * @param buf block buffer
-340   */
-341  @Override
-342  public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf) {
-343    cacheBlock(cacheKey, buf, false, false);
-344  }
-345
-346  /**
-347   * Cache the block with the specified name and buffer.
-348   * @param cacheKey block's cache key
-349   * @param cachedItem block buffer
-350   * @param inMemory if block is in-memory
-351   * @param cacheDataInL1
-352   */
-353  @Override
-354  public void cacheBlock(BlockCacheKey cacheKey, Cacheable cachedItem, boolean inMemory,
-355      final boolean cacheDataInL1) {
-356    cacheBlockWithWait(cacheKey, cachedItem, inMemory, wait_when_cache);
-357  }
-358
-359  /**
-360   * Cache the block to ramCache
-361   * @param cacheKey block's cache key
-362   * @param cachedItem block buffer
-363   * @param inMemory if block is in-memory
-364   * @param wait if true, blocking wait when queue is full
-365   */
-366  public void cacheBlockWithWait(BlockCacheKey cacheKey, Cacheable cachedItem, boolean inMemory,
-367      boolean wait) {
-368    if (LOG.isTraceEnabled()) LOG.trace("Caching key=" + cacheKey + ", item=" + cachedItem);
-369    if (!cacheEnabled) {
-370      return;
-371    }
-372
-373    if (backingMap.containsKey(cacheKey)) {
-374      return;
-375    }
-376
-377    /*
-378     * Stuff the entry into the RAM cache so it can get drained to the persistent store
-379     */
-380    RAMQueueEntry re =
-381        new RAMQueueEntry(cacheKey, cachedItem, accessCount.incrementAndGet(), inMemory);
-382    if (ramCache.putIfAbsent(cacheKey, re) != null) {
-383      return;
-384    }
-385    int queueNum = (cacheKey.hashCode() & 0x7FFFFFFF) % writerQueues.size();
-386    BlockingQueue<RAMQueueEntry> bq = writerQueues.get(queueNum);
-387    boolean successfulAddition = false;
-388    if (wait) {
-389      try {
-390        successfulAddition = bq.offer(re, DEFAULT_CACHE_WAIT_TIME, TimeUnit.MILLISECONDS);
-391      } catch (InterruptedException e) {
-392        Thread.currentThread().interrupt();
-393      }
-394    } else {
-395      successfulAddition = bq.offer(re);
-396    }
-397    if (!successfulAddition) {
-398      ramCache.remove(cacheKey);
-399      cacheStats.failInsert();
-400    } else {
-401
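Editor's note: cacheBlockWithWait above follows a common staging pattern: put the entry in a RAM map first, pick a writer queue by key hash, and roll the map entry back if the queue rejects the hand-off. A self-contained, JDK-only sketch of that pattern; all names (StagedWriteSketch, cacheWithWait, the queue count and timeouts) are illustrative, not the BucketCache types:

import java.util.Arrays;
import java.util.List;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.TimeUnit;

final class StagedWriteSketch<K, V> {
  private final ConcurrentMap<K, V> ramCache = new ConcurrentHashMap<>();
  private final List<BlockingQueue<V>> writerQueues =
      Arrays.<BlockingQueue<V>>asList(new ArrayBlockingQueue<V>(64), new ArrayBlockingQueue<V>(64));

  boolean cacheWithWait(K key, V value, boolean wait) throws InterruptedException {
    // Stage the entry so writer threads can drain it to the persistent store.
    if (ramCache.putIfAbsent(key, value) != null) {
      return false; // already staged by someone else
    }
    // Pick a queue deterministically from the key hash; the mask keeps it non-negative.
    BlockingQueue<V> q = writerQueues.get((key.hashCode() & 0x7FFFFFFF) % writerQueues.size());
    boolean added = wait ? q.offer(value, 50, TimeUnit.MILLISECONDS) : q.offer(value);
    if (!added) {
      ramCache.remove(key); // roll back the staged entry when the queue is full
    }
    return added;
  }
}

The rollback on a full queue is what keeps the RAM map and the write queues consistent, exactly as the failInsert branch does in the original.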
[22/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ca9f6925/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.EnableTableProcedureBiConsumer.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.EnableTableProcedureBiConsumer.html b/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.EnableTableProcedureBiConsumer.html new file mode 100644 index 000..9b32517 --- /dev/null +++ b/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.EnableTableProcedureBiConsumer.html @@ -0,0 +1,339 @@ +http://www.w3.org/TR/html4/loose.dtd;> + + + + + +RawAsyncHBaseAdmin.EnableTableProcedureBiConsumer (Apache HBase 3.0.0-SNAPSHOT API) + + + + + +var methods = {"i0":10}; +var tabs = {65535:["t0","All Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]}; +var altColor = "altColor"; +var rowColor = "rowColor"; +var tableTab = "tableTab"; +var activeTableTab = "activeTableTab"; + + +JavaScript is disabled on your browser. + + + + + +Skip navigation links + + + + +Overview +Package +Class +Use +Tree +Deprecated +Index +Help + + + + +PrevClass +NextClass + + +Frames +NoFrames + + +AllClasses + + + + + + + +Summary: +Nested| +Field| +Constr| +Method + + +Detail: +Field| +Constr| +Method + + + + + + + + +org.apache.hadoop.hbase.client +Class RawAsyncHBaseAdmin.EnableTableProcedureBiConsumer + + + +http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true; title="class or interface in java.lang">java.lang.Object + + +org.apache.hadoop.hbase.client.RawAsyncHBaseAdmin.ProcedureBiConsumer + + +org.apache.hadoop.hbase.client.RawAsyncHBaseAdmin.TableProcedureBiConsumer + + +org.apache.hadoop.hbase.client.RawAsyncHBaseAdmin.EnableTableProcedureBiConsumer + + + + + + + + + + + +All Implemented Interfaces: +http://docs.oracle.com/javase/8/docs/api/java/util/function/BiConsumer.html?is-external=true; title="class or interface in java.util.function">BiConsumerhttp://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true; title="class or interface in java.lang">Void,http://docs.oracle.com/javase/8/docs/api/java/lang/Throwable.html?is-external=true; title="class or interface in java.lang">Throwable + + +Enclosing class: +RawAsyncHBaseAdmin + + + +private class RawAsyncHBaseAdmin.EnableTableProcedureBiConsumer +extends RawAsyncHBaseAdmin.TableProcedureBiConsumer + + + + + + + + + + + +Field Summary + + + + +Fields inherited from classorg.apache.hadoop.hbase.client.RawAsyncHBaseAdmin.TableProcedureBiConsumer +tableName + + + + + +Fields inherited from classorg.apache.hadoop.hbase.client.RawAsyncHBaseAdmin.ProcedureBiConsumer +admin + + + + + + + + +Constructor Summary + +Constructors + +Constructor and Description + + +EnableTableProcedureBiConsumer(AsyncAdminadmin, + TableNametableName) + + + + + + + + + +Method Summary + +All MethodsInstance MethodsConcrete Methods + +Modifier and Type +Method and Description + + +(package private) http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">String +getOperationType() + + + + + + +Methods inherited from classorg.apache.hadoop.hbase.client.RawAsyncHBaseAdmin.TableProcedureBiConsumer +getDescription, onError, onFinished + + + + + +Methods inherited from classorg.apache.hadoop.hbase.client.RawAsyncHBaseAdmin.ProcedureBiConsumer +accept + + + + + +Methods inherited from classjava.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true; title="class or interface in java.lang">Object 
+http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#clone--; title="class or interface in java.lang">clone, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#equals-java.lang.Object-; title="class or interface in java.lang">equals, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#finalize--; title="class or interface in java.lang">finalize, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#getClass--; title="class or interface in java.lang">getClass, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#hashCode--; title="class or interface in java.lang">hashCode, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#notify--; title="class or interface in java.lang">notify,
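Editor's note: the page above shows that EnableTableProcedureBiConsumer implements BiConsumer<Void, Throwable>, which is precisely the shape CompletableFuture hands to whenComplete. A minimal JDK sketch of how such a (result, error) consumer attaches to a procedure future; the class name and messages are illustrative:

import java.util.concurrent.CompletableFuture;
import java.util.function.BiConsumer;

final class ProcedureCallbackSketch {
  // Same functional shape as RawAsyncHBaseAdmin's ProcedureBiConsumer subclasses:
  // a (result, error) pair where exactly one side is meaningful.
  static final BiConsumer<Void, Throwable> onEnable = (ignored, err) -> {
    if (err != null) {
      System.out.println("Operation: ENABLE failed: " + err.getMessage());
    } else {
      System.out.println("Operation: ENABLE completed");
    }
  };

  public static void main(String[] args) {
    CompletableFuture<Void> procedureFuture = CompletableFuture.completedFuture(null);
    procedureFuture.whenComplete(onEnable); // prints the success branch
  }
}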
[22/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8e3b63ca/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.TableOperator.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.TableOperator.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.TableOperator.html index dc12c09..82506d2 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.TableOperator.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.TableOperator.html @@ -54,2261 +54,2259 @@ 046import org.apache.commons.io.IOUtils; 047import org.apache.commons.logging.Log; 048import org.apache.commons.logging.LogFactory; -049import org.apache.directory.api.util.OptionalComponentsMonitor; -050import org.apache.hadoop.hbase.HRegionInfo; -051import org.apache.hadoop.hbase.HRegionLocation; -052import org.apache.hadoop.hbase.MetaTableAccessor; -053import org.apache.hadoop.hbase.MetaTableAccessor.QueryType; -054import org.apache.hadoop.hbase.NotServingRegionException; -055import org.apache.hadoop.hbase.ProcedureInfo; -056import org.apache.hadoop.hbase.RegionLocations; -057import org.apache.hadoop.hbase.ServerName; -058import org.apache.hadoop.hbase.NamespaceDescriptor; -059import org.apache.hadoop.hbase.HConstants; -060import org.apache.hadoop.hbase.TableExistsException; -061import org.apache.hadoop.hbase.TableName; -062import org.apache.hadoop.hbase.AsyncMetaTableAccessor; -063import org.apache.hadoop.hbase.TableNotDisabledException; -064import org.apache.hadoop.hbase.TableNotEnabledException; -065import org.apache.hadoop.hbase.TableNotFoundException; -066import org.apache.hadoop.hbase.UnknownRegionException; -067import org.apache.hadoop.hbase.classification.InterfaceAudience; -068import org.apache.hadoop.hbase.classification.InterfaceStability; -069import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.AdminRequestCallerBuilder; -070import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.MasterRequestCallerBuilder; -071import org.apache.hadoop.hbase.client.Scan.ReadType; -072import org.apache.hadoop.hbase.client.replication.ReplicationSerDeHelper; -073import org.apache.hadoop.hbase.client.replication.TableCFs; -074import org.apache.hadoop.hbase.exceptions.DeserializationException; -075import org.apache.hadoop.hbase.ipc.HBaseRpcController; -076import org.apache.hadoop.hbase.quotas.QuotaFilter; -077import org.apache.hadoop.hbase.quotas.QuotaSettings; -078import org.apache.hadoop.hbase.quotas.QuotaTableUtil; -079import org.apache.hadoop.hbase.replication.ReplicationException; -080import org.apache.hadoop.hbase.replication.ReplicationPeerConfig; -081import org.apache.hadoop.hbase.replication.ReplicationPeerDescription; -082import org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback; -083import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; -084import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter; -085import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService; -086import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest; -087import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse; -088import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest; -089import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse; -090import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest; -091import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse; -092import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionRequest; -093import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionResponse; -094import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionRequest; -095import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionResponse; -096import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription; -097import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema; -098import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest; -099import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse; -100import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest; -101import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse; -102import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest; -103import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionResponse; -104import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceRequest; -105import
[22/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/aecb1286/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.QueryType.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.QueryType.html b/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.QueryType.html
index e65748d..91a0ffa 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.QueryType.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.QueryType.html
@@ -372,1874 +372,1873 @@
 364   * is stored in the name, so the returned object should only be used for the fields
 365   * in the regionName.
 366   */
-367  protected static HRegionInfo parseRegionInfoFromRegionName(byte[] regionName)
-368      throws IOException {
-369    byte[][] fields = HRegionInfo.parseRegionName(regionName);
-370    long regionId = Long.parseLong(Bytes.toString(fields[2]));
-371    int replicaId = fields.length > 3 ? Integer.parseInt(Bytes.toString(fields[3]), 16) : 0;
-372    return new HRegionInfo(
-373        TableName.valueOf(fields[0]), fields[1], fields[1], false, regionId, replicaId);
-374  }
-375
-376  /**
-377   * Gets the result in hbase:meta for the specified region.
-378   * @param connection connection we're using
-379   * @param regionName region we're looking for
-380   * @return result of the specified region
-381   * @throws IOException
-382   */
-383  public static Result getRegionResult(Connection connection,
-384      byte[] regionName) throws IOException {
-385    Get get = new Get(regionName);
-386    get.addFamily(HConstants.CATALOG_FAMILY);
-387    return get(getMetaHTable(connection), get);
-388  }
-389
-390  /**
-391   * Get regions from the merge qualifier of the specified merged region
-392   * @return null if it doesn't contain merge qualifier, else two merge regions
-393   * @throws IOException
-394   */
-395  @Nullable
-396  public static Pair<HRegionInfo, HRegionInfo> getRegionsFromMergeQualifier(
-397      Connection connection, byte[] regionName) throws IOException {
-398    Result result = getRegionResult(connection, regionName);
-399    HRegionInfo mergeA = getHRegionInfo(result, HConstants.MERGEA_QUALIFIER);
-400    HRegionInfo mergeB = getHRegionInfo(result, HConstants.MERGEB_QUALIFIER);
-401    if (mergeA == null && mergeB == null) {
-402      return null;
-403    }
-404    return new Pair<>(mergeA, mergeB);
-405  }
-406
-407  /**
-408   * Checks if the specified table exists. Looks at the hbase:meta table hosted on
-409   * the specified server.
-410   * @param connection connection we're using
-411   * @param tableName table to check
-412   * @return true if the table exists in meta, false if not
-413   * @throws IOException
-414   */
-415  public static boolean tableExists(Connection connection,
-416      final TableName tableName)
-417      throws IOException {
-418    // Catalog tables always exist.
-419    return tableName.equals(TableName.META_TABLE_NAME)
-420        || getTableState(connection, tableName) != null;
-421  }
-422
-423  /**
-424   * Lists all of the regions currently in META.
-425   *
-426   * @param connection to connect with
-427   * @param excludeOfflinedSplitParents False if we are to include offlined/splitparents regions,
-428   *                                    true and we'll leave out offlined regions from returned list
-429   * @return List of all user-space regions.
-430   * @throws IOException
-431   */
-432  @VisibleForTesting
-433  public static List<HRegionInfo> getAllRegions(Connection connection,
-434      boolean excludeOfflinedSplitParents)
-435      throws IOException {
-436    List<Pair<HRegionInfo, ServerName>> result;
-437
-438    result = getTableRegionsAndLocations(connection, null,
-439        excludeOfflinedSplitParents);
-440
-441    return getListOfHRegionInfos(result);
-442
-443  }
-444
-445  /**
-446   * Gets all of the regions of the specified table. Do not use this method
-447   * to get meta table regions, use methods in MetaTableLocator instead.
-448   * @param connection connection we're using
-449   * @param tableName table we're looking for
-450   * @return Ordered list of {@link HRegionInfo}.
-451   * @throws IOException
-452   */
-453  public static List<HRegionInfo> getTableRegions(Connection connection, TableName tableName)
-454      throws IOException {
-455    return getTableRegions(connection, tableName, false);
-456  }
-457
-458  /**
-459   * Gets all of the regions of the specified table. Do not use this method
-460   * to get meta table regions, use methods in MetaTableLocator instead.
-461   * @param connection connection we're using
-462   * @param tableName table we're looking for
-463   * @param excludeOfflinedSplitParents If true, do not include offlined split
-464   *                                    parents in the return.
-465   * @return Ordered list of {@link HRegionInfo}.
-466   * @throws IOException
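Editor's note: a short usage sketch for the accessors shown above, assuming a standard client Connection from ConnectionFactory; the table name is hypothetical and error handling is elided:

import java.io.IOException;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

final class MetaLookupSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf)) {
      TableName table = TableName.valueOf("my_table"); // hypothetical table
      // tableExists consults hbase:meta (catalog tables always report true).
      if (MetaTableAccessor.tableExists(connection, table)) {
        // Ordered list of the table's regions, offlined split parents excluded.
        List<HRegionInfo> regions = MetaTableAccessor.getTableRegions(connection, table);
        regions.forEach(r -> System.out.println(r.getRegionNameAsString()));
      }
    }
  }
}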
[22/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a719cd00/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.LocalityBasedCostFunction.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.LocalityBasedCostFunction.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.LocalityBasedCostFunction.html new file mode 100644 index 000..c895448 --- /dev/null +++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.LocalityBasedCostFunction.html @@ -0,0 +1,1779 @@ +http://www.w3.org/TR/html4/loose.dtd;> + + +Source code + + + + +001/** +002 * Licensed to the Apache Software Foundation (ASF) under one +003 * or more contributor license agreements. See the NOTICE file +004 * distributed with this work for additional information +005 * regarding copyright ownership. The ASF licenses this file +006 * to you under the Apache License, Version 2.0 (the +007 * "License"); you may not use this file except in compliance +008 * with the License. You may obtain a copy of the License at +009 * +010 * http://www.apache.org/licenses/LICENSE-2.0 +011 * +012 * Unless required by applicable law or agreed to in writing, software +013 * distributed under the License is distributed on an "AS IS" BASIS, +014 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +015 * See the License for the specific language governing permissions and +016 * limitations under the License. +017 */ +018package org.apache.hadoop.hbase.master.balancer; +019 +020import java.util.ArrayDeque; +021import java.util.ArrayList; +022import java.util.Arrays; +023import java.util.Collection; +024import java.util.Collections; +025import java.util.Deque; +026import java.util.HashMap; +027import java.util.LinkedList; +028import java.util.List; +029import java.util.Map; +030import java.util.Map.Entry; +031import java.util.Random; +032 +033import org.apache.commons.logging.Log; +034import org.apache.commons.logging.LogFactory; +035import org.apache.hadoop.conf.Configuration; +036import org.apache.hadoop.hbase.ClusterStatus; +037import org.apache.hadoop.hbase.HBaseInterfaceAudience; +038import org.apache.hadoop.hbase.HConstants; +039import org.apache.hadoop.hbase.HRegionInfo; +040import org.apache.hadoop.hbase.RegionLoad; +041import org.apache.hadoop.hbase.ServerLoad; +042import org.apache.hadoop.hbase.ServerName; +043import org.apache.hadoop.hbase.TableName; +044import org.apache.hadoop.hbase.classification.InterfaceAudience; +045import org.apache.hadoop.hbase.master.MasterServices; +046import org.apache.hadoop.hbase.master.RegionPlan; +047import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.Action; +048import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.Action.Type; +049import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.AssignRegionAction; +050import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.LocalityType; +051import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.MoveRegionAction; +052import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.SwapRegionsAction; +053import org.apache.hadoop.hbase.util.Bytes; +054import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; +055 +056import com.google.common.base.Optional; +057import com.google.common.collect.Lists; +058 +059/** +060 * pThis is a best effort load balancer. 
Given a Cost function F(C) =&gt; x It will
+061 * randomly try and mutate the cluster to Cprime. If F(Cprime) &lt; F(C) then the
+062 * new cluster state becomes the plan. It includes costs functions to compute the cost of:</p>
+063 * <ul>
+064 * <li>Region Load</li>
+065 * <li>Table Load</li>
+066 * <li>Data Locality</li>
+067 * <li>Memstore Sizes</li>
+068 * <li>Storefile Sizes</li>
+069 * </ul>
+070 *
+071 *
+072 * <p>Every cost function returns a number between 0 and 1 inclusive; where 0 is the lowest cost
+073 * best solution, and 1 is the highest possible cost and the worst solution. The computed costs are
+074 * scaled by their respective multipliers:</p>
+075 *
+076 * <ul>
+077 * <li>hbase.master.balancer.stochastic.regionLoadCost</li>
+078 * <li>hbase.master.balancer.stochastic.moveCost</li>
+079 * <li>hbase.master.balancer.stochastic.tableLoadCost</li>
+080 * <li>hbase.master.balancer.stochastic.localityCost</li>
+081 * <li>hbase.master.balancer.stochastic.memstoreSizeCost</li>
+082 * <li>hbase.master.balancer.stochastic.storefileSizeCost</li>
+083 * </ul>
+084 *
+085 * <p>In addition to the above configurations, the balancer can be tuned by the following
+086 * configuration values:</p>
+087 * <ul>
+088 * <li>hbase.master.balancer.stochastic.maxMoveRegions which
+089 * controls what the max number of regions that can be moved in a single invocation of this
+090 * balancer.</li>
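Editor's note: the contract in the class comment above (each cost function returns a value in [0, 1], scaled by a per-function multiplier) reduces to a weighted sum that the balancer tries to lower with each mutation. A minimal sketch of that arithmetic; the multiplier values are made up and merely stand in for the hbase.master.balancer.stochastic.* settings listed above:

final class WeightedCostSketch {
  // One (multiplier, cost) pair per cost function; costs are expected in [0, 1].
  static double totalCost(double[] multipliers, double[] costs) {
    double total = 0;
    for (int i = 0; i < costs.length; i++) {
      double c = Math.min(1.0, Math.max(0.0, costs[i])); // clamp defensively
      total += multipliers[i] * c;
    }
    return total;
  }

  public static void main(String[] args) {
    // e.g. regionLoadCost, moveCost, localityCost multipliers (illustrative values)
    double[] multipliers = {500, 7, 25};
    double[] costs = {0.2, 0.05, 0.6};
    // A mutated cluster state C' is accepted when its total cost drops: F(C') < F(C).
    System.out.println(totalCost(multipliers, costs));
  }
}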
[22/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/476c54ed/devapidocs/src-html/org/apache/hadoop/hbase/quotas/QuotaTableUtil.NamespaceQuotasVisitor.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/quotas/QuotaTableUtil.NamespaceQuotasVisitor.html b/devapidocs/src-html/org/apache/hadoop/hbase/quotas/QuotaTableUtil.NamespaceQuotasVisitor.html index 509b93c..3c6f9b8 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/quotas/QuotaTableUtil.NamespaceQuotasVisitor.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/quotas/QuotaTableUtil.NamespaceQuotasVisitor.html @@ -53,606 +53,717 @@ 045import org.apache.hadoop.hbase.client.ResultScanner; 046import org.apache.hadoop.hbase.client.Scan; 047import org.apache.hadoop.hbase.client.Table; -048import org.apache.hadoop.hbase.filter.CompareFilter; -049import org.apache.hadoop.hbase.filter.Filter; -050import org.apache.hadoop.hbase.filter.FilterList; -051import org.apache.hadoop.hbase.filter.QualifierFilter; -052import org.apache.hadoop.hbase.filter.RegexStringComparator; -053import org.apache.hadoop.hbase.filter.RowFilter; -054import org.apache.hadoop.hbase.protobuf.ProtobufMagic; -055import org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString; -056import org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException; -057import org.apache.hadoop.hbase.shaded.com.google.protobuf.UnsafeByteOperations; -058import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; -059import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos; -060import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos; -061import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetQuotaStatesResponse; -062import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaRegionSizesResponse; -063import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaSnapshotsResponse; -064import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaSnapshotsResponse.TableQuotaSnapshot; -065import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaRegionSizesResponse.RegionSizes; -066import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Quotas; -067import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota; -068import org.apache.hadoop.hbase.util.Bytes; -069import org.apache.hadoop.hbase.util.Strings; -070 -071/** -072 * Helper class to interact with the quota table. 
-073 * <pre>
-074 *     ROW-KEY      FAM/QUAL        DATA
-075 *   n.&lt;namespace&gt; q:s         &lt;global-quotas&gt;
-076 *   t.&lt;namespace&gt; u:p        &lt;namespace-quota policy&gt;
-077 *   t.&lt;table&gt; q:s         &lt;global-quotas&gt;
-078 *   t.&lt;table&gt; u:p        &lt;table-quota policy&gt;
-079 *   u.&lt;user&gt; q:s         &lt;global-quotas&gt;
-080 *   u.&lt;user&gt; q:s.&lt;table&gt; &lt;table-quotas&gt;
-081 *   u.&lt;user&gt; q:s.&lt;ns&gt;:  &lt;namespace-quotas&gt;
-082 * </pre>
-083 */
-084@InterfaceAudience.Private
-085@InterfaceStability.Evolving
-086public class QuotaTableUtil {
-087  private static final Log LOG = LogFactory.getLog(QuotaTableUtil.class);
-088
-089  /** System table for quotas */
-090  public static final TableName QUOTA_TABLE_NAME =
-091      TableName.valueOf(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR, "quota");
-092
-093  protected static final byte[] QUOTA_FAMILY_INFO = Bytes.toBytes("q");
-094  protected static final byte[] QUOTA_FAMILY_USAGE = Bytes.toBytes("u");
-095  protected static final byte[] QUOTA_QUALIFIER_SETTINGS = Bytes.toBytes("s");
-096  protected static final byte[] QUOTA_QUALIFIER_SETTINGS_PREFIX = Bytes.toBytes("s.");
-097  protected static final byte[] QUOTA_QUALIFIER_POLICY = Bytes.toBytes("p");
-098  protected static final String QUOTA_POLICY_COLUMN =
-099      Bytes.toString(QUOTA_FAMILY_USAGE) + ":" + Bytes.toString(QUOTA_QUALIFIER_POLICY);
-100  protected static final byte[] QUOTA_USER_ROW_KEY_PREFIX = Bytes.toBytes("u.");
-101  protected static final byte[] QUOTA_TABLE_ROW_KEY_PREFIX = Bytes.toBytes("t.");
-102  protected static final byte[] QUOTA_NAMESPACE_ROW_KEY_PREFIX = Bytes.toBytes("n.");
-103
-104  /* =========================================================================
-105   *  Quota "settings" helpers
-106   */
-107  public static Quotas getTableQuota(final Connection connection, final TableName table)
-108      throws IOException {
-109    return getQuotas(connection, getTableRowKey(table));
-110  }
-111
-112  public static Quotas getNamespaceQuota(final Connection connection, final String namespace)
-113      throws IOException {
-114    return getQuotas(connection, getNamespaceRowKey(namespace));
-115  }
-116
-117  public static Quotas getUserQuota(final Connection connection, final String user)
-118      throws IOException {
-119
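Editor's note: the <pre> table above encodes the entire quota-table layout in the row-key prefix. A sketch of how those keys are assembled from the prefix constants shown in the class; the helper names are illustrative (QuotaTableUtil keeps its own private equivalents such as getTableRowKey):

import org.apache.hadoop.hbase.util.Bytes;

final class QuotaRowKeySketch {
  static final byte[] USER_PREFIX = Bytes.toBytes("u.");
  static final byte[] TABLE_PREFIX = Bytes.toBytes("t.");
  static final byte[] NAMESPACE_PREFIX = Bytes.toBytes("n.");

  // u.<user>, t.<table>, n.<namespace>: a two-byte prefix plus the entity name.
  static byte[] userRowKey(String user) {
    return Bytes.add(USER_PREFIX, Bytes.toBytes(user));
  }

  static byte[] tableRowKey(String table) {
    return Bytes.add(TABLE_PREFIX, Bytes.toBytes(table));
  }

  static byte[] namespaceRowKey(String namespace) {
    return Bytes.add(NAMESPACE_PREFIX, Bytes.toBytes(namespace));
  }
}

Prefixing the key with the entity type keeps all quotas of one kind contiguous in the quota table, so a scan with a single row prefix retrieves, say, every user quota.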
[22/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/77a552c4/devapidocs/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.html b/devapidocs/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.html index 4017927..708cf4c 100644 --- a/devapidocs/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.html +++ b/devapidocs/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.html @@ -309,8 +309,8 @@ implements http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true; title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true; title="class or interface in java.lang">Void -addColumnFamily(TableNametableName, - HColumnDescriptorcolumnFamily) +addColumnFamily(TableNametableName, + ColumnFamilyDescriptorcolumnFamily) Add a column family to an existing table. @@ -926,8 +926,8 @@ implements http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true; title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true; title="class or interface in java.lang">Void -modifyColumnFamily(TableNametableName, - HColumnDescriptorcolumnFamily) +modifyColumnFamily(TableNametableName, + ColumnFamilyDescriptorcolumnFamily) Modify an existing column family on a table. @@ -1852,19 +1852,19 @@ implements + addColumnFamily publichttp://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true; title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true; title="class or interface in java.lang">VoidaddColumnFamily(TableNametableName, - HColumnDescriptorcolumnFamily) -Description copied from interface:AsyncAdmin + ColumnFamilyDescriptorcolumnFamily) +Description copied from interface:AsyncAdmin Add a column family to an existing table. Specified by: -addColumnFamilyin interfaceAsyncAdmin +addColumnFamilyin interfaceAsyncAdmin Parameters: tableName - name of the table to add column family to columnFamily - column family descriptor of column family to be added @@ -1890,19 +1890,19 @@ implements + modifyColumnFamily publichttp://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true; title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true; title="class or interface in java.lang">VoidmodifyColumnFamily(TableNametableName, - HColumnDescriptorcolumnFamily) -Description copied from interface:AsyncAdmin + ColumnFamilyDescriptorcolumnFamily) +Description copied from interface:AsyncAdmin Modify an existing column family on a table. 
Specified by: -modifyColumnFamilyin interfaceAsyncAdmin +modifyColumnFamilyin interfaceAsyncAdmin Parameters: tableName - name of table columnFamily - new column family descriptor to use http://git-wip-us.apache.org/repos/asf/hbase-site/blob/77a552c4/devapidocs/org/apache/hadoop/hbase/client/ClusterStatusListener.MulticastListener.ClusterStatusHandler.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/client/ClusterStatusListener.MulticastListener.ClusterStatusHandler.html b/devapidocs/org/apache/hadoop/hbase/client/ClusterStatusListener.MulticastListener.ClusterStatusHandler.html index b9ea258..d44aa44 100644 --- a/devapidocs/org/apache/hadoop/hbase/client/ClusterStatusListener.MulticastListener.ClusterStatusHandler.html +++ b/devapidocs/org/apache/hadoop/hbase/client/ClusterStatusListener.MulticastListener.ClusterStatusHandler.html @@ -50,7 +50,7 @@ var activeTableTab = "activeTableTab"; PrevClass -NextClass +NextClass Frames @@ -351,7 +351,7 @@ extends io.netty.channel.SimpleChannelInboundHandlerio.netty.channel.socket. PrevClass -NextClass +NextClass Frames http://git-wip-us.apache.org/repos/asf/hbase-site/blob/77a552c4/devapidocs/org/apache/hadoop/hbase/client/ColumnFamilyDescriptor.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/client/ColumnFamilyDescriptor.html b/devapidocs/org/apache/hadoop/hbase/client/ColumnFamilyDescriptor.html new file mode 100644 index 000..7070007 --- /dev/null +++ b/devapidocs/org/apache/hadoop/hbase/client/ColumnFamilyDescriptor.html @@ -0,0 +1,881 @@
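Editor's note: the signature change documented above swaps HColumnDescriptor for the new ColumnFamilyDescriptor interface. A usage sketch, assuming the companion ColumnFamilyDescriptorBuilder from the same package; treat the builder method names as an assumption about this era's API rather than as confirmed by this page:

import java.util.concurrent.CompletableFuture;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.AsyncAdmin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

final class AddFamilySketch {
  static CompletableFuture<Void> addFamily(AsyncAdmin admin, TableName table) {
    // Immutable descriptor built once, then handed to the async admin call,
    // which returns the CompletableFuture<Void> shown in the method summary above.
    ColumnFamilyDescriptor cf =
        ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf1")).build();
    return admin.addColumnFamily(table, cf);
  }
}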
[22/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b44796ef/devapidocs/index-all.html -- diff --git a/devapidocs/index-all.html b/devapidocs/index-all.html index 377894e..f6074e2 100644 --- a/devapidocs/index-all.html +++ b/devapidocs/index-all.html @@ -4,7 +4,7 @@ -Index (Apache HBase 2.0.0-SNAPSHOT API) +Index (Apache HBase 3.0.0-SNAPSHOT API) @@ -12,7 +12,7 @@ -var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":6,"i7":6}; -var tabs = {65535:["t0","All Methods"],2:["t2","Instance Methods"],4:["t3","Abstract Methods"],8:["t4","Concrete Methods"]}; -var altColor = "altColor"; -var rowColor = "rowColor"; -var tableTab = "tableTab"; -var activeTableTab = "activeTableTab"; - - -JavaScript is disabled on your browser. - - - - - -Skip navigation links - - - - -Overview -Package -Class -Use -Tree -Deprecated -Index -Help - - - - -PrevClass -NextClass - - -Frames -NoFrames - - -AllClasses - - - - - - - -Summary: -Nested| -Field| -Constr| -Method - - -Detail: -Field| -Constr| -Method - - - - - - - - -org.apache.hadoop.hbase.master -Class BulkAssigner - - - -http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true; title="class or interface in java.lang">java.lang.Object - - -org.apache.hadoop.hbase.master.BulkAssigner - - - - - - - -Direct Known Subclasses: -BulkReOpen, DisableTableProcedure.BulkDisabler, GeneralBulkAssigner - - - -@InterfaceAudience.Private -public abstract class BulkAssigner -extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true; title="class or interface in java.lang">Object -Base class used bulk assigning and unassigning regions. - Encapsulates a fixed size thread pool of executors to run assignment/unassignment. - Implement populatePool(java.util.concurrent.ExecutorService) and - waitUntilDone(long). The default implementation of - the getUncaughtExceptionHandler() is to abort the hosting - Server. - - - - - - - - - - - -Field Summary - -Fields - -Modifier and Type -Field and Description - - -protected Server -server - - - - - - - - - -Constructor Summary - -Constructors - -Constructor and Description - - -BulkAssigner(Serverserver) - - - - - - - - - -Method Summary - -All MethodsInstance MethodsAbstract MethodsConcrete Methods - -Modifier and Type -Method and Description - - -boolean -bulkAssign() - - -boolean -bulkAssign(booleansync) -Run the bulk assign. - - - -protected int -getThreadCount() - - -protected http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">String -getThreadNamePrefix() - - -protected long -getTimeoutOnRIT() - - -protected http://docs.oracle.com/javase/8/docs/api/java/lang/Thread.UncaughtExceptionHandler.html?is-external=true; title="class or interface in java.lang">Thread.UncaughtExceptionHandler -getUncaughtExceptionHandler() - - -protected abstract void -populatePool(http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ExecutorService.html?is-external=true; title="class or interface in java.util.concurrent">ExecutorServicepool) - - -protected abstract boolean -waitUntilDone(longtimeout) -Wait until bulk assign is done. - - - - - - - -Methods inherited from classjava.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true; title="class or interface in java.lang">Object -http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#clone--; title="class or interface in java.lang">clone,
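Editor's note: per the BulkAssigner summary above, a subclass only has to fill the executor pool and define the done-condition. A skeletal sketch against the abstract methods listed there; the body is illustrative and does no real assignment work:

import java.util.List;
import java.util.concurrent.ExecutorService;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.master.BulkAssigner;

class NoopBulkAssigner extends BulkAssigner {
  private final List<HRegionInfo> regions;

  NoopBulkAssigner(Server server, List<HRegionInfo> regions) {
    super(server);
    this.regions = regions;
  }

  @Override
  protected void populatePool(ExecutorService pool) {
    // One task per region; a real implementation would invoke the assignment machinery.
    regions.forEach(r -> pool.submit(() -> System.out.println("assigning " + r)));
  }

  @Override
  protected boolean waitUntilDone(long timeout) {
    return true; // real code blocks until no region is left in transition
  }
}

As the summary notes, the default getUncaughtExceptionHandler aborts the hosting Server, so any task failure in the pool is treated as fatal unless the subclass overrides it.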
[22/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dab57116/devapidocs/org/apache/hadoop/hbase/ipc/NettyRpcServer.MessageEncoder.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/ipc/NettyRpcServer.MessageEncoder.html b/devapidocs/org/apache/hadoop/hbase/ipc/NettyRpcServer.MessageEncoder.html deleted file mode 100644 index 0f15520..000 --- a/devapidocs/org/apache/hadoop/hbase/ipc/NettyRpcServer.MessageEncoder.html +++ /dev/null @@ -1,338 +0,0 @@ -http://www.w3.org/TR/html4/loose.dtd;> - - - - - -NettyRpcServer.MessageEncoder (Apache HBase 2.0.0-SNAPSHOT API) - - - - - -var methods = {"i0":10}; -var tabs = {65535:["t0","All Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]}; -var altColor = "altColor"; -var rowColor = "rowColor"; -var tableTab = "tableTab"; -var activeTableTab = "activeTableTab"; - - -JavaScript is disabled on your browser. - - - - - -Skip navigation links - - - - -Overview -Package -Class -Use -Tree -Deprecated -Index -Help - - - - -PrevClass -NextClass - - -Frames -NoFrames - - -AllClasses - - - - - - - -Summary: -Nested| -Field| -Constr| -Method - - -Detail: -Field| -Constr| -Method - - - - - - - - -org.apache.hadoop.hbase.ipc -Class NettyRpcServer.MessageEncoder - - - -http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true; title="class or interface in java.lang">java.lang.Object - - -io.netty.channel.ChannelHandlerAdapter - - -io.netty.channel.ChannelOutboundHandlerAdapter - - -org.apache.hadoop.hbase.ipc.NettyRpcServer.MessageEncoder - - - - - - - - - - - -All Implemented Interfaces: -io.netty.channel.ChannelHandler, io.netty.channel.ChannelOutboundHandler - - -Enclosing class: -NettyRpcServer - - - -private class NettyRpcServer.MessageEncoder -extends io.netty.channel.ChannelOutboundHandlerAdapter - - - - - - - - - - - -Nested Class Summary - - - - -Nested classes/interfaces inherited from interfaceio.netty.channel.ChannelHandler -io.netty.channel.ChannelHandler.Sharable - - - - - - - - -Constructor Summary - -Constructors - -Modifier -Constructor and Description - - -private -MessageEncoder() - - - - - - - - - -Method Summary - -All MethodsInstance MethodsConcrete Methods - -Modifier and Type -Method and Description - - -void -write(io.netty.channel.ChannelHandlerContextctx, - http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true; title="class or interface in java.lang">Objectmsg, - io.netty.channel.ChannelPromisepromise) - - - - - - -Methods inherited from classio.netty.channel.ChannelOutboundHandlerAdapter -bind, close, connect, deregister, disconnect, flush, read - - - - - -Methods inherited from classio.netty.channel.ChannelHandlerAdapter -exceptionCaught, handlerAdded, handlerRemoved, isSharable - - - - - -Methods inherited from classjava.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true; title="class or interface in java.lang">Object -http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#clone--; title="class or interface in java.lang">clone, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#equals-java.lang.Object-; title="class or interface in java.lang">equals, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#finalize--; title="class or interface in java.lang">finalize, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#getClass--; title="class or interface in java.lang">getClass, 
http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#hashCode--; title="class or interface in java.lang">hashCode, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#notify--; title="class or interface in java.lang">notify, http://docs.oracle.com/javase/8/docs/api/java/lang /Object.html?is-external=true#notifyAll--" title="class or interface in java.lang">notifyAll, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#toString--; title="class or interface in java.lang">toString, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#wait--; title="class or interface in java.lang">wait, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#wait-long-; title="class or interface in java.lang">wait, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#wait-long-int-; title="class
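Editor's note: the deleted MessageEncoder page above described a thin ChannelOutboundHandlerAdapter with a single write(ctx, msg, promise) override. For reference, the minimal shape of such a handler in Netty 4; this is a generic sketch, not the removed HBase implementation:

import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelOutboundHandlerAdapter;
import io.netty.channel.ChannelPromise;

class PassThroughEncoder extends ChannelOutboundHandlerAdapter {
  @Override
  public void write(ChannelHandlerContext ctx, Object msg, ChannelPromise promise) {
    // A real encoder would transform msg (e.g. serialize a response buffer)
    // before forwarding; here the message is passed down the pipeline unchanged.
    ctx.write(msg, promise);
  }
}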
[22/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c635e71b/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.ImplData.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.ImplData.html b/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.ImplData.html index 7a0715f..b947231 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.ImplData.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.ImplData.html @@ -33,10 +33,10 @@ 025 requiredArguments = { 026@org.jamon.annotations.Argument(name = "regionServer", type = "HRegionServer")}, 027 optionalArguments = { -028@org.jamon.annotations.Argument(name = "bcv", type = "String"), +028@org.jamon.annotations.Argument(name = "format", type = "String"), 029@org.jamon.annotations.Argument(name = "bcn", type = "String"), 030@org.jamon.annotations.Argument(name = "filter", type = "String"), -031@org.jamon.annotations.Argument(name = "format", type = "String")}) +031@org.jamon.annotations.Argument(name = "bcv", type = "String")}) 032public class RSStatusTmpl 033 extends org.jamon.AbstractTemplateProxy 034{ @@ -77,23 +77,23 @@ 069 return m_regionServer; 070} 071private HRegionServer m_regionServer; -072// 24, 1 -073public void setBcv(String bcv) +072// 22, 1 +073public void setFormat(String format) 074{ -075 // 24, 1 -076 m_bcv = bcv; -077 m_bcv__IsNotDefault = true; +075 // 22, 1 +076 m_format = format; +077 m_format__IsNotDefault = true; 078} -079public String getBcv() +079public String getFormat() 080{ -081 return m_bcv; +081 return m_format; 082} -083private String m_bcv; -084public boolean getBcv__IsNotDefault() +083private String m_format; +084public boolean getFormat__IsNotDefault() 085{ -086 return m_bcv__IsNotDefault; +086 return m_format__IsNotDefault; 087} -088private boolean m_bcv__IsNotDefault; +088private boolean m_format__IsNotDefault; 089// 23, 1 090public void setBcn(String bcn) 091{ @@ -128,23 +128,23 @@ 120 return m_filter__IsNotDefault; 121} 122private boolean m_filter__IsNotDefault; -123// 22, 1 -124public void setFormat(String format) +123// 24, 1 +124public void setBcv(String bcv) 125{ -126 // 22, 1 -127 m_format = format; -128 m_format__IsNotDefault = true; +126 // 24, 1 +127 m_bcv = bcv; +128 m_bcv__IsNotDefault = true; 129} -130public String getFormat() +130public String getBcv() 131{ -132 return m_format; +132 return m_bcv; 133} -134private String m_format; -135public boolean getFormat__IsNotDefault() +134private String m_bcv; +135public boolean getBcv__IsNotDefault() 136{ -137 return m_format__IsNotDefault; +137 return m_bcv__IsNotDefault; 138} -139private boolean m_format__IsNotDefault; +139private boolean m_bcv__IsNotDefault; 140 } 141 @Override 142 protected org.jamon.AbstractTemplateProxy.ImplData makeImplData() @@ -156,10 +156,10 @@ 148return (ImplData) super.getImplData(); 149 } 150 -151 protected String bcv; -152 public final org.apache.hadoop.hbase.tmpl.regionserver.RSStatusTmpl setBcv(String p_bcv) +151 protected String format; +152 public final org.apache.hadoop.hbase.tmpl.regionserver.RSStatusTmpl setFormat(String p_format) 153 { -154(getImplData()).setBcv(p_bcv); +154 (getImplData()).setFormat(p_format); 155return this; 156 } 157 @@ -177,10 +177,10 @@ 169return this; 170 } 171 -172 protected String format; -173 public final org.apache.hadoop.hbase.tmpl.regionserver.RSStatusTmpl setFormat(String p_format) +172 protected String bcv; +173 public final 
org.apache.hadoop.hbase.tmpl.regionserver.RSStatusTmpl setBcv(String p_bcv) 174 { -175 (getImplData()).setFormat(p_format); +175(getImplData()).setBcv(p_bcv); 176return this; 177 } 178 http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c635e71b/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.Intf.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.Intf.html b/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.Intf.html index 7a0715f..b947231 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.Intf.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.Intf.html @@ -33,10 +33,10 @@ 025 requiredArguments = { 026@org.jamon.annotations.Argument(name = "regionServer", type =
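The hunks above only reorder RSStatusTmpl's generated optional-argument annotations and accessors (format, bcn, bcv); the template's behavior is unchanged. A minimal usage sketch, assuming Jamon's usual proxy conventions (the render call and the argument values are illustrative, not taken from this diff):

    // The fluent proxy setters return the proxy itself ("return this;" above),
    // so the optional arguments can be chained in any order.
    RSStatusTmpl tmpl = new RSStatusTmpl();
    tmpl.setFormat("json")    // optional argument "format" (template line 22)
        .setBcn("cacheName")  // optional argument "bcn" (template line 23)
        .setBcv("cacheView"); // optional argument "bcv" (template line 24)
    // tmpl.render(out, regionServer); // render(Writer, HRegionServer) assumed from Jamon conventions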
[22/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8b4cf63f/devapidocs/org/apache/hadoop/hbase/security/access/CoprocessorWhitelistMasterObserver.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/security/access/CoprocessorWhitelistMasterObserver.html b/devapidocs/org/apache/hadoop/hbase/security/access/CoprocessorWhitelistMasterObserver.html index 34f67a8..288d77b 100644 --- a/devapidocs/org/apache/hadoop/hbase/security/access/CoprocessorWhitelistMasterObserver.html +++ b/devapidocs/org/apache/hadoop/hbase/security/access/CoprocessorWhitelistMasterObserver.html @@ -244,7 +244,7 @@ implements MasterObserver -postAbortProcedure, postAddColumn, postAddColumnFamily, postAddColumnHandler, postAddReplicationPeer, postAddRSGroup, postAssign, postBalance, postBalanceRSGroup, postBalanceSwitch, postCloneSnapshot, postCompletedAddColumnFamilyAction, postCompletedCreateTableAction, postCompletedDeleteColumnFamilyAction, postCompletedDeleteTableAction, postCompletedDisableTableAction, postCompletedEnableTableAction, postCompletedMergeRegionsAction, postCompletedModifyColumnFamilyAction, postCompletedModifyTableAction, postCompletedSplitRegionAction, postCompletedTruncateTableAction, postCreateNamespace, postCreateTable, postCreateTableHandler, postDeleteColumn, postDeleteColumnFamily, postDeleteColumnHandler, postDeleteNamespace, postDeleteSnapshot, postDeleteTable, postDeleteTableHandler, postDisableReplicationPeer, postDisableTable, postDisableTableHandler, postDispatchMerge, postEnableReplicationPeer, postEnableTable, postEnableTableHandler, postGetNamespaceDescriptor, postGetReplicationPeerConfig, postGetTableDescriptors, postGetTableNames, postListLocks, postListNamespaceDescriptors, postListProcedures, postListReplicationPeers, postListSnapshot, postLockHeartbeat, postMergeRegions, postMergeRegionsCommitAction, postModifyColumn, postModifyColumnFamily, postModifyColumnHandler, postModifyNamespace, postModifyTable, postModifyTableHandler, postMove, postMoveServers, postMoveServersAndTables, postMoveTables, postRegionOffline, postRemoveReplicationPeer, postRemoveRSGroup, postRequestLock, postRestoreSnapshot, postRollBackMergeRegionsAction, postRollBackSplitRegionAction, postSetNamespaceQuota, postSetSplitOrMergeEnabled, postSetTableQuota, postSetUserQuota, postSetUserQuota, postSetUserQuota, postSnapshot, postStartMaster, postTableFlush, postTruncateTable, postTruncateTableHandler, postUnassign, postUpdateReplicationPeerConfig, preAbortProcedure, preAddColumn, preAddColumnFamily, preAddColumnFamilyAction, preAddColumnHandler, preAddReplicationPeer, preAddRSGroup, preAssign, preBalance, preBalanceRSGroup, preBalanceSwitch, preCloneSnapshot, preCreateNamespace, preCreateTableAction, preCreateTableHandler, preDeleteColumn, preDeleteColumnFamily, preDeleteColumnFamilyAction, preDeleteColumnHandler, preDeleteNamespace, preDeleteSnapshot, preDeleteTable, preDeleteTableAction, preDeleteTableHandler, preDisableReplicationPeer, preDisableTable, preDisableTableAction, preDisableTableHandler, preDispatchMerge, preEnableReplicationPeer, preEnableTable, preEnableTableAction, preEnableTableHandler, preGetNamespaceDescriptor, preGetReplicationPeerConfig, preGetTableDescriptors, preGetTableNames, preListLocks, preListNamespaceDescriptors, preListProcedures, preListReplicationPeers, preListSnapshot, preLockHeartbeat, preMasterInitialization, preMergeRegions, preMergeRegionsAction, preMergeRegionsCommitAction, preModifyColumn, preModifyColumnFamily, preModifyColumnFamilyAction, preModifyColumnHandler,
preModifyNamespace, preModifyTableAction, preModifyTableHandler, preMove, preMoveServers, preMoveServersAndTables, preMoveTables, preRegionOffline, preRemoveReplicationPeer, preRemoveRSGroup, preRequestLock, preRestoreSnapshot, preSetNamespaceQuota, preSetSplitOrMergeEnabled, preSetTableQuota, preSetUserQuota, preSetUserQuota, preSetUserQuota, preShutdown, preSnapshot, preSplitRegion, preSplitRegionAction, preSplitRegionAfterPONRAction, preSplitRegionBeforePONRAction, preStopMaster, preTableFlush, preTruncateTable, preTruncateTableAction, preTruncateTableHandler, preUnassign, preUpdateReplicationPeerConfig +postAbortProcedure, postAddColumn, postAddColumnFamily, postAddColumnHandler, postAddReplicationPeer, postAddRSGroup, postAssign, postBalance, postBalanceRSGroup, postBalanceSwitch, postCloneSnapshot, postCompletedAddColumnFamilyAction, postCompletedCreateTableAction, postCompletedDeleteColumnFamilyAction, postCompletedDeleteTableAction, postCompletedDisableTableAction, postCompletedEnableTableAction, postCompletedMergeRegionsAction, postCompletedModifyColumnFamilyAction, postCompletedModifyTableAction, postCompletedSplitRegionAction, postCompletedTruncateTableAction,
[22/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f55ebeaa/devapidocs/src-html/org/apache/hadoop/hbase/ipc/RpcServer.ByteBuffByteInput.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/ipc/RpcServer.ByteBuffByteInput.html b/devapidocs/src-html/org/apache/hadoop/hbase/ipc/RpcServer.ByteBuffByteInput.html deleted file mode 100644 index 9e1c66c..000 --- a/devapidocs/src-html/org/apache/hadoop/hbase/ipc/RpcServer.ByteBuffByteInput.html +++ /dev/null @@ -1,1689 +0,0 @@ -http://www.w3.org/TR/html4/loose.dtd;> - - -Source code - - - - -001/** -002 * Licensed to the Apache Software Foundation (ASF) under one -003 * or more contributor license agreements. See the NOTICE file -004 * distributed with this work for additional information -005 * regarding copyright ownership. The ASF licenses this file -006 * to you under the Apache License, Version 2.0 (the -007 * "License"); you may not use this file except in compliance -008 * with the License. You may obtain a copy of the License at -009 * -010 * http://www.apache.org/licenses/LICENSE-2.0 -011 * -012 * Unless required by applicable law or agreed to in writing, software -013 * distributed under the License is distributed on an "AS IS" BASIS, -014 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -015 * See the License for the specific language governing permissions and -016 * limitations under the License. -017 */ -018 -019package org.apache.hadoop.hbase.ipc; -020 -021import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION; -022 -023import java.io.ByteArrayInputStream; -024import java.io.ByteArrayOutputStream; -025import java.io.Closeable; -026import java.io.DataOutputStream; -027import java.io.IOException; -028import java.net.InetAddress; -029import java.net.InetSocketAddress; -030import java.nio.ByteBuffer; -031import java.nio.channels.Channels; -032import java.nio.channels.GatheringByteChannel; -033import java.nio.channels.ReadableByteChannel; -034import java.nio.channels.WritableByteChannel; -035import java.security.GeneralSecurityException; -036import java.security.PrivilegedExceptionAction; -037import java.util.ArrayList; -038import java.util.HashMap; -039import java.util.List; -040import java.util.Map; -041import java.util.Properties; -042import java.util.concurrent.atomic.LongAdder; -043 -044import javax.security.sasl.Sasl; -045import javax.security.sasl.SaslException; -046import javax.security.sasl.SaslServer; -047 -048import org.apache.commons.crypto.cipher.CryptoCipherFactory; -049import org.apache.commons.crypto.random.CryptoRandom; -050import org.apache.commons.crypto.random.CryptoRandomFactory; -051import org.apache.commons.logging.Log; -052import org.apache.commons.logging.LogFactory; -053import org.apache.hadoop.conf.Configuration; -054import org.apache.hadoop.hbase.CallQueueTooBigException; -055import org.apache.hadoop.hbase.CellScanner; -056import org.apache.hadoop.hbase.DoNotRetryIOException; -057import org.apache.hadoop.hbase.HBaseInterfaceAudience; -058import org.apache.hadoop.hbase.HConstants; -059import org.apache.hadoop.hbase.Server; -060import org.apache.hadoop.hbase.classification.InterfaceAudience; -061import org.apache.hadoop.hbase.classification.InterfaceStability; -062import org.apache.hadoop.hbase.client.VersionInfoUtil; -063import org.apache.hadoop.hbase.codec.Codec; -064import org.apache.hadoop.hbase.conf.ConfigurationObserver; -065import org.apache.hadoop.hbase.exceptions.RequestTooBigException; -066import 
org.apache.hadoop.hbase.io.ByteBufferOutputStream; -067import org.apache.hadoop.hbase.io.ByteBufferPool; -068import org.apache.hadoop.hbase.io.crypto.aes.CryptoAES; -069import org.apache.hadoop.hbase.monitoring.MonitoredRPCHandler; -070import org.apache.hadoop.hbase.monitoring.TaskMonitor; -071import org.apache.hadoop.hbase.nio.ByteBuff; -072import org.apache.hadoop.hbase.nio.MultiByteBuff; -073import org.apache.hadoop.hbase.nio.SingleByteBuff; -074import org.apache.hadoop.hbase.regionserver.RSRpcServices; -075import org.apache.hadoop.hbase.security.AccessDeniedException; -076import org.apache.hadoop.hbase.security.AuthMethod; -077import org.apache.hadoop.hbase.security.HBaseSaslRpcServer; -078import org.apache.hadoop.hbase.security.HBaseSaslRpcServer.SaslDigestCallbackHandler; -079import org.apache.hadoop.hbase.security.HBaseSaslRpcServer.SaslGssCallbackHandler; -080import org.apache.hadoop.hbase.security.SaslStatus; -081import org.apache.hadoop.hbase.security.SaslUtil; -082import org.apache.hadoop.hbase.security.User; -083import org.apache.hadoop.hbase.security.UserProvider; -084import org.apache.hadoop.hbase.security.token.AuthenticationTokenSecretManager; -085import org.apache.hadoop.hbase.shaded.com.google.protobuf.BlockingService; -086import org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteInput;
[22/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8e0a5167/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/TableBackupClient.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/TableBackupClient.html b/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/TableBackupClient.html index d6d06f8..4e40739 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/TableBackupClient.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/TableBackupClient.html @@ -48,347 +48,433 @@ 040import org.apache.hadoop.hbase.classification.InterfaceAudience; 041import org.apache.hadoop.hbase.client.Admin; 042import org.apache.hadoop.hbase.client.Connection; -043import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; -044import org.apache.hadoop.hbase.util.FSUtils; -045 -046/** -047 * Base class for backup operation. Concrete implementation for -048 * full and incremental backup are delegated to corresponding sub-classes: -049 * {@link FullTableBackupClient} and {@link IncrementalTableBackupClient} -050 * -051 */ -052@InterfaceAudience.Private -053public abstract class TableBackupClient { -054 private static final Log LOG = LogFactory.getLog(TableBackupClient.class); +043import org.apache.hadoop.hbase.client.SnapshotDescription; +044import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; +045import org.apache.hadoop.hbase.util.FSUtils; +046 +047/** +048 * Base class for backup operation. Concrete implementation for +049 * full and incremental backup are delegated to corresponding sub-classes: +050 * {@link FullTableBackupClient} and {@link IncrementalTableBackupClient} +051 * +052 */ +053@InterfaceAudience.Private +054public abstract class TableBackupClient { 055 -056 protected Configuration conf; -057 protected Connection conn; -058 protected String backupId; -059 protected List<TableName> tableList; -060 protected HashMap<String, Long> newTimestamps = null; -061 -062 protected BackupManager backupManager; -063 protected BackupInfo backupInfo; -064 -065 public TableBackupClient(final Connection conn, final String backupId, BackupRequest request) -066 throws IOException { -067if (request.getBackupType() == BackupType.FULL) { -068 backupManager = new BackupManager(conn, conn.getConfiguration()); -069} else { -070 backupManager = new IncrementalBackupManager(conn, conn.getConfiguration()); -071} -072this.backupId = backupId; -073this.tableList = request.getTableList(); -074this.conn = conn; -075this.conf = conn.getConfiguration(); -076backupInfo = -077 backupManager.createBackupInfo(backupId, request.getBackupType(), tableList, -078 request.getTargetRootDir(), request.getTotalTasks(), request.getBandwidth()); -079if (tableList == null || tableList.isEmpty()) { -080 this.tableList = new ArrayList<>(backupInfo.getTables()); -081} -082 } -083 -084 /** -085 * Begin the overall backup. 
-086 * @param backupInfo backup info -087 * @throws IOException exception -088 */ -089 protected void beginBackup(BackupManager backupManager, BackupInfo backupInfo) -090 throws IOException { -091 backupManager.setBackupInfo(backupInfo); -092// set the start timestamp of the overall backup -093long startTs = EnvironmentEdgeManager.currentTime(); -094backupInfo.setStartTs(startTs); -095// set overall backup status: ongoing -096 backupInfo.setState(BackupState.RUNNING); -097 backupInfo.setPhase(BackupPhase.REQUEST); -098LOG.info("Backup " + backupInfo.getBackupId() + " started at " + startTs + "."); -099 -100 backupManager.updateBackupInfo(backupInfo); -101if (LOG.isDebugEnabled()) { -102 LOG.debug("Backup session " + backupInfo.getBackupId() + " has been started."); -103} -104 } -105 -106 private String getMessage(Exception e) { -107String msg = e.getMessage(); -108if (msg == null || msg.equals("")) { -109 msg = e.getClass().getName(); -110} -111return msg; -112 } -113 -114 /** -115 * Delete HBase snapshot for backup. -116 * @param backupInfo backup info -117 * @throws Exception exception -118 */ -119 private void deleteSnapshot(final Connection conn, BackupInfo backupInfo, Configuration conf) -120 throws IOException { -121LOG.debug("Trying to delete snapshot for full backup."); -122for (String snapshotName : backupInfo.getSnapshotNames()) { -123 if (snapshotName == null) { -124continue; -125 } -126 LOG.debug("Trying to delete snapshot: " + snapshotName); -127 -128 try (Admin admin = conn.getAdmin();) { -129 admin.deleteSnapshot(snapshotName); -130 } catch (IOException ioe) { -131LOG.debug("when deleting snapshot " + snapshotName, ioe); -132 } -133 LOG.debug("Deleting the snapshot " + snapshotName + " for backup " +
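As the class javadoc above says, TableBackupClient only drives the common backup life-cycle, with the real work in FullTableBackupClient and IncrementalTableBackupClient. A hypothetical skeleton of that contract, assuming the abstract execute() hook the concrete subclasses implement (the class name and body here are illustrative only, not part of this commit):

    import java.io.IOException;
    import org.apache.hadoop.hbase.client.Connection;

    public class ExampleBackupClient extends TableBackupClient {
      public ExampleBackupClient(Connection conn, String backupId, BackupRequest request)
          throws IOException {
        // Chooses BackupManager vs. IncrementalBackupManager and builds BackupInfo (lines 065-082)
        super(conn, backupId, request);
      }

      @Override
      public void execute() throws IOException {
        beginBackup(backupManager, backupInfo); // stamp start time, set state RUNNING (lines 089-104)
        // ... copy data for backupInfo.getTables(), then mark the backup complete ...
      }
    }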
[22/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/709b8fcc/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.DisableTableFuture.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.DisableTableFuture.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.DisableTableFuture.html index f2c44db..6cf2fc8 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.DisableTableFuture.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.DisableTableFuture.html @@ -2581,7 +2581,7 @@ 2573try { 2574 // Restore snapshot 2575 get( -2576 internalRestoreSnapshotAsync(snapshotName, tableName, false), +2576 internalRestoreSnapshotAsync(snapshotName, tableName), 2577syncWaitTimeout, 2578TimeUnit.MILLISECONDS); 2579} catch (IOException e) { @@ -2590,7 +2590,7 @@ 2582 if (takeFailSafeSnapshot) { 2583try { 2584 get( -2585 internalRestoreSnapshotAsync(failSafeSnapshotSnapshotName, tableName, false), +2585 internalRestoreSnapshotAsync(failSafeSnapshotSnapshotName, tableName), 2586syncWaitTimeout, 2587TimeUnit.MILLISECONDS); 2588 String msg = "Restore snapshot=" + snapshotName + @@ -2633,7 +2633,7 @@ 2625 throw new TableNotDisabledException(tableName); 2626} 2627 -2628return internalRestoreSnapshotAsync(snapshotName, tableName, false); +2628return internalRestoreSnapshotAsync(snapshotName, tableName); 2629 } 2630 2631 @Override @@ -2643,1621 +2643,1614 @@ 2635 } 2636 2637 @Override -2638 public void cloneSnapshot(String snapshotName, TableName tableName, boolean restoreAcl) +2638 public void cloneSnapshot(final String snapshotName, final TableName tableName) 2639 throws IOException, TableExistsException, RestoreSnapshotException { 2640if (tableExists(tableName)) { 2641 throw new TableExistsException(tableName); 2642} 2643get( -2644 internalRestoreSnapshotAsync(snapshotName, tableName, restoreAcl), +2644 internalRestoreSnapshotAsync(snapshotName, tableName), 2645 Integer.MAX_VALUE, 2646 TimeUnit.MILLISECONDS); 2647 } 2648 2649 @Override -2650 public void cloneSnapshot(final String snapshotName, final TableName tableName) -2651 throws IOException, TableExistsException, RestoreSnapshotException { -2652cloneSnapshot(snapshotName, tableName, false); -2653 } -2654 -2655 @Override -2656 public Future<Void> cloneSnapshotAsync(final String snapshotName, final TableName tableName) -2657 throws IOException, TableExistsException { -2658if (tableExists(tableName)) { -2659 throw new TableExistsException(tableName); -2660} -2661return internalRestoreSnapshotAsync(snapshotName, tableName, false); -2662 } -2663 -2664 @Override -2665 public byte[] execProcedureWithRet(String signature, String instance, Map<String, String> props) -2666 throws IOException { -2667ProcedureDescription desc = ProtobufUtil.buildProcedureDescription(signature, instance, props); -2668final ExecProcedureRequest request = -2669 ExecProcedureRequest.newBuilder().setProcedure(desc).build(); -2670// run the procedure on the master -2671ExecProcedureResponse response = executeCallable( -2672 new MasterCallable<ExecProcedureResponse>(getConnection(), getRpcControllerFactory()) { -2673@Override -2674protected ExecProcedureResponse rpcCall() throws Exception { -2675 return master.execProcedureWithRet(getRpcController(), request); -2676} -2677 }); -2678 -2679return response.hasReturnData() ? response.getReturnData().toByteArray() : null; -2680 } -2681 -2682 @Override -2683 public void execProcedure(String signature, String instance, Map<String, String> props) -2684 throws IOException { -2685ProcedureDescription desc = ProtobufUtil.buildProcedureDescription(signature, instance, props); -2686final ExecProcedureRequest request = -2687 ExecProcedureRequest.newBuilder().setProcedure(desc).build(); -2688// run the procedure on the master -2689ExecProcedureResponse response = executeCallable(new MasterCallable<ExecProcedureResponse>( -2690getConnection(), getRpcControllerFactory()) { -2691 @Override -2692 protected ExecProcedureResponse rpcCall() throws Exception { -2693return master.execProcedure(getRpcController(), request); -2694 } -2695}); -2696 -2697long start = EnvironmentEdgeManager.currentTime(); -2698long max = response.getExpectedTimeout(); -2699long maxPauseTime = max / this.numRetries; -2700int tries = 0; -2701LOG.debug("Waiting a max of " + max + " ms for procedure '" + -2702signature + " : " + instance + "'' to complete. (max " + maxPauseTime + " ms
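The hunks above drop the boolean restoreAcl argument, so every call site now uses the two-argument internalRestoreSnapshotAsync(snapshotName, tableName), and the three-argument cloneSnapshot overload disappears. From application code the public Admin API is unchanged; a hedged sketch of the usual restore path (table and snapshot names are placeholders):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("t1");
      admin.snapshot("t1-snap", table);  // online snapshot
      admin.disableTable(table);         // restoreSnapshot() requires a disabled table
      admin.restoreSnapshot("t1-snap");  // the failsafe-snapshot retry above guards this step
      admin.enableTable(table);
    }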
[22/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1241ee85/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/StoreFile.Comparators.GetSeqId.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/StoreFile.Comparators.GetSeqId.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/StoreFile.Comparators.GetSeqId.html index b01aa5a..8090868 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/StoreFile.Comparators.GetSeqId.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/StoreFile.Comparators.GetSeqId.html @@ -597,215 +597,221 @@ 589return reader; 590 } 591 -592 public StoreFileScanner getStreamScanner(boolean canUseDropBehind, boolean cacheBlocks, -593 boolean pread, boolean isCompaction, long readPt, long scannerOrder, -594 boolean canOptimizeForNonNullColumn) throws IOException { -595return createStreamReader(canUseDropBehind).getStoreFileScanner( -596 cacheBlocks, pread, isCompaction, readPt, scannerOrder, canOptimizeForNonNullColumn); -597 } -598 -599 /** -600 * @return Current reader. Must call initReader first else returns null. -601 * @see #initReader() -602 */ -603 public StoreFileReader getReader() { -604return this.reader; -605 } -606 -607 /** -608 * @param evictOnClose whether to evict blocks belonging to this file -609 * @throws IOException -610 */ -611 public synchronized void closeReader(boolean evictOnClose) -612 throws IOException { -613if (this.reader != null) { -614 this.reader.close(evictOnClose); -615 this.reader = null; -616} -617 } -618 -619 /** -620 * Marks the status of the file as compactedAway. -621 */ -622 public void markCompactedAway() { -623this.compactedAway = true; -624 } -625 -626 /** -627 * Delete this file -628 * @throws IOException -629 */ -630 public void deleteReader() throws IOException { -631boolean evictOnClose = -632cacheConf != null? cacheConf.shouldEvictOnClose(): true; -633closeReader(evictOnClose); -634this.fs.delete(getPath(), true); -635 } -636 -637 @Override -638 public String toString() { -639return this.fileInfo.toString(); -640 } -641 -642 /** -643 * @return a length description of this StoreFile, suitable for debug output -644 */ -645 public String toStringDetailed() { -646StringBuilder sb = new StringBuilder(); -647 sb.append(this.getPath().toString()); -648sb.append(", isReference=").append(isReference()); -649sb.append(", isBulkLoadResult=").append(isBulkLoadResult()); -650if (isBulkLoadResult()) { -651 sb.append(", bulkLoadTS=").append(getBulkLoadTimestamp()); -652} else { -653 sb.append(", seqid=").append(getMaxSequenceId()); -654} -655sb.append(", majorCompaction=").append(isMajorCompaction()); -656 -657return sb.toString(); -658 } -659 -660 /** -661 * Gets whether to skip resetting the sequence id for cells. -662 * @param skipResetSeqId The byte array of boolean. -663 * @return Whether to skip resetting the sequence id. -664 */ -665 private boolean isSkipResetSeqId(byte[] skipResetSeqId) { -666if (skipResetSeqId != null && skipResetSeqId.length == 1) { -667 return Bytes.toBoolean(skipResetSeqId); -668} -669return false; -670 } -671 -672 /** -673 * @param fs -674 * @param dir Directory to create file in. 
-675 * @return random filename inside passed <code>dir</code> -676 */ -677 public static Path getUniqueFile(final FileSystem fs, final Path dir) -678 throws IOException { -679if (!fs.getFileStatus(dir).isDirectory()) { -680 throw new IOException("Expecting " + dir.toString() + -681" to be a directory"); -682} -683return new Path(dir, UUID.randomUUID().toString().replaceAll("-", "")); -684 } -685 -686 public Long getMinimumTimestamp() { -687return getReader().timeRange == null? null: getReader().timeRange.getMin(); -688 } -689 -690 public Long getMaximumTimestamp() { -691return getReader().timeRange == null? null: getReader().timeRange.getMax(); -692 } -693 -694 -695 /** -696 * Gets the approximate mid-point of this file that is optimal for use in splitting it. -697 * @param comparator Comparator used to compare KVs. -698 * @return The split point row, or null if splitting is not possible, or reader is null. -699 */ -700 byte[] getFileSplitPoint(CellComparator comparator) throws IOException { -701if (this.reader == null) { -702 LOG.warn("Storefile " + this + " Reader is null; cannot get split point"); -703 return null; -704} -705// Get first, last, and mid keys. Midkey is the key that starts block -706// in middle of hfile. Has column and timestamp. Need to return just -707// the row we want to split on as midkey. -708Cell midkey = this.reader.midkey(); -709if
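getUniqueFile() above is small enough to restate as a standalone sketch: a new store file gets a dash-stripped random UUID as its name, and a non-directory target is rejected (the method name below is illustrative):

    import java.io.IOException;
    import java.util.UUID;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    static Path uniqueStoreFilePath(FileSystem fs, Path dir) throws IOException {
      if (!fs.getFileStatus(dir).isDirectory()) {
        throw new IOException("Expecting " + dir + " to be a directory");
      }
      // 32 hex characters, dashes removed: the same naming scheme as getUniqueFile()
      return new Path(dir, UUID.randomUUID().toString().replaceAll("-", ""));
    }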
[22/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dd7176bf/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.AbortProcedureFuture.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.AbortProcedureFuture.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.AbortProcedureFuture.html index 8b22aa1..f2c44db 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.AbortProcedureFuture.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.AbortProcedureFuture.html @@ -100,4135 +100,4164 @@ 092import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter; 093import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos; 094import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService; -095import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest; -096import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse; -097import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest; -098import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest; -099import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest; -100import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse; -101import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterRequest; -102import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterResponse; -103import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerRequest; -104import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationRequest; -105import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos; -106import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceRequest; -107import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceResponse; -108import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos; -109import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NameStringPair; -110import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription; -111import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType; -112import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema; -113import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos; -114import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos; -115import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest; -116import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse; -117import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest; -118import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse; -119import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest; -120import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest; -121import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceResponse; -122import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableRequest; -123import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableResponse; -124import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnRequest; -125import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnResponse; -126import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceRequest; -127import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceResponse; -128import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotRequest; -129import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableRequest; -130import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableResponse; -131import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableRequest; -132import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableResponse; -133import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersRequest; -134import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableRequest; -135import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableResponse; -136import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureRequest; -137import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureResponse; -138import
[22/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7ef4c5a9/devapidocs/src-html/org/apache/hadoop/hbase/ipc/RpcServer.BlockingServiceAndInterface.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/ipc/RpcServer.BlockingServiceAndInterface.html b/devapidocs/src-html/org/apache/hadoop/hbase/ipc/RpcServer.BlockingServiceAndInterface.html index 5cc356a..9e1c66c 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/ipc/RpcServer.BlockingServiceAndInterface.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/ipc/RpcServer.BlockingServiceAndInterface.html @@ -70,2037 +70,1559 @@ 062import org.apache.hadoop.hbase.client.VersionInfoUtil; 063import org.apache.hadoop.hbase.codec.Codec; 064import org.apache.hadoop.hbase.conf.ConfigurationObserver; -065import org.apache.hadoop.hbase.exceptions.RegionMovedException; -066import org.apache.hadoop.hbase.exceptions.RequestTooBigException; -067import org.apache.hadoop.hbase.io.ByteBufferListOutputStream; -068import org.apache.hadoop.hbase.io.ByteBufferOutputStream; -069import org.apache.hadoop.hbase.io.ByteBufferPool; -070import org.apache.hadoop.hbase.io.crypto.aes.CryptoAES; -071import org.apache.hadoop.hbase.monitoring.MonitoredRPCHandler; -072import org.apache.hadoop.hbase.monitoring.TaskMonitor; -073import org.apache.hadoop.hbase.nio.ByteBuff; -074import org.apache.hadoop.hbase.nio.MultiByteBuff; -075import org.apache.hadoop.hbase.nio.SingleByteBuff; -076import org.apache.hadoop.hbase.regionserver.RSRpcServices; -077import org.apache.hadoop.hbase.security.AccessDeniedException; -078import org.apache.hadoop.hbase.security.AuthMethod; -079import org.apache.hadoop.hbase.security.HBaseSaslRpcServer; -080import org.apache.hadoop.hbase.security.HBaseSaslRpcServer.SaslDigestCallbackHandler; -081import org.apache.hadoop.hbase.security.HBaseSaslRpcServer.SaslGssCallbackHandler; -082import org.apache.hadoop.hbase.security.SaslStatus; -083import org.apache.hadoop.hbase.security.SaslUtil; -084import org.apache.hadoop.hbase.security.User; -085import org.apache.hadoop.hbase.security.UserProvider; -086import org.apache.hadoop.hbase.security.token.AuthenticationTokenSecretManager; -087import org.apache.hadoop.hbase.shaded.com.google.protobuf.BlockingService; -088import org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteInput; -089import org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString; -090import org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream; -091import org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream; -092import org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.MethodDescriptor; -093import org.apache.hadoop.hbase.shaded.com.google.protobuf.Message; -094import org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException; -095import org.apache.hadoop.hbase.shaded.com.google.protobuf.TextFormat; -096import org.apache.hadoop.hbase.shaded.com.google.protobuf.UnsafeByteOperations; -097import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; -098import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos; -099import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.VersionInfo; -100import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos; -101import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.CellBlockMeta; -102import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.ConnectionHeader; -103import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.ExceptionResponse; -104import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.RequestHeader; -105import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.ResponseHeader; -106import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformation; -107import org.apache.hadoop.hbase.util.ByteBufferUtils; -108import org.apache.hadoop.hbase.util.Bytes; -109import org.apache.hadoop.hbase.util.Pair; -110import org.apache.hadoop.io.BytesWritable; -111import org.apache.hadoop.io.Writable; -112import org.apache.hadoop.io.WritableUtils; -113import org.apache.hadoop.io.compress.CompressionCodec; -114import org.apache.hadoop.security.UserGroupInformation; -115import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod; -116import org.apache.hadoop.security.authorize.AuthorizationException; -117import org.apache.hadoop.security.authorize.PolicyProvider; -118import org.apache.hadoop.security.authorize.ProxyUsers; -119import org.apache.hadoop.security.authorize.ServiceAuthorizationManager; -120import org.apache.hadoop.security.token.SecretManager; -121import org.apache.hadoop.security.token.SecretManager.InvalidToken; -122import org.apache.hadoop.security.token.TokenIdentifier; -123import org.apache.hadoop.util.StringUtils; -124import org.apache.htrace.TraceInfo; -125import
[22/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/31df4674/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.AdminRpcCall.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.AdminRpcCall.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.AdminRpcCall.html index f3f7a46..8750fa2 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.AdminRpcCall.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.AdminRpcCall.html @@ -56,2015 +56,2125 @@ 048import org.apache.hadoop.hbase.MetaTableAccessor; 049import org.apache.hadoop.hbase.MetaTableAccessor.QueryType; 050import org.apache.hadoop.hbase.NotServingRegionException; -051import org.apache.hadoop.hbase.RegionLocations; -052import org.apache.hadoop.hbase.ServerName; -053import org.apache.hadoop.hbase.NamespaceDescriptor; -054import org.apache.hadoop.hbase.HConstants; -055import org.apache.hadoop.hbase.TableExistsException; -056import org.apache.hadoop.hbase.TableName; -057import org.apache.hadoop.hbase.AsyncMetaTableAccessor; -058import org.apache.hadoop.hbase.TableNotDisabledException; -059import org.apache.hadoop.hbase.TableNotFoundException; -060import org.apache.hadoop.hbase.UnknownRegionException; -061import org.apache.hadoop.hbase.classification.InterfaceAudience; -062import org.apache.hadoop.hbase.classification.InterfaceStability; -063import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.AdminRequestCallerBuilder; -064import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.MasterRequestCallerBuilder; -065import org.apache.hadoop.hbase.client.Scan.ReadType; -066import org.apache.hadoop.hbase.client.replication.ReplicationSerDeHelper; -067import org.apache.hadoop.hbase.client.replication.TableCFs; -068import org.apache.hadoop.hbase.exceptions.DeserializationException; -069import org.apache.hadoop.hbase.ipc.HBaseRpcController; -070import org.apache.hadoop.hbase.quotas.QuotaFilter; -071import org.apache.hadoop.hbase.quotas.QuotaSettings; -072import org.apache.hadoop.hbase.quotas.QuotaTableUtil; -073import org.apache.hadoop.hbase.replication.ReplicationException; -074import org.apache.hadoop.hbase.replication.ReplicationPeerConfig; -075import org.apache.hadoop.hbase.replication.ReplicationPeerDescription; -076import org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback; -077import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; -078import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter; -079import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService; -080import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest; -081import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse; -082import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionRequest; -083import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionResponse; -084import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos; -085import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema; -086import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest; -087import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse; -088import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest; -089import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionResponse; 
-090import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceRequest; -091import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceResponse; -092import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest; -093import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceResponse; -094import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceRequest; -095import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceResponse; -096import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotRequest; -097import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotResponse; -098import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableRequest; -099import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableResponse; -100import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableRequest; -101import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableResponse; -102import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnRequest; -103import
[22/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6f2e75f2/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.DisableTableProcedureBiConsumer.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.DisableTableProcedureBiConsumer.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.DisableTableProcedureBiConsumer.html index 6c52543..f3f7a46 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.DisableTableProcedureBiConsumer.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.DisableTableProcedureBiConsumer.html @@ -31,1797 +31,2040 @@ 023import java.util.ArrayList; 024import java.util.Arrays; 025import java.util.Collection; -026import java.util.HashMap; -027import java.util.LinkedList; -028import java.util.List; -029import java.util.Map; -030import java.util.Optional; -031import java.util.concurrent.CompletableFuture; -032import java.util.concurrent.TimeUnit; -033import java.util.concurrent.atomic.AtomicReference; -034import java.util.function.BiConsumer; -035import java.util.regex.Pattern; -036import java.util.stream.Collectors; -037 -038import com.google.common.annotations.VisibleForTesting; -039 -040import io.netty.util.Timeout; -041import io.netty.util.TimerTask; -042import org.apache.commons.logging.Log; -043import org.apache.commons.logging.LogFactory; -044import org.apache.hadoop.hbase.HColumnDescriptor; -045import org.apache.hadoop.hbase.HRegionInfo; -046import org.apache.hadoop.hbase.HRegionLocation; -047import org.apache.hadoop.hbase.MetaTableAccessor; -048import org.apache.hadoop.hbase.MetaTableAccessor.QueryType; -049import org.apache.hadoop.hbase.NotServingRegionException; -050import org.apache.hadoop.hbase.RegionLocations; -051import org.apache.hadoop.hbase.ServerName; -052import org.apache.hadoop.hbase.NamespaceDescriptor; -053import org.apache.hadoop.hbase.HConstants; -054import org.apache.hadoop.hbase.TableExistsException; -055import org.apache.hadoop.hbase.TableName; -056import org.apache.hadoop.hbase.AsyncMetaTableAccessor; -057import org.apache.hadoop.hbase.TableNotFoundException; -058import org.apache.hadoop.hbase.UnknownRegionException; -059import org.apache.hadoop.hbase.classification.InterfaceAudience; -060import org.apache.hadoop.hbase.classification.InterfaceStability; -061import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.AdminRequestCallerBuilder; -062import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.MasterRequestCallerBuilder; -063import org.apache.hadoop.hbase.client.Scan.ReadType; -064import org.apache.hadoop.hbase.client.replication.ReplicationSerDeHelper; -065import org.apache.hadoop.hbase.client.replication.TableCFs; -066import org.apache.hadoop.hbase.exceptions.DeserializationException; -067import org.apache.hadoop.hbase.ipc.HBaseRpcController; -068import org.apache.hadoop.hbase.quotas.QuotaFilter; -069import org.apache.hadoop.hbase.quotas.QuotaSettings; -070import org.apache.hadoop.hbase.quotas.QuotaTableUtil; -071import org.apache.hadoop.hbase.replication.ReplicationException; -072import org.apache.hadoop.hbase.replication.ReplicationPeerConfig; -073import org.apache.hadoop.hbase.replication.ReplicationPeerDescription; -074import org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback; -075import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; -076import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter; -077import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService; -078import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest; -079import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse; -080import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionRequest; -081import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionResponse; -082import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos; -083import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema; -084import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest; -085import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse; -086import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest; -087import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionResponse; -088import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceRequest; -089import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceResponse; -090import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest; -091import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceResponse; -092import
[22/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/efd0601e/devapidocs/org/apache/hadoop/hbase/class-use/HRegionInfo.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/class-use/HRegionInfo.html b/devapidocs/org/apache/hadoop/hbase/class-use/HRegionInfo.html index 08c51f5..87755fa 100644 --- a/devapidocs/org/apache/hadoop/hbase/class-use/HRegionInfo.html +++ b/devapidocs/org/apache/hadoop/hbase/class-use/HRegionInfo.html @@ -1703,31 +1703,46 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.
+static Set<HRegionInfo> FavoredNodesManager.filterNonFNApplicableRegions(Collection<HRegionInfo> regions)
+Filter and return regions for which favored nodes is not applicable.
+private Map<HRegionInfo, List<ServerName>> FavoredNodeAssignmentHelper.generateFavoredNodes(Map<HRegionInfo, ServerName> primaryRSMap)
+Map<HRegionInfo, List<ServerName>> FavoredNodeAssignmentHelper.generateFavoredNodesRoundRobin(Map<ServerName, List<HRegionInfo>> assignmentMap, List<HRegionInfo> regions)
private Map<ServerName, Set<HRegionInfo>> FavoredNodeAssignmentHelper.mapRSToPrimaries(Map<HRegionInfo, ServerName> primaryRSMap)
-(package private) Map<HRegionInfo, ServerName[]> FavoredNodeAssignmentHelper.placeSecondaryAndTertiaryRS(Map<HRegionInfo, ServerName> primaryRSMap)
+Map<HRegionInfo, ServerName[]> FavoredNodeAssignmentHelper.placeSecondaryAndTertiaryRS(Map<HRegionInfo, ServerName> primaryRSMap)
Map<HRegionInfo, ServerName[]> FavoredNodeAssignmentHelper.placeSecondaryAndTertiaryWithRestrictions(Map<HRegionInfo, ServerName> primaryRSMap)
For regions that share the primary, avoid placing the secondary and tertiary on a same RS.
Map<ServerName, List<HRegionInfo>> FavoredNodeLoadBalancer.roundRobinAssignment(List<HRegionInfo> regions, List<ServerName> servers)
private Pair<Map<ServerName, List<HRegionInfo>>, List<HRegionInfo>>
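placeSecondaryAndTertiaryWithRestrictions() above states its rule in one line: regions that share a primary should spread their secondary and tertiary replicas across distinct RegionServers. A toy illustration of that constraint, not HBase's actual placement code (plain strings stand in for HRegionInfo and ServerName; at least three servers are assumed):

    import java.util.*;

    static Map<String, String[]> placeSecondaryAndTertiary(
        Map<String, String> primaryByRegion, List<String> servers) {
      Map<String, String[]> placement = new HashMap<>();
      int offset = 0;
      for (Map.Entry<String, String> e : primaryByRegion.entrySet()) {
        List<String> others = new ArrayList<>(servers);
        others.remove(e.getValue()); // never co-locate a replica with its primary
        // Rotate per region so regions sharing a primary tend to get different pairs.
        Collections.rotate(others, -(offset++));
        placement.put(e.getKey(), new String[] { others.get(0), others.get(1) });
      }
      return placement;
    }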
[22/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/10601a30/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.DeleteTableProcedureBiConsumer.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.DeleteTableProcedureBiConsumer.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.DeleteTableProcedureBiConsumer.html index be839b7..72853dd 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.DeleteTableProcedureBiConsumer.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.DeleteTableProcedureBiConsumer.html @@ -45,1639 +45,1784 @@ 037 038import com.google.common.annotations.VisibleForTesting; 039 -040import org.apache.commons.logging.Log; -041import org.apache.commons.logging.LogFactory; -042import org.apache.hadoop.hbase.HColumnDescriptor; -043import org.apache.hadoop.hbase.HRegionInfo; -044import org.apache.hadoop.hbase.HRegionLocation; -045import org.apache.hadoop.hbase.HTableDescriptor; -046import org.apache.hadoop.hbase.MetaTableAccessor; -047import org.apache.hadoop.hbase.MetaTableAccessor.QueryType; -048import org.apache.hadoop.hbase.NotServingRegionException; -049import org.apache.hadoop.hbase.RegionLocations; -050import org.apache.hadoop.hbase.ServerName; -051import org.apache.hadoop.hbase.NamespaceDescriptor; -052import org.apache.hadoop.hbase.HConstants; -053import org.apache.hadoop.hbase.TableName; -054import org.apache.hadoop.hbase.AsyncMetaTableAccessor; -055import org.apache.hadoop.hbase.TableNotFoundException; -056import org.apache.hadoop.hbase.UnknownRegionException; -057import org.apache.hadoop.hbase.classification.InterfaceAudience; -058import org.apache.hadoop.hbase.classification.InterfaceStability; -059import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.AdminRequestCallerBuilder; -060import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.MasterRequestCallerBuilder; -061import org.apache.hadoop.hbase.client.Scan.ReadType; -062import org.apache.hadoop.hbase.client.replication.ReplicationSerDeHelper; -063import org.apache.hadoop.hbase.client.replication.TableCFs; -064import org.apache.hadoop.hbase.exceptions.DeserializationException; -065import org.apache.hadoop.hbase.ipc.HBaseRpcController; -066import org.apache.hadoop.hbase.quotas.QuotaFilter; -067import org.apache.hadoop.hbase.quotas.QuotaSettings; -068import org.apache.hadoop.hbase.quotas.QuotaTableUtil; -069import org.apache.hadoop.hbase.replication.ReplicationException; -070import org.apache.hadoop.hbase.replication.ReplicationPeerConfig; -071import org.apache.hadoop.hbase.replication.ReplicationPeerDescription; -072import org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback; -073import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; -074import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter; -075import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService; -076import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest; -077import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse; -078import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionRequest; -079import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionResponse; -080import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema; -081import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest; -082import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse; -083import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest; -084import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionResponse; -085import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceRequest; -086import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceResponse; -087import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest; -088import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceResponse; -089import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceRequest; -090import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceResponse; -091import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableRequest; -092import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableResponse; -093import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableRequest; -094import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableResponse; -095import
[22/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/662ea7dc/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.MasterRpcCall.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.MasterRpcCall.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.MasterRpcCall.html
index ac4a9b3..be839b7 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.MasterRpcCall.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.MasterRpcCall.html
@@ -30,212 +30,212 @@
 022import java.io.IOException;
 023import java.util.ArrayList;
 024import java.util.Arrays;
-025import java.util.LinkedList;
-026import java.util.List;
-027import java.util.Optional;
-028import java.util.concurrent.CompletableFuture;
-029import java.util.concurrent.TimeUnit;
-030import java.util.concurrent.atomic.AtomicReference;
-031import java.util.function.BiConsumer;
-032import java.util.regex.Pattern;
-033
-034import com.google.common.annotations.VisibleForTesting;
-035import org.apache.commons.logging.Log;
-036import org.apache.commons.logging.LogFactory;
-037import org.apache.hadoop.hbase.HColumnDescriptor;
-038import org.apache.hadoop.hbase.HRegionInfo;
-039import org.apache.hadoop.hbase.HRegionLocation;
-040import org.apache.hadoop.hbase.HTableDescriptor;
-041import org.apache.hadoop.hbase.MetaTableAccessor;
-042import org.apache.hadoop.hbase.MetaTableAccessor.QueryType;
-043import org.apache.hadoop.hbase.NotServingRegionException;
-044import org.apache.hadoop.hbase.RegionLocations;
-045import org.apache.hadoop.hbase.ServerName;
-046import org.apache.hadoop.hbase.NamespaceDescriptor;
-047import org.apache.hadoop.hbase.HConstants;
-048import org.apache.hadoop.hbase.TableName;
-049import org.apache.hadoop.hbase.AsyncMetaTableAccessor;
-050import org.apache.hadoop.hbase.TableNotFoundException;
-051import org.apache.hadoop.hbase.UnknownRegionException;
-052import org.apache.hadoop.hbase.classification.InterfaceAudience;
-053import org.apache.hadoop.hbase.classification.InterfaceStability;
-054import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.AdminRequestCallerBuilder;
-055import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.MasterRequestCallerBuilder;
-056import org.apache.hadoop.hbase.client.Scan.ReadType;
-057import org.apache.hadoop.hbase.exceptions.DeserializationException;
-058import org.apache.hadoop.hbase.ipc.HBaseRpcController;
-059import org.apache.hadoop.hbase.quotas.QuotaFilter;
-060import org.apache.hadoop.hbase.quotas.QuotaSettings;
-061import org.apache.hadoop.hbase.quotas.QuotaTableUtil;
-062import org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback;
-063import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-064import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
-065import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
-066import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest;
-067import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse;
-068import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionRequest;
-069import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionResponse;
-070import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema;
-071import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest;
-072import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse;
-073import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest;
-074import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionResponse;
-075import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceRequest;
-076import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceResponse;
-077import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest;
-078import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceResponse;
-079import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceRequest;
-080import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceResponse;
-081import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableRequest;
-082import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableResponse;
-083import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableRequest;
-084import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableResponse;
-085import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnRequest;
-086import
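For orientation on the page regenerated above: AsyncHBaseAdmin.MasterRpcCall is the callback hook through which the async admin routes every master-side RPC, which is why the import block is dominated by paired MasterProtos request/response types. Below is a self-contained sketch of that shape; the stand-in RpcCallback and the two-argument call signature are illustrative assumptions (the real interface works against the shaded protobuf MasterService stub and the HBaseRpcController seen in the imports above), not the actual HBase source.

    import java.util.concurrent.CompletableFuture;

    // Stand-in for the shaded protobuf RpcCallback; assumed for illustration.
    interface RpcCallback<T> { void run(T response); }

    // Sketch of the pattern: one generic functional interface per master RPC,
    // so each admin method only has to supply a lambda naming the stub call.
    @FunctionalInterface
    interface MasterRpcCall<RESP, REQ> {
      void call(REQ request, RpcCallback<RESP> done);
    }

    class MasterRpcCallDemo {
      // Bridges the callback-style RPC into a CompletableFuture, mirroring
      // how the async admin surfaces every operation as a future.
      static <REQ, RESP> CompletableFuture<RESP> call(REQ req, MasterRpcCall<RESP, REQ> rpc) {
        CompletableFuture<RESP> future = new CompletableFuture<>();
        rpc.call(req, future::complete); // completion is driven by the RPC callback
        return future;
      }

      public static void main(String[] args) {
        // Toy "RPC" that echoes the request; a real call would invoke the master stub.
        CompletableFuture<String> f =
            call("balance", (req, done) -> done.run(req + " -> ok"));
        System.out.println(f.join());
      }
    }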
[22/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6b4bae59/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.html b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.html
index 19869ca..cb74820 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.html
@@ -1154,8 +1154,8 @@ implements
 initializeFileSystem()
-private void
-initializeMemStoreChunkPool()
+protected void
+initializeMemStoreChunkCreator()
 private void
@@ -2367,7 +2367,7 @@ protected static final
 UNSPECIFIED_REGION
-private static final byte[] UNSPECIFIED_REGION
+private static final byte[] UNSPECIFIED_REGION
@@ -2376,7 +2376,7 @@ protected static final
 movedRegions
-protected Map<String,HRegionServer.MovedRegionInfo> movedRegions
+protected Map<String,HRegionServer.MovedRegionInfo> movedRegions
@@ -2385,7 +2385,7 @@ protected static final
 TIMEOUT_REGION_MOVED
-private static final int TIMEOUT_REGION_MOVED
+private static final int TIMEOUT_REGION_MOVED
 See Also:
 Constant Field Values
@@ -2883,13 +2883,13 @@ protected static final
-initializeMemStoreChunkPool
-private void initializeMemStoreChunkPool()
+initializeMemStoreChunkCreator
+protected void initializeMemStoreChunkCreator()
@@ -2898,7 +2898,7 @@
 startHeapMemoryManager
-private void startHeapMemoryManager()
+private void startHeapMemoryManager()
@@ -2907,7 +2907,7 @@
 createMyEphemeralNode
-private void createMyEphemeralNode()
+private void createMyEphemeralNode()
         throws org.apache.zookeeper.KeeperException, IOException
@@ -2923,7 +2923,7 @@
 deleteMyEphemeralNode
-private void deleteMyEphemeralNode()
+private void deleteMyEphemeralNode()
         throws org.apache.zookeeper.KeeperException
 Throws:
@@ -2937,7 +2937,7 @@
 getRegionServerAccounting
-public RegionServerAccounting getRegionServerAccounting()
+public RegionServerAccounting getRegionServerAccounting()
 Specified by:
 getRegionServerAccounting in interface RegionServerServices
@@ -2952,7 +2952,7 @@
 createRegionLoad
-org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionLoad createRegionLoad(Region r,
+org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionLoad createRegionLoad(Region r,
     org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionLoad.Builder regionLoadBldr,
     org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.Builder regionSpecifier)
         throws IOException
@@ -2968,7 +2968,7 @@
 createRegionLoad
-public org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionLoad createRegionLoad(String encodedRegionName)
+public org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionLoad createRegionLoad(String encodedRegionName)
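The substantive change in this page is visible in the signatures: initializeMemStoreChunkPool() was renamed to initializeMemStoreChunkCreator() and widened from private to protected, so HMaster, which extends HRegionServer, can inherit the memstore chunk-creator setup instead of duplicating it (the HMaster diff later in this commit removes the explicit call). A minimal sketch of what the wider visibility enables; the class shapes and the startup() hook are illustrative assumptions, not HBase source.

    // Illustrative only: shows why private -> protected matters for a subclass.
    class RegionServerSketch {
      // Protected (rather than private) lets subclasses reuse the same
      // chunk-creator initialization through the shared startup path.
      protected void initializeMemStoreChunkCreator() {
        System.out.println("initializing memstore chunk creator");
      }

      void startup() {
        initializeMemStoreChunkCreator(); // one shared init path
      }
    }

    class MasterSketch extends RegionServerSketch {
      @Override
      void startup() {
        super.startup(); // inherits the chunk-creator setup; no duplicate call needed
      }

      public static void main(String[] args) {
        new MasterSketch().startup(); // prints via the inherited hook
      }
    }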
[22/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2fcc2ae0/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html
index a58f559..98b388b 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html
@@ -756,2562 +756,2560 @@
 748
 749    this.masterActiveTime = System.currentTimeMillis();
 750    // TODO: Do this using Dependency Injection, using PicoContainer, Guice or Spring.
-751    // Initialize the chunkCreator
-752    initializeMemStoreChunkCreator();
-753    this.fileSystemManager = new MasterFileSystem(this);
-754    this.walManager = new MasterWalManager(this);
-755
-756    // enable table descriptors cache
-757    this.tableDescriptors.setCacheOn();
-758    // set the META's descriptor to the correct replication
-759    this.tableDescriptors.get(TableName.META_TABLE_NAME).setRegionReplication(
-760        conf.getInt(HConstants.META_REPLICAS_NUM, HConstants.DEFAULT_META_REPLICA_NUM));
-761    // warm-up HTDs cache on master initialization
-762    if (preLoadTableDescriptors) {
-763      status.setStatus("Pre-loading table descriptors");
-764      this.tableDescriptors.getAll();
-765    }
-766
-767    // publish cluster ID
-768    status.setStatus("Publishing Cluster ID in ZooKeeper");
-769    ZKClusterId.setClusterId(this.zooKeeper, fileSystemManager.getClusterId());
-770    this.initLatch.countDown();
+751    this.fileSystemManager = new MasterFileSystem(this);
+752    this.walManager = new MasterWalManager(this);
+753
+754    // enable table descriptors cache
+755    this.tableDescriptors.setCacheOn();
+756    // set the META's descriptor to the correct replication
+757    this.tableDescriptors.get(TableName.META_TABLE_NAME).setRegionReplication(
+758        conf.getInt(HConstants.META_REPLICAS_NUM, HConstants.DEFAULT_META_REPLICA_NUM));
+759    // warm-up HTDs cache on master initialization
+760    if (preLoadTableDescriptors) {
+761      status.setStatus("Pre-loading table descriptors");
+762      this.tableDescriptors.getAll();
+763    }
+764
+765    // publish cluster ID
+766    status.setStatus("Publishing Cluster ID in ZooKeeper");
+767    ZKClusterId.setClusterId(this.zooKeeper, fileSystemManager.getClusterId());
+768    this.initLatch.countDown();
+769
+770    this.serverManager = createServerManager(this);
 771
-772    this.serverManager = createServerManager(this);
+772    this.tableStateManager = new TableStateManager(this);
 773
-774    this.tableStateManager = new TableStateManager(this);
-775
-776    status.setStatus("Initializing ZK system trackers");
-777    initializeZKBasedSystemTrackers();
-778
-779    // This is for backwards compatibility
-780    // See HBASE-11393
-781    status.setStatus("Update TableCFs node in ZNode");
-782    TableCFsUpdater tableCFsUpdater = new TableCFsUpdater(zooKeeper,
-783        conf, this.clusterConnection);
-784    tableCFsUpdater.update();
-785
-786    // initialize master side coprocessors before we start handling requests
-787    status.setStatus("Initializing master coprocessors");
-788    this.cpHost = new MasterCoprocessorHost(this, this.conf);
-789
-790    // start up all service threads.
-791    status.setStatus("Initializing master service threads");
-792    startServiceThreads();
-793
-794    // Wake up this server to check in
-795    sleeper.skipSleepCycle();
-796
-797    // Wait for region servers to report in
-798    status.setStatus("Wait for region servers to report in");
-799    waitForRegionServers(status);
-800
-801    // get a list for previously failed RS which need log splitting work
-802    // we recover hbase:meta region servers inside master initialization and
-803    // handle other failed servers in SSH in order to start up master node ASAP
-804    MasterMetaBootstrap metaBootstrap = createMetaBootstrap(this, status);
-805    metaBootstrap.splitMetaLogsBeforeAssignment();
+774    status.setStatus("Initializing ZK system trackers");
+775    initializeZKBasedSystemTrackers();
+776
+777    // This is for backwards compatibility
+778    // See HBASE-11393
+779    status.setStatus("Update TableCFs node in ZNode");
+780    TableCFsUpdater tableCFsUpdater = new TableCFsUpdater(zooKeeper,
+781        conf, this.clusterConnection);
+782    tableCFsUpdater.update();
+783
+784    // initialize master side coprocessors before we start handling requests
+785    status.setStatus("Initializing master coprocessors");
+786    this.cpHost = new MasterCoprocessorHost(this, this.conf);
+787
+788    // start up all service threads.
+789    status.setStatus("Initializing master service threads");
+790    startServiceThreads();
+791
+792    // Wake up this server to check in
[22/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e57d1b63/apidocs/org/apache/hadoop/hbase/class-use/HColumnDescriptor.html
--
diff --git a/apidocs/org/apache/hadoop/hbase/class-use/HColumnDescriptor.html b/apidocs/org/apache/hadoop/hbase/class-use/HColumnDescriptor.html
deleted file mode 100644
index 473e890..000
--- a/apidocs/org/apache/hadoop/hbase/class-use/HColumnDescriptor.html
+++ /dev/null
@@ -1,522 +0,0 @@
[Deleted page: "Uses of Class org.apache.hadoop.hbase.HColumnDescriptor (Apache HBase 2.0.0-SNAPSHOT API)". Standard Javadoc navigation chrome omitted; the recoverable content of the removed page follows, without the leading "-" diff markers.]

Packages that use HColumnDescriptor:
- org.apache.hadoop.hbase
- org.apache.hadoop.hbase.client (Provides HBase Client)

Uses of HColumnDescriptor in org.apache.hadoop.hbase; methods in org.apache.hadoop.hbase that return HColumnDescriptor:
- HColumnDescriptor[] HTableDescriptor.getColumnFamilies(): Returns an array of all the HColumnDescriptor of the column families of the table.
- HColumnDescriptor HTableDescriptor.getFamily(byte[] column): Returns the HColumnDescriptor for a specific column family with name as specified by the parameter column.
- static HColumnDescriptor HColumnDescriptor.parseFrom(byte[] bytes)
- HColumnDescriptor HTableDescriptor.removeFamily(byte[] column): Removes the HColumnDescriptor with name specified by the parameter column from the table descriptor.
- HColumnDescriptor HColumnDescriptor.setBlockCacheEnabled(boolean blockCacheEnabled)
- HColumnDescriptor HColumnDescriptor.setBlocksize(int s)
- HColumnDescriptor HColumnDescriptor.setBloomFilterType(BloomType bt)
- HColumnDescriptor HColumnDescriptor.setCacheBloomsOnWrite(boolean value)
- HColumnDescriptor HColumnDescriptor.setCacheDataInL1(boolean value)
- HColumnDescriptor HColumnDescriptor.setCacheDataOnWrite(boolean value)
- HColumnDescriptor HColumnDescriptor.setCacheIndexesOnWrite(boolean value)
- HColumnDescriptor HColumnDescriptor.setCompactionCompressionType(org.apache.hadoop.hbase.io.compress.Compression.Algorithm type): Compression types supported in hbase.
- HColumnDescriptor HColumnDescriptor.setCompressionType(org.apache.hadoop.hbase.io.compress.Compression.Algorithm type): Compression types supported in hbase.
- HColumnDescriptor HColumnDescriptor.setCompressTags(boolean compressTags): Set whether the tags should be compressed along with DataBlockEncoding.
- HColumnDescriptor HColumnDescriptor.setConfiguration(String key, String value): Setter for storing a configuration setting in configuration map.
- HColumnDescriptor HColumnDescriptor.setDataBlockEncoding(DataBlockEncoding type): Set data block encoding algorithm used in block cache.
- HColumnDescriptor HColumnDescriptor.setDFSReplication(short replication): Set the replication factor to hfile(s) belonging to this family.
- HColumnDescriptor HColumnDescriptor.setEncryptionKey(byte[] keyBytes): Set the raw crypto key attribute for the family.
- HColumnDescriptor HColumnDescriptor.setEncryptionType(String algorithm): Set the encryption algorithm for use with this family.
- HColumnDescriptor HColumnDescriptor.setEvictBlocksOnClose(boolean value)
- HColumnDescriptor HColumnDescriptor.setInMemory(boolean inMemory)
- HColumnDescriptor HColumnDescriptor.setInMemoryCompaction(MemoryCompactionPolicy inMemoryCompaction)
- HColumnDescriptor HColumnDescriptor.setKeepDeletedCells(KeepDeletedCells keepDeletedCells)
- HColumnDescriptor HColumnDescriptor.setMaxVersions(int maxVersions)
- HColumnDescriptor HColumnDescriptor.setMinVersions(int minVersions)
- HColumnDescriptor
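The deleted page cataloged HColumnDescriptor's fluent setters, each of which returns the descriptor itself so configuration calls chain. A short usage sketch assembled only from methods listed above; the family name, table name, and chosen values are illustrative, and addFamily is assumed from HTableDescriptor's API rather than shown on this page.

    import org.apache.hadoop.hbase.HColumnDescriptor;
    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.regionserver.BloomType;

    public class ColumnFamilyExample {
      public static void main(String[] args) {
        // Each setter returns the same HColumnDescriptor, so the calls chain.
        HColumnDescriptor family = new HColumnDescriptor("cf")
            .setMaxVersions(3)           // keep up to three versions per cell
            .setMinVersions(1)
            .setBlocksize(64 * 1024)     // 64 KB HFile blocks
            .setInMemory(true)
            .setBloomFilterType(BloomType.ROW);

        HTableDescriptor table = new HTableDescriptor(TableName.valueOf("demo"));
        table.addFamily(family);         // addFamily assumed from HTableDescriptor
        System.out.println(table);
      }
    }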