[08/51] [partial] hbase-site git commit: Published site at 620d70d6186fb800299bcc62ad7179fccfd1be41.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/aa3fb87f/devapidocs/src-html/org/apache/hadoop/hbase/thrift2/client/ThriftAdmin.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/thrift2/client/ThriftAdmin.html b/devapidocs/src-html/org/apache/hadoop/hbase/thrift2/client/ThriftAdmin.html
new file mode 100644
index 000..1fa0940
--- /dev/null
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/thrift2/client/ThriftAdmin.html
@@ -0,0 +1,1477 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.thrift2.client;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.EnumSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.Future;
+import java.util.regex.Pattern;
+
+import org.apache.commons.lang3.NotImplementedException;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.CacheEvictionStats;
+import org.apache.hadoop.hbase.ClusterMetrics;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.NamespaceDescriptor;
+import org.apache.hadoop.hbase.NamespaceNotFoundException;
+import org.apache.hadoop.hbase.RegionMetrics;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.TableNotFoundException;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
+import org.apache.hadoop.hbase.client.CompactType;
+import org.apache.hadoop.hbase.client.CompactionState;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.client.SnapshotDescription;
+import org.apache.hadoop.hbase.client.SnapshotType;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.replication.TableCFs;
+import org.apache.hadoop.hbase.client.security.SecurityCapability;
+import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
+import org.apache.hadoop.hbase.quotas.QuotaFilter;
+import org.apache.hadoop.hbase.quotas.QuotaRetriever;
+import org.apache.hadoop.hbase.quotas.QuotaSettings;
+import org.apache.hadoop.hbase.replication.ReplicationException;
+import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
+import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
+import org.apache.hadoop.hbase.replication.SyncReplicationState;
+import org.apache.hadoop.hbase.thrift2.ThriftUtilities;
+import org.apache.hadoop.hbase.thrift2.generated.TColumnFamilyDescriptor;
+import org.apache.hadoop.hbase.thrift2.generated.THBaseService;
+import org.apache.hadoop.hbase.thrift2.generated.TNamespaceDescriptor;
+import org.apache.hadoop.hbase.thrift2.generated.TTableDescriptor;
+import org.apache.hadoop.hbase.thrift2.generated.TTableName;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.Pair;
+import org.apache.thrift.TException;
+import org.apache.thrift.transport.TTransport;
+import org.apache.yetus.audience.InterfaceAudience;
+
+@InterfaceAudience.Private
+public class ThriftAdmin implements Admin {
+
+  private THBaseService.Client client;
+  private TTransport transport;
+  private int operationTimeout;
+  private Configuration conf;
+
+  public ThriftAdmin(THBaseService.Client client, TTransport tTransport, Configuration conf) {
+    this.client = client;
+    this.transport = tTransport;
+    this.operationTimeout = conf.getInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT,
+      HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT);
+    this.conf = conf;
+  }
+
+  @Override
+  public
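For orientation, a minimal sketch of wiring this class up against a running thrift2 server, using the constructor shown above. ThriftAdmin is @InterfaceAudience.Private, and the host, port, and unframed binary protocol here are illustrative assumptions, not the client's actual connection handling.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.thrift2.client.ThriftAdmin;
import org.apache.hadoop.hbase.thrift2.generated.THBaseService;
import org.apache.thrift.protocol.TBinaryProtocol;
import org.apache.thrift.transport.TSocket;
import org.apache.thrift.transport.TTransport;

public class ThriftAdminSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Assumed endpoint of a thrift2 server; adjust host/port (and add a framed
    // transport if the server is configured for one).
    TTransport transport = new TSocket("localhost", 9090);
    transport.open();
    THBaseService.Client client = new THBaseService.Client(new TBinaryProtocol(transport));
    // The constructor above reads hbase.client.operation.timeout from conf.
    ThriftAdmin admin = new ThriftAdmin(client, transport, conf);
    System.out.println("connected: " + transport.isOpen());
    transport.close();
  }
}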
[08/51] [partial] hbase-site git commit: Published site at 281d6429e55149cc4c05430dcc1d1dc136d8b245.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/901d593a/devapidocs/org/apache/hadoop/hbase/quotas/RegionSizeStoreImpl.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/quotas/RegionSizeStoreImpl.html b/devapidocs/org/apache/hadoop/hbase/quotas/RegionSizeStoreImpl.html
index 0b85ddf..67d4f04 100644
--- a/devapidocs/org/apache/hadoop/hbase/quotas/RegionSizeStoreImpl.html
+++ b/devapidocs/org/apache/hadoop/hbase/quotas/RegionSizeStoreImpl.html
@@ -50,7 +50,7 @@
 Prev Class
-Next Class
+Next Class
 Frames
@@ -499,7 +499,7 @@
 Prev Class
-Next Class
+Next Class
 Frames

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/901d593a/devapidocs/org/apache/hadoop/hbase/quotas/RpcThrottleStorage.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/quotas/RpcThrottleStorage.html b/devapidocs/org/apache/hadoop/hbase/quotas/RpcThrottleStorage.html
new file mode 100644
index 000..6de3b41
--- /dev/null
+++ b/devapidocs/org/apache/hadoop/hbase/quotas/RpcThrottleStorage.html
@@ -0,0 +1,387 @@
+RpcThrottleStorage (Apache HBase 3.0.0-SNAPSHOT API)
+
+org.apache.hadoop.hbase.quotas
+Class RpcThrottleStorage
+
+java.lang.Object
+  org.apache.hadoop.hbase.quotas.RpcThrottleStorage
+
+@InterfaceAudience.Private
+public class RpcThrottleStorage
+extends Object
+
+ZK based rpc throttle storage.
+
+Field Summary
+  static String     RPC_THROTTLE_ZNODE
+  static String     RPC_THROTTLE_ZNODE_DEFAULT
+  private String    rpcThrottleZNode
+  private ZKWatcher zookeeper
+
+Constructor Summary
+  RpcThrottleStorage(ZKWatcher zookeeper, org.apache.hadoop.conf.Configuration conf)
+
+Method Summary
+  boolean isRpcThrottleEnabled()
+  void    switchRpcThrottle(boolean enable)   Store the rpc throttle value.
+
+Methods inherited from class java.lang.Object: clone, equals, finalize, getClass, hashCode, notify,
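To make the ZK-based pattern concrete, here is a hedged sketch of persisting and reading a boolean throttle flag at a znode with the plain ZooKeeper client. The znode path, serialization, and default are illustrative assumptions; RpcThrottleStorage itself goes through HBase's ZKWatcher/ZKUtil plumbing.

import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.ZooKeeper;

// Illustrative only: a boolean flag stored at a znode, mirroring the
// switchRpcThrottle/isRpcThrottleEnabled pair in the summary above.
public class ZkBooleanFlag {
  private final ZooKeeper zk;
  private final String znode; // e.g. "/hbase/rpc-throttle" (assumed path)

  public ZkBooleanFlag(ZooKeeper zk, String znode) {
    this.zk = zk;
    this.znode = znode;
  }

  public void switchFlag(boolean enable) throws KeeperException, InterruptedException {
    byte[] data = new byte[] { (byte) (enable ? 1 : 0) };
    if (zk.exists(znode, false) == null) {
      zk.create(znode, data, ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
    } else {
      zk.setData(znode, data, -1); // version -1: unconditional, last write wins
    }
  }

  public boolean isEnabled() throws KeeperException, InterruptedException {
    if (zk.exists(znode, false) == null) {
      return true; // assumed default: throttling on when nothing is stored
    }
    return zk.getData(znode, false, null)[0] != 0;
  }
}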
[08/51] [partial] hbase-site git commit: Published site at 466fa920fee572fe20db3b77ebf539dc304d5f31.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2bf59208/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.CreateTableProcedureBiConsumer.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.CreateTableProcedureBiConsumer.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.CreateTableProcedureBiConsumer.html
index 736388b..197b99d 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.CreateTableProcedureBiConsumer.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.CreateTableProcedureBiConsumer.html
@@ -26,3624 +26,3599 @@
 package org.apache.hadoop.hbase.client;
 
 import static org.apache.hadoop.hbase.TableName.META_TABLE_NAME;
-
-import com.google.protobuf.Message;
-import com.google.protobuf.RpcChannel;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.EnumSet;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Optional;
-import java.util.Set;
-import java.util.concurrent.CompletableFuture;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicReference;
-import java.util.function.BiConsumer;
-import java.util.function.Function;
-import java.util.function.Supplier;
-import java.util.regex.Pattern;
-import java.util.stream.Collectors;
-import java.util.stream.Stream;
-import org.apache.commons.io.IOUtils;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.AsyncMetaTableAccessor;
-import org.apache.hadoop.hbase.CacheEvictionStats;
-import org.apache.hadoop.hbase.CacheEvictionStatsAggregator;
-import org.apache.hadoop.hbase.ClusterMetrics;
-import org.apache.hadoop.hbase.ClusterMetrics.Option;
-import org.apache.hadoop.hbase.ClusterMetricsBuilder;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionLocation;
-import org.apache.hadoop.hbase.MetaTableAccessor;
-import org.apache.hadoop.hbase.MetaTableAccessor.QueryType;
-import org.apache.hadoop.hbase.NamespaceDescriptor;
-import org.apache.hadoop.hbase.RegionLocations;
-import org.apache.hadoop.hbase.RegionMetrics;
-import org.apache.hadoop.hbase.RegionMetricsBuilder;
-import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.TableExistsException;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.TableNotDisabledException;
-import org.apache.hadoop.hbase.TableNotEnabledException;
-import org.apache.hadoop.hbase.TableNotFoundException;
-import org.apache.hadoop.hbase.UnknownRegionException;
-import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.AdminRequestCallerBuilder;
-import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.MasterRequestCallerBuilder;
-import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.ServerRequestCallerBuilder;
-import org.apache.hadoop.hbase.client.Scan.ReadType;
-import org.apache.hadoop.hbase.client.replication.ReplicationPeerConfigUtil;
-import org.apache.hadoop.hbase.client.replication.TableCFs;
-import org.apache.hadoop.hbase.client.security.SecurityCapability;
-import org.apache.hadoop.hbase.exceptions.DeserializationException;
-import org.apache.hadoop.hbase.ipc.HBaseRpcController;
-import org.apache.hadoop.hbase.quotas.QuotaFilter;
-import org.apache.hadoop.hbase.quotas.QuotaSettings;
-import org.apache.hadoop.hbase.quotas.QuotaTableUtil;
-import org.apache.hadoop.hbase.replication.ReplicationException;
-import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
-import org.apache.hadoop.hbase.replication.SyncReplicationState;
-import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
-import org.apache.hadoop.hbase.snapshot.RestoreSnapshotException;
-import org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-import org.apache.hadoop.hbase.util.ForeignExceptionUtil;
-import org.apache.yetus.audience.InterfaceAudience;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
-import org.apache.hbase.thirdparty.com.google.protobuf.RpcCallback;
-import org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer;
-import
[08/51] [partial] hbase-site git commit: Published site at e4b6b4afb933a961f543537875f87a2dc62d3757.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/849d84a8/devapidocs/src-html/org/apache/hadoop/hbase/thrift2/ThriftServer.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/thrift2/ThriftServer.html b/devapidocs/src-html/org/apache/hadoop/hbase/thrift2/ThriftServer.html
index 783dc34..5898688 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/thrift2/ThriftServer.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/thrift2/ThriftServer.html
@@ -26,609 +26,99 @@
  */
 package org.apache.hadoop.hbase.thrift2;
 
-import java.io.IOException;
-import java.net.InetAddress;
-import java.net.InetSocketAddress;
-import java.net.UnknownHostException;
-import java.security.PrivilegedAction;
-import java.util.Map;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.LinkedBlockingQueue;
-import java.util.concurrent.SynchronousQueue;
-import java.util.concurrent.ThreadPoolExecutor;
-import java.util.concurrent.TimeUnit;
-
-import javax.security.auth.callback.Callback;
-import javax.security.auth.callback.UnsupportedCallbackException;
-import javax.security.sasl.AuthorizeCallback;
-import javax.security.sasl.SaslServer;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.conf.Configured;
-import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.HBaseInterfaceAudience;
-import org.apache.hadoop.hbase.filter.ParseFilter;
-import org.apache.hadoop.hbase.http.InfoServer;
-import org.apache.hadoop.hbase.security.SaslUtil;
-import org.apache.hadoop.hbase.security.SecurityUtil;
-import org.apache.hadoop.hbase.security.UserProvider;
-import org.apache.hadoop.hbase.thrift.CallQueue;
-import org.apache.hadoop.hbase.thrift.THBaseThreadPoolExecutor;
-import org.apache.hadoop.hbase.thrift.ThriftMetrics;
-import org.apache.hadoop.hbase.thrift2.generated.THBaseService;
-import org.apache.hadoop.hbase.util.DNS;
-import org.apache.hadoop.hbase.util.JvmPauseMonitor;
-import org.apache.hadoop.hbase.util.Strings;
-import org.apache.hadoop.security.SaslRpcServer.SaslGssCallbackHandler;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.util.Tool;
-import org.apache.hadoop.util.ToolRunner;
-import org.apache.thrift.TException;
-import org.apache.thrift.TProcessor;
-import org.apache.thrift.protocol.TBinaryProtocol;
-import org.apache.thrift.protocol.TCompactProtocol;
-import org.apache.thrift.protocol.TProtocol;
-import org.apache.thrift.protocol.TProtocolFactory;
-import org.apache.thrift.server.THsHaServer;
-import org.apache.thrift.server.TNonblockingServer;
-import org.apache.thrift.server.TServer;
-import org.apache.thrift.server.TThreadPoolServer;
-import org.apache.thrift.server.TThreadedSelectorServer;
-import org.apache.thrift.transport.TFramedTransport;
-import org.apache.thrift.transport.TNonblockingServerSocket;
-import org.apache.thrift.transport.TNonblockingServerTransport;
-import org.apache.thrift.transport.TSaslServerTransport;
-import org.apache.thrift.transport.TServerSocket;
-import org.apache.thrift.transport.TServerTransport;
-import org.apache.thrift.transport.TTransportException;
-import org.apache.thrift.transport.TTransportFactory;
-import org.apache.yetus.audience.InterfaceAudience;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
-import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLine;
-import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLineParser;
-import org.apache.hbase.thirdparty.org.apache.commons.cli.DefaultParser;
-import org.apache.hbase.thirdparty.org.apache.commons.cli.HelpFormatter;
-import org.apache.hbase.thirdparty.org.apache.commons.cli.Option;
-import org.apache.hbase.thirdparty.org.apache.commons.cli.OptionGroup;
-import org.apache.hbase.thirdparty.org.apache.commons.cli.Options;
-import org.apache.hbase.thirdparty.org.apache.commons.cli.ParseException;
-
-/**
- * ThriftServer - this class starts up a Thrift server which implements the HBase API specified in
- * the HbaseClient.thrift IDL file.
- */
-@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)
-@SuppressWarnings({ "rawtypes", "unchecked" })
-public class ThriftServer extends Configured implements Tool {
-  private static final Logger log = LoggerFactory.getLogger(ThriftServer.class);
-
-  /**
-   * Thrift quality of protection configuration key. Valid values can be:
-   * privacy: authentication, integrity and confidentiality checking
-   * integrity: authentication and integrity checking
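The imports above show the Thrift server machinery this class selects among (TThreadPoolServer, THsHaServer, and the nonblocking/selector variants). As a point of reference, a minimal standalone TThreadPoolServer bootstrap looks like the sketch below; the port and processor are placeholders, not the ThriftServer's actual option handling.

import org.apache.thrift.TProcessor;
import org.apache.thrift.protocol.TBinaryProtocol;
import org.apache.thrift.server.TServer;
import org.apache.thrift.server.TThreadPoolServer;
import org.apache.thrift.transport.TServerSocket;
import org.apache.thrift.transport.TTransportException;

public class MinimalThriftBootstrap {
  // 'processor' would wrap the service handler, e.g. a THBaseService.Processor.
  public static TServer start(TProcessor processor, int port) throws TTransportException {
    TServerSocket serverTransport = new TServerSocket(port); // blocking socket transport
    TThreadPoolServer.Args args = new TThreadPoolServer.Args(serverTransport)
        .processor(processor)
        .protocolFactory(new TBinaryProtocol.Factory());
    TServer server = new TThreadPoolServer(args);
    new Thread(server::serve, "thrift-serve").start(); // serve() blocks, so run it off-thread
    return server;
  }
}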
[08/51] [partial] hbase-site git commit: Published site at 3ab895979b643a2980bcdb7fee2078f14b614210.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/mapreduce/CellCounter.CellCounterMapper.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/mapreduce/CellCounter.CellCounterMapper.html b/devapidocs/org/apache/hadoop/hbase/mapreduce/CellCounter.CellCounterMapper.html
index 603735b..84ecbfd 100644
--- a/devapidocs/org/apache/hadoop/hbase/mapreduce/CellCounter.CellCounterMapper.html
+++ b/devapidocs/org/apache/hadoop/hbase/mapreduce/CellCounter.CellCounterMapper.html
@@ -497,6 +497,6 @@ extends
-Copyright © 2007–2018 The Apache Software Foundation. All rights reserved.
+Copyright © 2007–2019 The Apache Software Foundation. All rights reserved.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/mapreduce/CellCounter.IntSumReducer.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/mapreduce/CellCounter.IntSumReducer.html b/devapidocs/org/apache/hadoop/hbase/mapreduce/CellCounter.IntSumReducer.html
index bb074a9..3b84726 100644
--- a/devapidocs/org/apache/hadoop/hbase/mapreduce/CellCounter.IntSumReducer.html
+++ b/devapidocs/org/apache/hadoop/hbase/mapreduce/CellCounter.IntSumReducer.html
@@ -349,6 +349,6 @@ extends org.apache.hadoop.mapreduce.Reducer<Key, org.apache.hadoop.io.IntWrita
-Copyright © 2007–2018 The Apache Software Foundation. All rights reserved.
+Copyright © 2007–2019 The Apache Software Foundation. All rights reserved.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/mapreduce/CellCounter.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/mapreduce/CellCounter.html b/devapidocs/org/apache/hadoop/hbase/mapreduce/CellCounter.html
index ed1f34c..bb26cde 100644
--- a/devapidocs/org/apache/hadoop/hbase/mapreduce/CellCounter.html
+++ b/devapidocs/org/apache/hadoop/hbase/mapreduce/CellCounter.html
@@ -528,6 +528,6 @@ implements org.apache.hadoop.util.Tool
-Copyright © 2007–2018 The Apache Software Foundation. All rights reserved.
+Copyright © 2007–2019 The Apache Software Foundation. All rights reserved.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/mapreduce/CellCreator.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/mapreduce/CellCreator.html b/devapidocs/org/apache/hadoop/hbase/mapreduce/CellCreator.html
index 3e42624..6de0884 100644
--- a/devapidocs/org/apache/hadoop/hbase/mapreduce/CellCreator.html
+++ b/devapidocs/org/apache/hadoop/hbase/mapreduce/CellCreator.html
@@ -510,6 +510,6 @@ public
-Copyright © 2007–2018 The Apache Software Foundation. All rights reserved.
+Copyright © 2007–2019 The Apache Software Foundation. All rights reserved.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/mapreduce/CellSerialization.CellDeserializer.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/mapreduce/CellSerialization.CellDeserializer.html b/devapidocs/org/apache/hadoop/hbase/mapreduce/CellSerialization.CellDeserializer.html
index 9688902..e0f2fd6 100644
--- a/devapidocs/org/apache/hadoop/hbase/mapreduce/CellSerialization.CellDeserializer.html
+++ b/devapidocs/org/apache/hadoop/hbase/mapreduce/CellSerialization.CellDeserializer.html
@@ -359,6 +359,6 @@ implements org.apache.hadoop.io.serializer.Deserializer
-Copyright © 2007–2018 The Apache Software Foundation. All rights reserved.
+Copyright © 2007–2019 The Apache Software Foundation. All rights reserved.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/mapreduce/CellSerialization.CellSerializer.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/mapreduce/CellSerialization.CellSerializer.html b/devapidocs/org/apache/hadoop/hbase/mapreduce/CellSerialization.CellSerializer.html
index 31aaf71..e42f20e 100644
--- a/devapidocs/org/apache/hadoop/hbase/mapreduce/CellSerialization.CellSerializer.html
+++ b/devapidocs/org/apache/hadoop/hbase/mapreduce/CellSerialization.CellSerializer.html
@@ -359,6 +359,6 @@ implements org.apache.hadoop.io.serializer.Serializer
-Copyright © 2007–2018 The Apache Software Foundation. All rights reserved.
+Copyright © 2007–2019 The Apache Software Foundation. All rights
[08/51] [partial] hbase-site git commit: Published site at 7820ba1dbdba58b1002cdfde08eb21aa7a0bb6da.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/09ea0d5f/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ObservedExceptionsInBatch.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ObservedExceptionsInBatch.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ObservedExceptionsInBatch.html
index 0f5a095..50bf692 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ObservedExceptionsInBatch.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ObservedExceptionsInBatch.html
@@ -78,8712 +78,8714 @@
 import java.util.concurrent.locks.ReadWriteLock;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 import java.util.function.Function;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.LocatedFileStatus;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellBuilderType;
-import org.apache.hadoop.hbase.CellComparator;
-import org.apache.hadoop.hbase.CellComparatorImpl;
-import org.apache.hadoop.hbase.CellScanner;
-import org.apache.hadoop.hbase.CellUtil;
-import org.apache.hadoop.hbase.CompareOperator;
-import org.apache.hadoop.hbase.CompoundConfiguration;
-import org.apache.hadoop.hbase.DoNotRetryIOException;
-import org.apache.hadoop.hbase.DroppedSnapshotException;
-import org.apache.hadoop.hbase.ExtendedCellBuilderFactory;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HConstants.OperationStatusCode;
-import org.apache.hadoop.hbase.HDFSBlocksDistribution;
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.KeyValueUtil;
-import org.apache.hadoop.hbase.NamespaceDescriptor;
-import org.apache.hadoop.hbase.NotServingRegionException;
-import org.apache.hadoop.hbase.PrivateCellUtil;
-import org.apache.hadoop.hbase.RegionTooBusyException;
-import org.apache.hadoop.hbase.Tag;
-import org.apache.hadoop.hbase.TagUtil;
-import org.apache.hadoop.hbase.UnknownScannerException;
-import org.apache.hadoop.hbase.client.Append;
-import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
-import org.apache.hadoop.hbase.client.CompactionState;
-import org.apache.hadoop.hbase.client.Delete;
-import org.apache.hadoop.hbase.client.Durability;
-import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.Increment;
-import org.apache.hadoop.hbase.client.IsolationLevel;
-import org.apache.hadoop.hbase.client.Mutation;
-import org.apache.hadoop.hbase.client.PackagePrivateFieldAccessor;
-import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.client.RegionInfo;
-import org.apache.hadoop.hbase.client.RegionInfoBuilder;
-import org.apache.hadoop.hbase.client.RegionReplicaUtil;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.client.RowMutations;
-import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.client.TableDescriptor;
-import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
-import org.apache.hadoop.hbase.conf.ConfigurationManager;
-import org.apache.hadoop.hbase.conf.PropagatingConfigurationObserver;
-import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
-import org.apache.hadoop.hbase.coprocessor.RegionObserver.MutationType;
-import org.apache.hadoop.hbase.errorhandling.ForeignExceptionSnare;
-import org.apache.hadoop.hbase.exceptions.FailedSanityCheckException;
-import org.apache.hadoop.hbase.exceptions.TimeoutIOException;
-import org.apache.hadoop.hbase.exceptions.UnknownProtocolException;
-import org.apache.hadoop.hbase.filter.ByteArrayComparable;
-import org.apache.hadoop.hbase.filter.FilterWrapper;
-import org.apache.hadoop.hbase.filter.IncompatibleFilterException;
-import org.apache.hadoop.hbase.io.HFileLink;
-import org.apache.hadoop.hbase.io.HeapSize;
-import org.apache.hadoop.hbase.io.TimeRange;
-import org.apache.hadoop.hbase.io.hfile.BlockCache;
-import org.apache.hadoop.hbase.io.hfile.HFile;
-import org.apache.hadoop.hbase.ipc.CallerDisconnectedException;
-import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
-import org.apache.hadoop.hbase.ipc.RpcCall;
-import org.apache.hadoop.hbase.ipc.RpcServer;
-import org.apache.hadoop.hbase.mob.MobFileCache;
-import org.apache.hadoop.hbase.monitoring.MonitoredTask;
-import org.apache.hadoop.hbase.monitoring.TaskMonitor;
-import org.apache.hadoop.hbase.quotas.RegionServerSpaceQuotaManager;
-import
[08/51] [partial] hbase-site git commit: Published site at c448604ceb987d113913f0583452b2abce04db0d.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4f8b8424/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.BlockIndexNotLoadedException.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.BlockIndexNotLoadedException.html b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.BlockIndexNotLoadedException.html
index 79cb21b..d8d391b 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.BlockIndexNotLoadedException.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.BlockIndexNotLoadedException.html
@@ -378,1508 +378,1510 @@
 
   @Override
   public void returnBlock(HFileBlock block) {
-    BlockCache blockCache = this.cacheConf.getBlockCache();
-    if (blockCache != null && block != null) {
-      BlockCacheKey cacheKey = new BlockCacheKey(this.getFileContext().getHFileName(),
-          block.getOffset(), this.isPrimaryReplicaReader(), block.getBlockType());
-      blockCache.returnBlock(cacheKey, block);
-    }
-  }
-  /**
-   * @return the first key in the file. May be null if file has no entries. Note
-   *         that this is not the first row key, but rather the byte form of the
-   *         first KeyValue.
-   */
-  @Override
-  public Optional<Cell> getFirstKey() {
-    if (dataBlockIndexReader == null) {
-      throw new BlockIndexNotLoadedException();
-    }
-    return dataBlockIndexReader.isEmpty() ? Optional.empty()
-        : Optional.of(dataBlockIndexReader.getRootBlockKey(0));
-  }
-
-  /**
-   * TODO left from {@link HFile} version 1: move this to StoreFile after Ryan's
-   * patch goes in to eliminate {@link KeyValue} here.
-   *
-   * @return the first row key, or null if the file is empty.
-   */
-  @Override
-  public Optional<byte[]> getFirstRowKey() {
-    // We have to copy the row part to form the row key alone
-    return getFirstKey().map(CellUtil::cloneRow);
-  }
-
-  /**
-   * TODO left from {@link HFile} version 1: move this to StoreFile after
-   * Ryan's patch goes in to eliminate {@link KeyValue} here.
-   *
-   * @return the last row key, or null if the file is empty.
-   */
-  @Override
-  public Optional<byte[]> getLastRowKey() {
-    // We have to copy the row part to form the row key alone
-    return getLastKey().map(CellUtil::cloneRow);
-  }
-
-  /** @return number of KV entries in this HFile */
-  @Override
-  public long getEntries() {
-    return trailer.getEntryCount();
-  }
-
-  /** @return comparator */
-  @Override
-  public CellComparator getComparator() {
-    return comparator;
-  }
-
-  /** @return compression algorithm */
-  @Override
-  public Compression.Algorithm getCompressionAlgorithm() {
-    return compressAlgo;
-  }
-
-  /**
-   * @return the total heap size of data and meta block indexes in bytes. Does
-   *         not take into account non-root blocks of a multilevel data index.
-   */
-  @Override
-  public long indexSize() {
-    return (dataBlockIndexReader != null ? dataBlockIndexReader.heapSize() : 0)
-        + ((metaBlockIndexReader != null) ? metaBlockIndexReader.heapSize()
-            : 0);
-  }
-
-  @Override
-  public String getName() {
-    return name;
-  }
-
-  @Override
-  public HFileBlockIndex.BlockIndexReader getDataBlockIndexReader() {
-    return dataBlockIndexReader;
-  }
-
-  @Override
-  public FixedFileTrailer getTrailer() {
-    return trailer;
-  }
-
-  @Override
-  public boolean isPrimaryReplicaReader() {
-    return primaryReplicaReader;
-  }
-
-  @Override
-  public FileInfo loadFileInfo() throws IOException {
-    return fileInfo;
-  }
-
-  /**
-   * An exception thrown when an operation requiring a scanner to be seeked
-   * is invoked on a scanner that is not seeked.
-   */
-  @SuppressWarnings("serial")
-  public static class NotSeekedException extends IllegalStateException {
-    public NotSeekedException() {
-      super("Not seeked to a key/value");
-    }
-  }
-
-  protected static class HFileScannerImpl implements HFileScanner {
-    private ByteBuff blockBuffer;
-    protected final boolean cacheBlocks;
-    protected final boolean pread;
-    protected final boolean isCompaction;
-    private int currKeyLen;
-    private int currValueLen;
-    private int currMemstoreTSLen;
-    private long currMemstoreTS;
-    // Updated but never read?
-    protected AtomicInteger blockFetches = new AtomicInteger(0);
-    protected final HFile.Reader reader;
-    private int currTagsLen;
-    //
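Note that getFirstRowKey/getLastRowKey now return Optional<byte[]> rather than null (the older javadoc wording predates that change). A hedged sketch of how a caller would consume them; the reader variable stands in for any HFile.Reader instance:

import java.util.Optional;
import org.apache.hadoop.hbase.util.Bytes;

class RowKeyRange {
  // Hypothetical consumption of the Optional-returning accessors shown above.
  static String describe(org.apache.hadoop.hbase.io.hfile.HFile.Reader reader) {
    Optional<byte[]> first = reader.getFirstRowKey();
    Optional<byte[]> last = reader.getLastRowKey();
    // map/orElse replaces the null checks the pre-Optional API required.
    return first.map(Bytes::toStringBinary).orElse("<empty file>")
        + " .. " + last.map(Bytes::toStringBinary).orElse("<empty file>");
  }
}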
[08/51] [partial] hbase-site git commit: Published site at 8bf966c8e936dec4d83bcbe85c5aab543f14a0df.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/27555316/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.RegionRepairException.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.RegionRepairException.html b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.RegionRepairException.html
index a957d31..62f81b6 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.RegionRepairException.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.RegionRepairException.html
@@ -142,5192 +142,5186 @@
 import org.apache.hadoop.hbase.wal.WAL;
 import org.apache.hadoop.hbase.wal.WALFactory;
 import org.apache.hadoop.hbase.wal.WALSplitter;
-import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
-import org.apache.hadoop.hbase.zookeeper.ZKUtil;
-import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
-import org.apache.hadoop.hbase.zookeeper.ZNodePaths;
-import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
-import org.apache.hadoop.ipc.RemoteException;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.util.ReflectionUtils;
-import org.apache.hadoop.util.Tool;
-import org.apache.hadoop.util.ToolRunner;
-import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-import org.apache.hbase.thirdparty.com.google.common.collect.Sets;
-import org.apache.yetus.audience.InterfaceAudience;
-import org.apache.yetus.audience.InterfaceStability;
-import org.apache.zookeeper.KeeperException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import org.apache.hbase.thirdparty.com.google.common.base.Joiner;
-import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
-import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;
-import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
-import org.apache.hbase.thirdparty.com.google.common.collect.Multimap;
-import org.apache.hbase.thirdparty.com.google.common.collect.Ordering;
-import org.apache.hbase.thirdparty.com.google.common.collect.TreeMultimap;
-
-import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface;
-
-/**
- * HBaseFsck (hbck) is a tool for checking and repairing region consistency and
- * table integrity problems in a corrupted HBase. This tool was written for hbase-1.x. It does not
- * work with hbase-2.x; it can read state but is not allowed to change state; i.e. effect 'repair'.
- * See hbck2 (HBASE-19121) for a hbck tool for hbase2.
- *
- * <p>
- * Region consistency checks verify that hbase:meta, region deployment on region
- * servers and the state of data in HDFS (.regioninfo files) all are in
- * accordance.
- * <p>
- * Table integrity checks verify that all possible row keys resolve to exactly
- * one region of a table. This means there are no individual degenerate
- * or backwards regions; no holes between regions; and that there are no
- * overlapping regions.
- * <p>
- * The general repair strategy works in two phases:
- * <ol>
- * <li> Repair Table Integrity on HDFS. (merge or fabricate regions)
- * <li> Repair Region Consistency with hbase:meta and assignments
- * </ol>
- * <p>
- * For table integrity repairs, the tables' region directories are scanned
- * for .regioninfo files. Each table's integrity is then verified. If there
- * are any orphan regions (regions with no .regioninfo files) or holes, new
- * regions are fabricated. Backwards regions are sidelined as well as empty
- * degenerate (endkey==startkey) regions. If there are any overlapping regions,
- * a new region is created and all data is merged into the new region.
- * <p>
- * Table integrity repairs deal solely with HDFS and could potentially be done
- * offline -- the hbase region servers or master do not need to be running.
- * This phase can eventually be used to completely reconstruct the hbase:meta table in
- * an offline fashion.
- * <p>
- * Region consistency requires three conditions -- 1) valid .regioninfo file
- * present in an HDFS region dir, 2) valid row with .regioninfo data in META,
- * and 3) a region is deployed only at the regionserver that was assigned to
- * with proper state in the master.
- * <p>
- * Region consistency repairs require hbase to be online so that hbck can
- * contact the HBase master and region servers. The hbck#connect() method must
- * first be called successfully. Much of the region consistency information
- * is transient and less risky to repair.
- * <p>
- * If hbck is run from the command line, there are a
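To make the integrity conditions in that javadoc concrete, here is a hedged sketch of how a degenerate (endkey == startkey) or backwards region would be flagged from its key range. This is illustrative logic only, not hbck's actual implementation; it assumes the convention that an empty end key means "last region of the table".

import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.util.Bytes;

// Illustrative-only checks mirroring the "degenerate" and "backwards" region
// definitions in the HBaseFsck javadoc above.
final class RegionKeyChecks {
  static boolean isDegenerate(byte[] startKey, byte[] endKey) {
    // endkey == startkey, but an empty end key means "open-ended", not degenerate
    return endKey.length > 0 && Bytes.compareTo(startKey, endKey) == 0;
  }

  static boolean isBackwards(byte[] startKey, byte[] endKey) {
    // start sorts after end: the key range is inverted
    return endKey.length > 0 && Bytes.compareTo(startKey, endKey) > 0;
  }

  public static void main(String[] args) {
    byte[] a = Bytes.toBytes("a"), b = Bytes.toBytes("b");
    System.out.println(isDegenerate(a, a)); // true
    System.out.println(isBackwards(b, a));  // true
    System.out.println(isBackwards(a, HConstants.EMPTY_BYTE_ARRAY)); // false: open-ended
  }
}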
[08/51] [partial] hbase-site git commit: Published site at 1acbd36c903b048141866b143507bfce124a5c5f.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/5299e667/devapidocs/src-html/org/apache/hadoop/hbase/HConstants.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/HConstants.html b/devapidocs/src-html/org/apache/hadoop/hbase/HConstants.html
index 333b785..da8def9 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/HConstants.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/HConstants.html
@@ -34,1359 +34,1365 @@
 import java.util.List;
 import java.util.UUID;
 import java.util.regex.Pattern;
-
-import org.apache.commons.lang3.ArrayUtils;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.yetus.audience.InterfaceAudience;
-
-/**
- * HConstants holds a bunch of HBase-related constants
- */
-@InterfaceAudience.Public
-public final class HConstants {
-  // NOTICE Please do not add a constants here, unless they are referenced by a lot of classes.
-
-  //Bytes.UTF8_ENCODING should be updated if this changed
-  /** When we encode strings, we always specify UTF8 encoding */
-  public static final String UTF8_ENCODING = "UTF-8";
-
-  //Bytes.UTF8_CHARSET should be updated if this changed
-  /** When we encode strings, we always specify UTF8 encoding */
-  public static final Charset UTF8_CHARSET = Charset.forName(UTF8_ENCODING);
-  /**
-   * Default block size for an HFile.
-   */
-  public final static int DEFAULT_BLOCKSIZE = 64 * 1024;
-
-  /** Used as a magic return value while optimized index key feature enabled(HBASE-7845) */
-  public final static int INDEX_KEY_MAGIC = -2;
-  /*
-   * Name of directory that holds recovered edits written by the wal log
-   * splitting code, one per region
-   */
-  public static final String RECOVERED_EDITS_DIR = "recovered.edits";
-  /**
-   * The first four bytes of Hadoop RPC connections
-   */
-  public static final byte[] RPC_HEADER = new byte[] { 'H', 'B', 'a', 's' };
-  public static final byte RPC_CURRENT_VERSION = 0;
-
-  // HFileBlock constants. TODO THESE DEFINES BELONG IN HFILEBLOCK, NOT UP HERE.
-  // Needed down in hbase-common though by encoders but these encoders should not be dealing
-  // in the internals of hfileblocks. Fix encapsulation.
-
-  /** The size data structures with minor version is 0 */
-  public static final int HFILEBLOCK_HEADER_SIZE_NO_CHECKSUM = MAGIC_LENGTH + 2 * Bytes.SIZEOF_INT
-      + Bytes.SIZEOF_LONG;
-  /** The size of a version 2 HFile block header, minor version 1.
-   * There is a 1 byte checksum type, followed by a 4 byte bytesPerChecksum
-   * followed by another 4 byte value to store sizeofDataOnDisk.
-   */
-  public static final int HFILEBLOCK_HEADER_SIZE = HFILEBLOCK_HEADER_SIZE_NO_CHECKSUM +
-      Bytes.SIZEOF_BYTE + 2 * Bytes.SIZEOF_INT;
-  /** Just an array of bytes of the right size. */
-  public static final byte[] HFILEBLOCK_DUMMY_HEADER = new byte[HFILEBLOCK_HEADER_SIZE];
-
-  //End HFileBlockConstants.
-
-  /**
-   * Status codes used for return values of bulk operations.
-   */
-  @InterfaceAudience.Private
-  public enum OperationStatusCode {
-    NOT_RUN,
-    SUCCESS,
-    BAD_FAMILY,
-    STORE_TOO_BUSY,
-    SANITY_CHECK_FAILURE,
-    FAILURE
-  }
-
-  /** long constant for zero */
-  public static final Long ZERO_L = Long.valueOf(0L);
-  public static final String NINES = "99";
-  public static final String ZEROES = "00";
-
-  // For migration
-
-  /** name of version file */
-  public static final String VERSION_FILE_NAME = "hbase.version";
-
-  /**
-   * Current version of file system.
-   * Version 4 supports only one kind of bloom filter.
-   * Version 5 changes versions in catalog table regions.
-   * Version 6 enables blockcaching on catalog tables.
-   * Version 7 introduces hfile -- hbase 0.19 to 0.20..
-   * Version 8 introduces namespace
-   */
-  // public static final String FILE_SYSTEM_VERSION = "6";
-  public static final String FILE_SYSTEM_VERSION = "8";
-
-  // Configuration parameters
-
-  //TODO: Is having HBase homed on port 60k OK?
-
-  /** Cluster is in distributed mode or not */
-  public static final String CLUSTER_DISTRIBUTED = "hbase.cluster.distributed";
-
-  /** Config for pluggable load balancers */
-  public static final String HBASE_MASTER_LOADBALANCER_CLASS = "hbase.master.loadbalancer.class";
-
-  /** Config for balancing the cluster by table */
-  public static final String HBASE_MASTER_LOADBALANCE_BYTABLE = "hbase.master.loadbalance.bytable";
-
-  /** Config for the max percent of regions in transition */
-  public static final String
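Plugging in the sizes makes the two header constants above concrete. This is worked arithmetic under one stated assumption: MAGIC_LENGTH is 8, the length of the 8-byte block-type magic (e.g. "DATABLK*"); SIZEOF_INT is 4 and SIZEOF_LONG is 8.

public class HFileBlockHeaderSizes {
  public static void main(String[] args) {
    // MAGIC_LENGTH = 8 assumed (8-byte block-type magic such as "DATABLK*").
    int noChecksum = 8 + 2 * 4 + 8;            // magic + two int size fields
                                               // + prevBlockOffset long = 24
    int withChecksum = noChecksum + 1 + 2 * 4; // minor version 1 adds: checksum type byte
                                               // + bytesPerChecksum + on-disk data size = 33
    System.out.println(noChecksum + " / " + withChecksum); // 24 / 33
  }
}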
[08/51] [partial] hbase-site git commit: Published site at 130057f13774f6b213cdb06952c805a29d59396e.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/68eae623/testdevapidocs/org/apache/hadoop/hbase/procedure2/package-tree.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/procedure2/package-tree.html b/testdevapidocs/org/apache/hadoop/hbase/procedure2/package-tree.html
index 6e82899..152a081 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/procedure2/package-tree.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/procedure2/package-tree.html
@@ -234,10 +234,10 @@
 java.lang.Enum<E> (implements java.lang.Comparable<T>, java.io.Serializable)
+org.apache.hadoop.hbase.procedure2.TestProcedureRecovery.TestStateMachineProcedure.State
 org.apache.hadoop.hbase.procedure2.TestStateMachineProcedure.TestSMProcedureState
-org.apache.hadoop.hbase.procedure2.TestYieldProcedures.TestStateMachineProcedure.State
 org.apache.hadoop.hbase.procedure2.TestProcedureBypass.StuckStateMachineState
-org.apache.hadoop.hbase.procedure2.TestProcedureRecovery.TestStateMachineProcedure.State
+org.apache.hadoop.hbase.procedure2.TestYieldProcedures.TestStateMachineProcedure.State

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/68eae623/testdevapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html b/testdevapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html
index d7c69fc..06907ed 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html
@@ -701,10 +701,10 @@
 java.lang.Enum<E> (implements java.lang.Comparable<T>, java.io.Serializable)
-org.apache.hadoop.hbase.regionserver.TestAtomicOperation.TestStep
 org.apache.hadoop.hbase.regionserver.TestMultiLogThreshold.ActionType
-org.apache.hadoop.hbase.regionserver.DataBlockEncodingTool.Manipulation
 org.apache.hadoop.hbase.regionserver.TestRegionServerReadRequestMetrics.Metric
+org.apache.hadoop.hbase.regionserver.TestAtomicOperation.TestStep
+org.apache.hadoop.hbase.regionserver.DataBlockEncodingTool.Manipulation
 org.apache.hadoop.hbase.regionserver.TestCacheOnWriteInSchema.CacheOnWriteType

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/68eae623/testdevapidocs/org/apache/hadoop/hbase/security/access/TestAccessController.MyShellBasedUnixGroupsMapping.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/security/access/TestAccessController.MyShellBasedUnixGroupsMapping.html b/testdevapidocs/org/apache/hadoop/hbase/security/access/TestAccessController.MyShellBasedUnixGroupsMapping.html
index 79ffc82..da97f01 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/security/access/TestAccessController.MyShellBasedUnixGroupsMapping.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/security/access/TestAccessController.MyShellBasedUnixGroupsMapping.html
@@ -122,7 +122,7 @@
-public static class TestAccessController.MyShellBasedUnixGroupsMapping
+public static class TestAccessController.MyShellBasedUnixGroupsMapping
 extends org.apache.hadoop.security.ShellBasedUnixGroupsMapping
 implements org.apache.hadoop.security.GroupMappingServiceProvider
@@ -221,7 +221,7 @@
 MyShellBasedUnixGroupsMapping
-public MyShellBasedUnixGroupsMapping()
+public MyShellBasedUnixGroupsMapping()
@@ -238,7 +238,7 @@
 getGroups
-public List<String> getGroups(String user)
+public List<String>
[08/51] [partial] hbase-site git commit: Published site at d5e4faacc354c1bc4d93efa71ca97ee3a056123e.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b5e107c3/testdevapidocs/org/apache/hadoop/hbase/procedure2/TestProcedureBypass.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/procedure2/TestProcedureBypass.html b/testdevapidocs/org/apache/hadoop/hbase/procedure2/TestProcedureBypass.html
index afc4268..9ad4207 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/procedure2/TestProcedureBypass.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/procedure2/TestProcedureBypass.html
@@ -49,7 +49,7 @@
-Prev Class
+Prev Class
 Next Class
@@ -535,7 +535,7 @@
-Prev Class
+Prev Class
 Next Class

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b5e107c3/testdevapidocs/org/apache/hadoop/hbase/procedure2/class-use/ProcedureTestingUtility.NoopProcedure.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/procedure2/class-use/ProcedureTestingUtility.NoopProcedure.html b/testdevapidocs/org/apache/hadoop/hbase/procedure2/class-use/ProcedureTestingUtility.NoopProcedure.html
index 885d5c2..fbb062a 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/procedure2/class-use/ProcedureTestingUtility.NoopProcedure.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/procedure2/class-use/ProcedureTestingUtility.NoopProcedure.html
@@ -156,6 +156,26 @@
 static class TestMasterProcedureScheduler.TestTableProcedureWithEvent
+static class TestSchedulerQueueDeadLock.TableExclusiveProcedure
+static class TestSchedulerQueueDeadLock.TableExclusiveProcedureWithId
+static class TestSchedulerQueueDeadLock.TableShardParentProcedure
+static class TestSchedulerQueueDeadLock.TableSharedProcedure
+static class TestSchedulerQueueDeadLock.TableSharedProcedureWithId

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b5e107c3/testdevapidocs/org/apache/hadoop/hbase/procedure2/class-use/TestLockAndQueue.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/procedure2/class-use/TestLockAndQueue.html b/testdevapidocs/org/apache/hadoop/hbase/procedure2/class-use/TestLockAndQueue.html
new file mode 100644
index 000..4838f1a
--- /dev/null
+++ b/testdevapidocs/org/apache/hadoop/hbase/procedure2/class-use/TestLockAndQueue.html
@@ -0,0 +1,125 @@
+Uses of Class org.apache.hadoop.hbase.procedure2.TestLockAndQueue (Apache HBase 3.0.0-SNAPSHOT Test API)
+
+No usage of org.apache.hadoop.hbase.procedure2.TestLockAndQueue
+
+Copyright © 2007–2018 The Apache Software Foundation. All rights reserved.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b5e107c3/testdevapidocs/org/apache/hadoop/hbase/procedure2/package-frame.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/procedure2/package-frame.html b/testdevapidocs/org/apache/hadoop/hbase/procedure2/package-frame.html
index 2e7f2ac..238aa0d 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/procedure2/package-frame.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/procedure2/package-frame.html
@@ -22,6 +22,7 @@
 TestChildProcedures.TestChildProcedure
 TestChildProcedures.TestProcEnv
 TestChildProcedures.TestRootProcedure
+TestLockAndQueue
 TestProcedureBypass
 TestProcedureBypass.RootProcedure
 TestProcedureBypass.StuckProcedure

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b5e107c3/testdevapidocs/org/apache/hadoop/hbase/procedure2/package-summary.html
[08/51] [partial] hbase-site git commit: Published site at 3fe8649b2c9ba1271c25e8f476548907e4c7a90d.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8f09a71d/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFormat.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFormat.html b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFormat.html
index 30963fa..257263c 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFormat.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFormat.html
@@ -81,187 +81,198 @@
 
   private ProcedureWALFormat() {}
 
-  public static void load(Iterator<ProcedureWALFile> logs, ProcedureStoreTracker tracker,
-      Loader loader) throws IOException {
-    ProcedureWALFormatReader reader = new ProcedureWALFormatReader(tracker, loader);
-    tracker.setKeepDeletes(true);
-    try {
-      // Ignore the last log which is current active log.
-      while (logs.hasNext()) {
-        ProcedureWALFile log = logs.next();
-        log.open();
-        try {
-          reader.read(log);
-        } finally {
-          log.close();
-        }
-      }
-      reader.finish();
-
-      // The tracker is now updated with all the procedures read from the logs
-      if (tracker.isPartial()) {
-        tracker.setPartialFlag(false);
-      }
-      tracker.resetModified();
-    } finally {
-      tracker.setKeepDeletes(false);
-    }
-  }
-
-  public static void writeHeader(OutputStream stream, ProcedureWALHeader header)
-      throws IOException {
-    header.writeDelimitedTo(stream);
-  }
-
-  /*
-   * +-----------------+
-   * | END OF WAL DATA | <---+
-   * +-----------------+     |
-   * |                 |     |
-   * |     Tracker     |     |
-   * |                 |     |
-   * +-----------------+     |
-   * |     version     |     |
-   * +-----------------+     |
-   * |  TRAILER_MAGIC  |     |
-   * +-----------------+     |
-   * |      offset     |-----+
-   * +-----------------+
-   */
-  public static long writeTrailer(FSDataOutputStream stream, ProcedureStoreTracker tracker)
-      throws IOException {
-    long offset = stream.getPos();
-
-    // Write EOF Entry
-    ProcedureWALEntry.newBuilder()
-      .setType(ProcedureWALEntry.Type.PROCEDURE_WAL_EOF)
-      .build().writeDelimitedTo(stream);
-
-    // Write Tracker
-    tracker.toProto().writeDelimitedTo(stream);
-
-    stream.write(TRAILER_VERSION);
-    StreamUtils.writeLong(stream, TRAILER_MAGIC);
-    StreamUtils.writeLong(stream, offset);
-    return stream.getPos() - offset;
-  }
-
-  public static ProcedureWALHeader readHeader(InputStream stream)
-      throws IOException {
-    ProcedureWALHeader header;
-    try {
-      header = ProcedureWALHeader.parseDelimitedFrom(stream);
-    } catch (InvalidProtocolBufferException e) {
-      throw new InvalidWALDataException(e);
-    }
-
-    if (header == null) {
-      throw new InvalidWALDataException("No data available to read the Header");
-    }
-
-    if (header.getVersion() < 0 || header.getVersion() != HEADER_VERSION) {
-      throw new InvalidWALDataException("Invalid Header version. got " + header.getVersion() +
-        " expected " + HEADER_VERSION);
-    }
-
-    if (header.getType() < 0 || header.getType() > LOG_TYPE_MAX_VALID) {
-      throw new InvalidWALDataException("Invalid header type. got " + header.getType());
-    }
-
-    return header;
-  }
-
-  public static ProcedureWALTrailer readTrailer(FSDataInputStream stream, long startPos, long size)
-      throws IOException {
-    // Beginning of the Trailer Jump. 17 = 1 byte version + 8 byte magic + 8 byte offset
-    long trailerPos = size - 17;
-
-    if (trailerPos < startPos) {
-      throw new InvalidWALDataException("Missing trailer: size=" + size + " startPos=" + startPos);
-    }
-
-    stream.seek(trailerPos);
-    int version = stream.read();
-    if (version != TRAILER_VERSION) {
-      throw new InvalidWALDataException("Invalid Trailer version. got " + version +
-        " expected " + TRAILER_VERSION);
-    }
+  /**
+   * Load all the procedures in these ProcedureWALFiles, and rebuild the given {@code tracker} if
+   * needed, i.e, the {@code tracker} is a partial one.
+   * <p/>
+   * The method in the give {@code loader} will be called at the end after we load all the
+   * procedures and construct the hierarchy.
+   * <p/>
+   * And we will call the {@link ProcedureStoreTracker#resetModified()} method for the given
+   * {@code tracker} before returning, as it will
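A hedged sketch of walking that trailer layout from the end of a file with plain java.io, assuming the magic and offset are written as fixed 8-byte big-endian longs; the constants here are placeholders for the real TRAILER_VERSION/TRAILER_MAGIC values.

import java.io.IOException;
import java.io.RandomAccessFile;

// Illustrative trailer walk for the layout pictured above:
// [version: 1 byte][magic: 8 bytes][offset: 8 bytes] at the very end of the file.
public class TrailerPeek {
  public static void main(String[] args) throws IOException {
    try (RandomAccessFile f = new RandomAccessFile(args[0], "r")) {
      long trailerPos = f.length() - 17; // 17 = 1 + 8 + 8, as in the comment above
      f.seek(trailerPos);
      int version = f.read();      // trailer format version
      long magic = f.readLong();   // sanity marker (TRAILER_MAGIC in the real code)
      long offset = f.readLong();  // position where the WAL data ends / trailer begins
      System.out.printf("version=%d magic=%x dataEnd=%d%n", version, magic, offset);
    }
  }
}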
[08/51] [partial] hbase-site git commit: Published site at 7adf590106826b9e4432cfeee06acdc0ccff8c6e.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/425db230/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestHRegion.FlushThread.html -- diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestHRegion.FlushThread.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestHRegion.FlushThread.html index ed3db7a..156dabb 100644 --- a/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestHRegion.FlushThread.html +++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestHRegion.FlushThread.html @@ -5542,785 +5542,825 @@
5534   }
5535
5536   @Test
-5537   public void testWriteRequestsCounter() throws IOException {
-5538     byte[] fam = Bytes.toBytes("info");
-5539     byte[][] families = { fam };
-5540     this.region = initHRegion(tableName, method, CONF, families);
+5537   public void testReverseScanWhenPutCellsAfterOpenReverseScan() throws Exception {
+5538     byte[] cf1 = Bytes.toBytes("CF1");
+5539     byte[][] families = { cf1 };
+5540     byte[] col = Bytes.toBytes("C");
5541
-5542     Assert.assertEquals(0L, region.getWriteRequestsCount());
-5543
-5544     Put put = new Put(row);
-5545     put.addColumn(fam, fam, fam);
-5546
-5547     Assert.assertEquals(0L, region.getWriteRequestsCount());
-5548     region.put(put);
-5549     Assert.assertEquals(1L, region.getWriteRequestsCount());
-5550     region.put(put);
-5551     Assert.assertEquals(2L, region.getWriteRequestsCount());
-5552     region.put(put);
-5553     Assert.assertEquals(3L, region.getWriteRequestsCount());
-5554
-5555     region.delete(new Delete(row));
-5556     Assert.assertEquals(4L, region.getWriteRequestsCount());
-5557   }
-5558
-5559   @Test
-5560   public void testOpenRegionWrittenToWAL() throws Exception {
-5561     final ServerName serverName = ServerName.valueOf(name.getMethodName(), 100, 42);
-5562     final RegionServerServices rss = spy(TEST_UTIL.createMockRegionServerService(serverName));
-5563
-5564     HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name.getMethodName()));
-5565     htd.addFamily(new HColumnDescriptor(fam1));
-5566     htd.addFamily(new HColumnDescriptor(fam2));
-5567
-5568     HRegionInfo hri = new HRegionInfo(htd.getTableName(),
-5569       HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY);
-5570
-5571     // open the region w/o rss and wal and flush some files
-5572     region =
-5573       HBaseTestingUtility.createRegionAndWAL(hri, TEST_UTIL.getDataTestDir(), TEST_UTIL
-5574         .getConfiguration(), htd);
-5575     assertNotNull(region);
-5576
-5577     // create a file in fam1 for the region before opening in OpenRegionHandler
-5578     region.put(new Put(Bytes.toBytes("a")).addColumn(fam1, fam1, fam1));
-5579     region.flush(true);
-5580     HBaseTestingUtility.closeRegionAndWAL(region);
+5542     HBaseConfiguration conf = new HBaseConfiguration();
+5543     this.region = initHRegion(tableName, method, conf, families);
+5544
+5545     Put put = new Put(Bytes.toBytes("16"));
+5546     put.addColumn(cf1, col, Bytes.toBytes("val"));
+5547     region.put(put);
+5548     Put put2 = new Put(Bytes.toBytes("15"));
+5549     put2.addColumn(cf1, col, Bytes.toBytes("val"));
+5550     region.put(put2);
+5551
+5552     // Create a reverse scan
+5553     Scan scan = new Scan(Bytes.toBytes("16"));
+5554     scan.setReversed(true);
+5555     RegionScannerImpl scanner = region.getScanner(scan);
+5556
+5557     // Put a lot of cells that have sequenceIDs greater than the readPt of the reverse scan
+5558     for (int i = 10; i < 20; i++) {
+5559       Put p = new Put(Bytes.toBytes("" + i));
+5560       p.addColumn(cf1, col, Bytes.toBytes("" + i));
+5561       region.put(p);
+5562     }
+5563     List<Cell> currRow = new ArrayList<>();
+5564     boolean hasNext;
+5565     do {
+5566       hasNext = scanner.next(currRow);
+5567     } while (hasNext);
+5568
+5569     assertEquals(2, currRow.size());
+5570     assertEquals("16", Bytes.toString(currRow.get(0).getRowArray(),
+5571       currRow.get(0).getRowOffset(), currRow.get(0).getRowLength()));
+5572     assertEquals("15", Bytes.toString(currRow.get(1).getRowArray(),
+5573       currRow.get(1).getRowOffset(), currRow.get(1).getRowLength()));
+5574   }
+5575
+5576   @Test
+5577   public void testWriteRequestsCounter() throws IOException {
+5578     byte[] fam = Bytes.toBytes("info");
+5579     byte[][] families = { fam };
+5580     this.region = initHRegion(tableName, method, CONF, families);
5581
-5582     ArgumentCaptor<WALEdit> editCaptor = ArgumentCaptor.forClass(WALEdit.class);
+5582     Assert.assertEquals(0L, region.getWriteRequestsCount());
5583
-5584     // capture append() calls
-5585     WAL wal = mockWAL();
-5586     when(rss.getWAL((HRegionInfo) any())).thenReturn(wal);
-5587
-5588     region = HRegion.openHRegion(hri, htd, rss.getWAL(hri),
-5589       TEST_UTIL.getConfiguration(),
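
The replacement test above pins down MVCC semantics for reverse scans: cells written after the scanner opens carry sequence ids above the scanner's read point, so the scan must still return only rows "16" and "15". For readers who want the client-side equivalent, here is a minimal hedged sketch; the connection setup and the "demo" table name are illustrative assumptions, not part of the commit:

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class ReverseScanExample {
  public static void main(String[] args) throws IOException {
    try (Connection conn = ConnectionFactory.createConnection();
        Table table = conn.getTable(TableName.valueOf("demo"))) { // hypothetical table
      Scan scan = new Scan()
          .withStartRow(Bytes.toBytes("16")) // a reverse scan starts at the highest row
          .setReversed(true);
      try (ResultScanner scanner = table.getScanner(scan)) {
        for (Result r : scanner) {
          System.out.println(Bytes.toString(r.getRow())); // rows come back in descending order
        }
      }
    }
  }
}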
[08/51] [partial] hbase-site git commit: Published site at 5fbb227deb365fe812d433fe39b85ac4b0ddee20.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c9ebe686/devapidocs/org/apache/hadoop/hbase/util/ByteBufferUtils.Converter.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/util/ByteBufferUtils.Converter.html b/devapidocs/org/apache/hadoop/hbase/util/ByteBufferUtils.Converter.html new file mode 100644 index 000..61475fa --- /dev/null +++ b/devapidocs/org/apache/hadoop/hbase/util/ByteBufferUtils.Converter.html @@ -0,0 +1,421 @@
New generated page: ByteBufferUtils.Converter (Apache HBase 3.0.0-SNAPSHOT API)

org.apache.hadoop.hbase.util
Class ByteBufferUtils.Converter

java.lang.Object
  org.apache.hadoop.hbase.util.ByteBufferUtils.Converter

Direct Known Subclasses: ByteBufferUtils.ConverterHolder.PureJavaConverter, ByteBufferUtils.ConverterHolder.UnsafeConverter
Enclosing class: ByteBufferUtils

abstract static class ByteBufferUtils.Converter
extends Object

Constructor Summary
  Converter()

Method Summary (all methods are package-private and abstract)
  abstract void   putInt(ByteBuffer buffer, int val)
  abstract int    putInt(ByteBuffer buffer, int index, int val)
  abstract int    putLong(ByteBuffer buffer, int index, long val)
  abstract void   putLong(ByteBuffer buffer, long val)
  abstract int    putShort(ByteBuffer buffer, int index, short val)
  abstract void   putShort(ByteBuffer buffer, short val)
  abstract int    toInt(ByteBuffer buffer)
  abstract int    toInt(ByteBuffer buffer, int offset)
  abstract long   toLong(ByteBuffer buffer, int offset)
  abstract short  toShort(ByteBuffer buffer, int offset)

Methods inherited from class java.lang.Object: clone, …
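
The summary above shows the Converter contract: one abstract byte-access layer with interchangeable implementations (HBase ships pure-Java and Unsafe-backed variants). A hedged sketch of what a simplified pure-Java implementation of a few of these methods might look like; this is illustrative, not the actual HBase code:

import java.nio.ByteBuffer;

// Abstract access layer, mirroring a subset of the methods listed above.
abstract class Converter {
  abstract short toShort(ByteBuffer buffer, int offset);
  abstract int toInt(ByteBuffer buffer, int offset);
  abstract void putInt(ByteBuffer buffer, int val);
}

// Pure-Java variant: explicit big-endian byte arithmetic, no sun.misc.Unsafe.
final class SimplePureJavaConverter extends Converter {
  @Override
  short toShort(ByteBuffer buffer, int offset) {
    // high byte first, then low byte
    return (short) (((buffer.get(offset) & 0xFF) << 8) | (buffer.get(offset + 1) & 0xFF));
  }

  @Override
  int toInt(ByteBuffer buffer, int offset) {
    int v = 0;
    for (int i = 0; i < 4; i++) {
      v = (v << 8) | (buffer.get(offset + i) & 0xFF); // accumulate 4 bytes
    }
    return v;
  }

  @Override
  void putInt(ByteBuffer buffer, int val) {
    // relative put: advances the buffer position by 4
    for (int shift = 24; shift >= 0; shift -= 8) {
      buffer.put((byte) (val >>> shift));
    }
  }
}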
[08/51] [partial] hbase-site git commit: Published site at 821e4d7de2d576189f4288d1c2acf9e9a9471f5c.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/323b17d9/testdevapidocs/src-html/org/apache/hadoop/hbase/master/TestDeadServer.html -- diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/master/TestDeadServer.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/master/TestDeadServer.html index 11d5ba1..35c9eee 100644 --- a/testdevapidocs/src-html/org/apache/hadoop/hbase/master/TestDeadServer.html +++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/master/TestDeadServer.html @@ -131,60 +131,62 @@
123
124     DeadServer d = new DeadServer();
125
-126
-127     d.add(hostname123);
-128     mee.incValue(1);
-129     d.add(hostname1234);
-130     mee.incValue(1);
-131     d.add(hostname12345);
-132
-133     List<Pair<ServerName, Long>> copy = d.copyDeadServersSince(2L);
-134     Assert.assertEquals(2, copy.size());
-135
-136     Assert.assertEquals(hostname1234, copy.get(0).getFirst());
-137     Assert.assertEquals(new Long(2L), copy.get(0).getSecond());
-138
-139     Assert.assertEquals(hostname12345, copy.get(1).getFirst());
-140     Assert.assertEquals(new Long(3L), copy.get(1).getSecond());
-141
-142     EnvironmentEdgeManager.reset();
-143   }
-144
-145   @Test
-146   public void testClean(){
-147     DeadServer d = new DeadServer();
-148     d.add(hostname123);
-149
-150     d.cleanPreviousInstance(hostname12345);
-151     Assert.assertFalse(d.isEmpty());
-152
-153     d.cleanPreviousInstance(hostname1234);
-154     Assert.assertFalse(d.isEmpty());
-155
-156     d.cleanPreviousInstance(hostname123_2);
-157     Assert.assertTrue(d.isEmpty());
-158   }
-159
-160   @Test
-161   public void testClearDeadServer(){
-162     DeadServer d = new DeadServer();
-163     d.add(hostname123);
-164     d.add(hostname1234);
-165     Assert.assertEquals(2, d.size());
-166
+126     d.add(hostname123);
+127     mee.incValue(1);
+128     d.add(hostname1234);
+129     mee.incValue(1);
+130     d.add(hostname12345);
+131
+132     List<Pair<ServerName, Long>> copy = d.copyDeadServersSince(2L);
+133     Assert.assertEquals(2, copy.size());
+134
+135     Assert.assertEquals(hostname1234, copy.get(0).getFirst());
+136     Assert.assertEquals(new Long(2L), copy.get(0).getSecond());
+137
+138     Assert.assertEquals(hostname12345, copy.get(1).getFirst());
+139     Assert.assertEquals(new Long(3L), copy.get(1).getSecond());
+140
+141     EnvironmentEdgeManager.reset();
+142   }
+143
+144   @Test
+145   public void testClean(){
+146     DeadServer d = new DeadServer();
+147     d.add(hostname123);
+148
+149     d.cleanPreviousInstance(hostname12345);
+150     Assert.assertFalse(d.isEmpty());
+151
+152     d.cleanPreviousInstance(hostname1234);
+153     Assert.assertFalse(d.isEmpty());
+154
+155     d.cleanPreviousInstance(hostname123_2);
+156     Assert.assertTrue(d.isEmpty());
+157   }
+158
+159   @Test
+160   public void testClearDeadServer(){
+161     DeadServer d = new DeadServer();
+162     d.add(hostname123);
+163     d.add(hostname1234);
+164     Assert.assertEquals(2, d.size());
+165
+166     d.finish(hostname123);
167     d.removeDeadServer(hostname123);
168     Assert.assertEquals(1, d.size());
-169     d.removeDeadServer(hostname1234);
-170     Assert.assertTrue(d.isEmpty());
-171
-172     d.add(hostname1234);
-173     Assert.assertFalse(d.removeDeadServer(hostname123_2));
-174     Assert.assertEquals(1, d.size());
-175     Assert.assertTrue(d.removeDeadServer(hostname1234));
-176     Assert.assertTrue(d.isEmpty());
-177   }
-178 }
-179
+169     d.finish(hostname1234);
+170     d.removeDeadServer(hostname1234);
+171     Assert.assertTrue(d.isEmpty());
+172
+173     d.add(hostname1234);
+174     Assert.assertFalse(d.removeDeadServer(hostname123_2));
+175     Assert.assertEquals(1, d.size());
+176     d.finish(hostname1234);
+177     Assert.assertTrue(d.removeDeadServer(hostname1234));
+178     Assert.assertTrue(d.isEmpty());
+179   }
+180 }
+181
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/323b17d9/testdevapidocs/src-html/org/apache/hadoop/hbase/master/assignment/TestMergeTableRegionsProcedure.html -- diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/master/assignment/TestMergeTableRegionsProcedure.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/master/assignment/TestMergeTableRegionsProcedure.html index 72719d2..c3ecaef 100644 --- a/testdevapidocs/src-html/org/apache/hadoop/hbase/master/assignment/TestMergeTableRegionsProcedure.html +++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/master/assignment/TestMergeTableRegionsProcedure.html @@ -32,20 +32,20 @@
024 import org.apache.hadoop.conf.Configuration;
025 import org.apache.hadoop.hbase.HBaseClassTestRule;
026 import org.apache.hadoop.hbase.HBaseTestingUtility;
-027 import org.apache.hadoop.hbase.HConstants;
-028 import org.apache.hadoop.hbase.MetaTableAccessor;
-029 import org.apache.hadoop.hbase.TableName;
-030 import
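
The updated TestDeadServer above now calls d.finish(...) before each removeDeadServer(...), which reads as a lifecycle rule: a dead server is added, its processing (log splitting) is marked finished, and only then is the entry removed. A hedged sketch of that ordering; class and method names loosely mirror DeadServer but this is an illustration of the call sequence, not the HBase class, and the "still processing" guard is an inference from the test:

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

final class DeadServerRegistry {
  private final Map<String, Long> deadServers = new ConcurrentHashMap<>();
  private final Map<String, Boolean> processing = new ConcurrentHashMap<>();

  void add(String serverName) {
    deadServers.put(serverName, System.currentTimeMillis());
    processing.put(serverName, Boolean.TRUE); // work on this server is in progress
  }

  void finish(String serverName) {
    processing.put(serverName, Boolean.FALSE); // processing done, removal now legal
  }

  boolean removeDeadServer(String serverName) {
    if (Boolean.TRUE.equals(processing.get(serverName))) {
      return false; // still being processed; mirrors why the test calls finish() first
    }
    processing.remove(serverName);
    return deadServers.remove(serverName) != null; // false for unknown servers
  }
}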
[08/51] [partial] hbase-site git commit: Published site at fa5fa6ecdd071b72b58971058ff3ab9d28c3e709.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d1341859/devapidocs/src-html/org/apache/hadoop/hbase/wal/RegionGroupingProvider.IdentityGroupingStrategy.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/wal/RegionGroupingProvider.IdentityGroupingStrategy.html b/devapidocs/src-html/org/apache/hadoop/hbase/wal/RegionGroupingProvider.IdentityGroupingStrategy.html index 2142742..be2a512 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/wal/RegionGroupingProvider.IdentityGroupingStrategy.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/wal/RegionGroupingProvider.IdentityGroupingStrategy.html @@ -129,174 +129,171 @@
121
122   /** delegate provider for WAL creation/roll/close */
123   public static final String DELEGATE_PROVIDER = "hbase.wal.regiongrouping.delegate.provider";
-124   public static final String DELEGATE_PROVIDER_CLASS =
-125     "hbase.wal.regiongrouping.delegate.provider.class";
-126   public static final String DEFAULT_DELEGATE_PROVIDER = WALFactory.Providers.defaultProvider
-127     .name();
+124   public static final String DEFAULT_DELEGATE_PROVIDER = WALFactory.Providers.defaultProvider
+125     .name();
+126
+127   private static final String META_WAL_GROUP_NAME = "meta";
128
-129   private static final String META_WAL_GROUP_NAME = "meta";
-130
-131   /** A group-provider mapping, make sure one-one rather than many-one mapping */
-132   private final ConcurrentMap<String, WALProvider> cached = new ConcurrentHashMap<>();
+129   /** A group-provider mapping, make sure one-one rather than many-one mapping */
+130   private final ConcurrentMap<String, WALProvider> cached = new ConcurrentHashMap<>();
+131
+132   private final KeyLocker<String> createLock = new KeyLocker<>();
133
-134   private final KeyLocker<String> createLock = new KeyLocker<>();
-135
-136   private RegionGroupingStrategy strategy;
-137   private WALFactory factory;
-138   private Configuration conf;
-139   private List<WALActionsListener> listeners = new ArrayList<>();
-140   private String providerId;
-141   private Class<? extends WALProvider> providerClass;
-142
-143   @Override
-144   public void init(WALFactory factory, Configuration conf, String providerId) throws IOException {
-145     if (null != strategy) {
-146       throw new IllegalStateException("WALProvider.init should only be called once.");
-147     }
-148     this.conf = conf;
-149     this.factory = factory;
-150     StringBuilder sb = new StringBuilder().append(factory.factoryId);
-151     if (providerId != null) {
-152       if (providerId.startsWith(WAL_FILE_NAME_DELIMITER)) {
-153         sb.append(providerId);
-154       } else {
-155         sb.append(WAL_FILE_NAME_DELIMITER).append(providerId);
-156       }
-157     }
-158     this.providerId = sb.toString();
-159     this.strategy = getStrategy(conf, REGION_GROUPING_STRATEGY, DEFAULT_REGION_GROUPING_STRATEGY);
-160     this.providerClass = factory.getProviderClass(DELEGATE_PROVIDER_CLASS, DELEGATE_PROVIDER,
-161       DEFAULT_DELEGATE_PROVIDER);
-162   }
-163
-164   private WALProvider createProvider(String group) throws IOException {
-165     WALProvider provider = WALFactory.createProvider(providerClass);
-166     provider.init(factory, conf,
-167       META_WAL_PROVIDER_ID.equals(providerId) ? META_WAL_PROVIDER_ID : group);
-168     provider.addWALActionsListener(new MetricsWAL());
-169     return provider;
-170   }
-171
-172   @Override
-173   public List<WAL> getWALs() {
-174     return cached.values().stream().flatMap(p -> p.getWALs().stream()).collect(Collectors.toList());
-175   }
-176
-177   private WAL getWAL(String group) throws IOException {
-178     WALProvider provider = cached.get(group);
-179     if (provider == null) {
-180       Lock lock = createLock.acquireLock(group);
-181       try {
-182         provider = cached.get(group);
-183         if (provider == null) {
-184           provider = createProvider(group);
-185           listeners.forEach(provider::addWALActionsListener);
-186           cached.put(group, provider);
-187         }
-188       } finally {
-189         lock.unlock();
-190       }
-191     }
-192     return provider.getWAL(null);
-193   }
-194
-195   @Override
-196   public WAL getWAL(RegionInfo region) throws IOException {
-197     String group;
-198     if (META_WAL_PROVIDER_ID.equals(this.providerId)) {
-199       group = META_WAL_GROUP_NAME;
-200     } else {
-201       byte[] id;
-202       byte[] namespace;
-203       if (region != null) {
-204         id = region.getEncodedNameAsBytes();
-205         namespace = region.getTable().getNamespace();
-206       } else {
-207         id = HConstants.EMPTY_BYTE_ARRAY;
-208         namespace = null;
-209       }
-210       group = strategy.group(id, namespace);
-211     }
-212     return getWAL(group);
-213   }
-214
-215   @Override
-216   public void shutdown() throws IOException {
-217     // save the last exception and rethrow
-218     IOException failure =
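
The getWAL(String group) method above is a per-key lazy-initialization pattern: check the cache, acquire a lock scoped to the group key, then check again before creating, so at most one provider is ever built per group. A hedged stand-alone sketch of the same idiom; HBase uses its KeyLocker utility, while a plain per-key ReentrantLock map stands in for it here, and Resource/Factory are illustrative stand-ins for WALProvider and its construction:

import java.io.IOException;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;

final class PerGroupCache<R> {
  interface Factory<R> {
    R create(String group) throws IOException;
  }

  private final ConcurrentMap<String, R> cached = new ConcurrentHashMap<>();
  private final ConcurrentMap<String, Lock> locks = new ConcurrentHashMap<>();
  private final Factory<R> factory;

  PerGroupCache(Factory<R> factory) {
    this.factory = factory;
  }

  R get(String group) throws IOException {
    R r = cached.get(group);
    if (r == null) {
      Lock lock = locks.computeIfAbsent(group, g -> new ReentrantLock());
      lock.lock();
      try {
        r = cached.get(group);       // re-check: another thread may have won the race
        if (r == null) {
          r = factory.create(group); // may do I/O and throw, hence not computeIfAbsent
          cached.put(group, r);
        }
      } finally {
        lock.unlock();
      }
    }
    return r;
  }
}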
[08/51] [partial] hbase-site git commit: Published site at 6bc7089f9e0793efc9bdd46a84f5ccd9bc4579ad.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/419d0338/testdevapidocs/org/apache/hadoop/hbase/master/replication/class-use/TestModifyPeerProcedureRetryBackoff.html -- diff --git a/testdevapidocs/org/apache/hadoop/hbase/master/replication/class-use/TestModifyPeerProcedureRetryBackoff.html b/testdevapidocs/org/apache/hadoop/hbase/master/replication/class-use/TestModifyPeerProcedureRetryBackoff.html new file mode 100644 index 000..bb498f0 --- /dev/null +++ b/testdevapidocs/org/apache/hadoop/hbase/master/replication/class-use/TestModifyPeerProcedureRetryBackoff.html @@ -0,0 +1,125 @@
New generated page: Uses of Class org.apache.hadoop.hbase.master.replication.TestModifyPeerProcedureRetryBackoff (Apache HBase 3.0.0-SNAPSHOT Test API)
No usage of org.apache.hadoop.hbase.master.replication.TestModifyPeerProcedureRetryBackoff

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/419d0338/testdevapidocs/org/apache/hadoop/hbase/master/replication/package-frame.html -- diff --git a/testdevapidocs/org/apache/hadoop/hbase/master/replication/package-frame.html b/testdevapidocs/org/apache/hadoop/hbase/master/replication/package-frame.html new file mode 100644 index 000..7ac3a9a --- /dev/null +++ b/testdevapidocs/org/apache/hadoop/hbase/master/replication/package-frame.html @@ -0,0 +1,21 @@
New generated page: org.apache.hadoop.hbase.master.replication (Apache HBase 3.0.0-SNAPSHOT Test API)
Classes: TestModifyPeerProcedureRetryBackoff, TestModifyPeerProcedureRetryBackoff.TestModifyPeerProcedure

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/419d0338/testdevapidocs/org/apache/hadoop/hbase/master/replication/package-summary.html -- diff --git a/testdevapidocs/org/apache/hadoop/hbase/master/replication/package-summary.html b/testdevapidocs/org/apache/hadoop/hbase/master/replication/package-summary.html new file mode 100644 index 000..f735dab --- /dev/null +++ b/testdevapidocs/org/apache/hadoop/hbase/master/replication/package-summary.html @@ -0,0 +1,147 @@
New generated page: Package org.apache.hadoop.hbase.master.replication (Apache HBase 3.0.0-SNAPSHOT Test API)
Class Summary: TestModifyPeerProcedureRetryBackoff; TestModifyPeerProcedureRetryBackoff.TestModifyPeerProcedure

(The commit also updates the generated ProcedureExecutor page; the first hunk grows the generated JavaScript method index by one entry, appending "i66":10 for the new method shown below.)
@@ -110,7 +110,7 @@
 @InterfaceAudience.Private
 public class ProcedureExecutor<TEnvironment>
 extends Object
 (the -/+ pair here differs only in generated markup)

 Thread Pool that executes the submitted procedures. The executor has a ProcedureStore associated.
@@ -429,200 +429,206 @@
 long getActiveExecutorCount()
+Collection<Procedure<TEnvironment>> getActiveProceduresNoCopy()
+    Should only be used when starting up, where the procedure workers have not been started.
The remaining method-summary rows shift down by one; the surviving entries, cleaned of generated link markup:
 Set<Long> getActiveProcIds()
 int getCorePoolSize()
 TEnvironment getEnvironment()
 long getKeepAliveTime(TimeUnit timeUnit)
 protected long getLastProcId()
 <T extends Procedure<TEnvironment>> T getProcedure(Class<T> clazz, long procId)
 Procedure<TEnvironment> getProcedure(long procId)
 List<Procedure<TEnvironment>> getProcedures()
     Get procedures.
 (package private) RootProcedureState<TEnvironment> getProcStack(long rootProcId)
 Procedure<TEnvironment> getResult(long procId)
 Procedure<TEnvironment> getResultOrProcedure(long procId)
 (package private) Long getRootProcedureId(Procedure<TEnvironment> proc)
 (package private) ProcedureScheduler getScheduler()
 ProcedureStore getStore()
 int getWorkerThreadCount()
 private void handleInterruptedException(Procedure<TEnvironment> proc, InterruptedException e)
 void init(int numThreads, boolean abortOnCorruption)
     Initialize the procedure executor, but do not start workers.
 private Procedure<TEnvironment>[] initializeChildren(RootProcedureState<TEnvironment> procStack, Procedure<TEnvironment> procedure, Procedure<TEnvironment>[] subprocs)
 boolean isFinished(long procId)
     Return true
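
The class description and method summary above outline the executor's contract: procedures are submitted, run by worker threads against the associated ProcedureStore, and observed by id. A hedged sketch of the polling idiom a caller could build from just isFinished and getResult; procId is assumed to come from an earlier submission, and real callers would use sync-wait helpers rather than sleeping:

import org.apache.hadoop.hbase.procedure2.Procedure;
import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;

public class ProcedureWait {
  // Spin until the executor marks the procedure finished, then fetch its result.
  static <E> Procedure<E> waitForProcedure(ProcedureExecutor<E> executor, long procId)
      throws InterruptedException {
    while (!executor.isFinished(procId)) {
      Thread.sleep(100); // naive polling, fine for a sketch
    }
    return executor.getResult(procId);
  }
}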
[08/51] [partial] hbase-site git commit: Published site at 092efb42749bf7fc6ad338c96aae8e7b9d3a2c74.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f3d62514/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.SinkWriter.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.SinkWriter.html b/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.SinkWriter.html index 63e4b46..514f830 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.SinkWriter.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.SinkWriter.html @@ -468,15 +468,15 @@
460    * creating it if necessary.
461    * @param logEntry
462    * @param fileNameBeingSplit the file being split currently. Used to generate tmp file name.
-463    * @param conf
-464    * @return Path to file into which to dump split log edits.
-465    * @throws IOException
-466    */
-467   @SuppressWarnings("deprecation")
-468   @VisibleForTesting
-469   static Path getRegionSplitEditsPath(final Entry logEntry, String fileNameBeingSplit,
-470       Configuration conf)
-471   throws IOException {
+463    * @param tmpDirName of the directory used to sideline old recovered edits file
+464    * @param conf
+465    * @return Path to file into which to dump split log edits.
+466    * @throws IOException
+467    */
+468   @SuppressWarnings("deprecation")
+469   @VisibleForTesting
+470   static Path getRegionSplitEditsPath(final Entry logEntry, String fileNameBeingSplit,
+471       String tmpDirName, Configuration conf) throws IOException {
472     FileSystem fs = FileSystem.get(conf);
473     Path rootDir = FSUtils.getRootDir(conf);
474     Path tableDir = FSUtils.getTableDir(rootDir, logEntry.getKey().getTableName());
@@ -491,7 +491,7 @@
483       return null;
484     }
485     if (fs.exists(dir) && fs.isFile(dir)) {
-486       Path tmp = new Path("/tmp");
+486       Path tmp = new Path(tmpDirName);
487       if (!fs.exists(tmp)) {
488         fs.mkdirs(tmp);
489       }
@@ -1520,411 +1520,413 @@
1512    * @return a path with a write for that path. caller should close.
1513    */
1514   WriterAndPath createWAP(byte[] region, Entry entry) throws IOException {
-1515     Path regionedits = getRegionSplitEditsPath(entry,
-1516       fileBeingSplit.getPath().getName(), conf);
-1517     if (regionedits == null) {
-1518       return null;
-1519     }
-1520     FileSystem rootFs = FileSystem.get(conf);
-1521     if (rootFs.exists(regionedits)) {
-1522       LOG.warn("Found old edits file. It could be the "
-1523           + "result of a previous failed split attempt. Deleting " + regionedits + ", length="
-1524           + rootFs.getFileStatus(regionedits).getLen());
-1525       if (!rootFs.delete(regionedits, false)) {
-1526         LOG.warn("Failed delete of old {}", regionedits);
-1527       }
-1528     }
-1529     Writer w = createWriter(regionedits);
-1530     LOG.debug("Creating writer path={}", regionedits);
-1531     return new WriterAndPath(regionedits, w, entry.getKey().getSequenceId());
-1532   }
-1533
-1534   void filterCellByStore(Entry logEntry) {
-1535     Map<byte[], Long> maxSeqIdInStores =
-1536         regionMaxSeqIdInStores.get(Bytes.toString(logEntry.getKey().getEncodedRegionName()));
-1537     if (MapUtils.isEmpty(maxSeqIdInStores)) {
-1538       return;
-1539     }
-1540     // Create the array list for the cells that aren't filtered.
-1541     // We make the assumption that most cells will be kept.
-1542     ArrayList<Cell> keptCells = new ArrayList<>(logEntry.getEdit().getCells().size());
-1543     for (Cell cell : logEntry.getEdit().getCells()) {
-1544       if (CellUtil.matchingFamily(cell, WALEdit.METAFAMILY)) {
-1545         keptCells.add(cell);
-1546       } else {
-1547         byte[] family = CellUtil.cloneFamily(cell);
-1548         Long maxSeqId = maxSeqIdInStores.get(family);
-1549         // Do not skip cell even if maxSeqId is null. Maybe we are in a rolling upgrade,
-1550         // or the master was crashed before and we can not get the information.
-1551         if (maxSeqId == null || maxSeqId.longValue() < logEntry.getKey().getSequenceId()) {
-1552           keptCells.add(cell);
-1553         }
-1554       }
-1555     }
-1556
-1557     // Anything in the keptCells array list is still live.
-1558     // So rather than removing the cells from the array list
-1559     // which would be an O(n^2) operation, we just replace the list
-1560     logEntry.getEdit().setCells(keptCells);
-1561   }
-1562
-1563   @Override
-1564   public void append(RegionEntryBuffer buffer) throws IOException {
-1565     appendBuffer(buffer, true);
-1566   }
-1567
-1568   WriterAndPath appendBuffer(RegionEntryBuffer buffer, boolean reusable) throws IOException {
-1569     List<Entry> entries = buffer.entryBuffer;
-1570     if (entries.isEmpty()) {
-1571       LOG.warn("got an empty buffer, skipping");
-1572       return null;
-1573     }
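
The filterCellByStore method above uses a keep-list: rather than removing cells from the ArrayList in place (repeated removal is O(n^2)), it collects the survivors and swaps the whole list in via setCells. A hedged, generic sketch of that idiom; the real code keys its predicate on per-store flushed sequence ids:

import java.util.ArrayList;
import java.util.List;
import java.util.function.Predicate;

public class KeepListFilter {
  // Build a keep-list and swap it in, instead of repeated in-place removal.
  static <E> List<E> keepMatching(List<E> cells, Predicate<E> keep) {
    List<E> kept = new ArrayList<>(cells.size()); // assume most entries survive
    for (E cell : cells) {
      if (keep.test(cell)) {
        kept.add(cell);
      }
    }
    return kept; // caller replaces the original list wholesale, as setCells does above
  }
}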
[08/51] [partial] hbase-site git commit: Published site at 613d831429960348dc42c3bdb6ea5d31be15c81c.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7cf6034b/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALFactory.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALFactory.html b/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALFactory.html index d2d8da1..5bbbf0c 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALFactory.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALFactory.html @@ -90,391 +90,392 @@
082   static final String DEFAULT_WAL_PROVIDER = Providers.defaultProvider.name();
083
084   public static final String META_WAL_PROVIDER = "hbase.wal.meta_provider";
-085   static final String DEFAULT_META_WAL_PROVIDER = Providers.defaultProvider.name();
-086
-087   final String factoryId;
-088   private final WALProvider provider;
-089   // The meta updates are written to a different wal. If this
-090   // regionserver holds meta regions, then this ref will be non-null.
-091   // lazily intialized; most RegionServers don't deal with META
-092   private final AtomicReference<WALProvider> metaProvider = new AtomicReference<>();
-093
-094   /**
-095    * Configuration-specified WAL Reader used when a custom reader is requested
-096    */
-097   private final Class<? extends AbstractFSWALProvider.Reader> logReaderClass;
-098
-099   /**
-100    * How long to attempt opening in-recovery wals
-101    */
-102   private final int timeoutMillis;
-103
-104   private final Configuration conf;
-105
-106   // Used for the singleton WALFactory, see below.
-107   private WALFactory(Configuration conf) {
-108     // this code is duplicated here so we can keep our members final.
-109     // until we've moved reader/writer construction down into providers, this initialization must
-110     // happen prior to provider initialization, in case they need to instantiate a reader/writer.
-111     timeoutMillis = conf.getInt("hbase.hlog.open.timeout", 30);
-112     /* TODO Both of these are probably specific to the fs wal provider */
-113     logReaderClass = conf.getClass("hbase.regionserver.hlog.reader.impl", ProtobufLogReader.class,
-114       AbstractFSWALProvider.Reader.class);
-115     this.conf = conf;
-116     // end required early initialization
-117
-118     // this instance can't create wals, just reader/writers.
-119     provider = null;
-120     factoryId = SINGLETON_ID;
-121   }
-122
-123   @VisibleForTesting
-124   public Class<? extends WALProvider> getProviderClass(String key, String defaultValue) {
-125     try {
-126       Providers provider = Providers.valueOf(conf.get(key, defaultValue));
-127       if (provider != Providers.defaultProvider) {
-128         // User gives a wal provider explicitly, just use that one
-129         return provider.clazz;
-130       }
-131       // AsyncFSWAL has better performance in most cases, and also uses less resources, we will try
-132       // to use it if possible. But it deeply hacks into the internal of DFSClient so will be easily
-133       // broken when upgrading hadoop. If it is broken, then we fall back to use FSHLog.
-134       if (AsyncFSWALProvider.load()) {
-135         return AsyncFSWALProvider.class;
-136       } else {
-137         return FSHLogProvider.class;
-138       }
-139     } catch (IllegalArgumentException exception) {
-140       // Fall back to them specifying a class name
-141       // Note that the passed default class shouldn't actually be used, since the above only fails
-142       // when there is a config value present.
-143       return conf.getClass(key, Providers.defaultProvider.clazz, WALProvider.class);
-144     }
-145   }
-146
-147   static WALProvider createProvider(Class<? extends WALProvider> clazz) throws IOException {
-148     LOG.info("Instantiating WALProvider of type {}", clazz);
-149     try {
-150       return clazz.getDeclaredConstructor().newInstance();
-151     } catch (Exception e) {
-152       LOG.error("couldn't set up WALProvider, the configured class is " + clazz);
-153       LOG.debug("Exception details for failure to load WALProvider.", e);
-154       throw new IOException("couldn't set up WALProvider", e);
-155     }
-156   }
-157
-158   /**
-159    * @param conf must not be null, will keep a reference to read params in later reader/writer
-160    *          instances.
-161    * @param factoryId a unique identifier for this factory. used i.e. by filesystem implementations
-162    *          to make a directory
-163    */
-164   public WALFactory(Configuration conf, String factoryId) throws IOException {
-165     // default enableSyncReplicationWALProvider is true, only disable SyncReplicationWALProvider
-166     // for HMaster or HRegionServer which take system table only. See HBASE-1
-167     this(conf, factoryId, true);
-168   }
-169
-170   /**
-171    * @param conf must not be null, will keep a reference to read params in later reader/writer
-172    *          instances.
-173    * @param factoryId a unique identifier
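
The getProviderClass method above encodes a three-step selection policy: an explicit user setting always wins; otherwise the faster AsyncFSWAL provider is probed at runtime (it reaches into DFSClient internals and can break across Hadoop upgrades), falling back to FSHLog when it will not load; and a non-enum config value is treated as a class name. A hedged model of the core fallback, with stand-in names rather than the real provider classes:

public class WalProviderChoice {
  // Explicit user setting wins; otherwise probe the async provider and fall
  // back to the filesystem provider when it will not load.
  static String resolveWalProvider(String configured, boolean asyncProviderLoads) {
    if (configured != null && !configured.equals("defaultProvider")) {
      return configured; // the user chose explicitly, honor it
    }
    // AsyncFSWAL performs better and uses fewer resources, but it is fragile
    // across Hadoop upgrades, hence the runtime probe before committing to it.
    return asyncProviderLoads ? "asyncfs" : "filesystem";
  }
}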
[08/51] [partial] hbase-site git commit: Published site at ba5d1c1f28301adc99019d9d6c4a04fac98ae511.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/804782f0/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html index 95f2a65..073d0d0 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html @@ -931,7 +931,7 @@
923     InitMetaProcedure initMetaProc = null;
924     if (assignmentManager.getRegionStates().getRegionState(RegionInfoBuilder.FIRST_META_REGIONINFO)
925         .isOffline()) {
-926       Optional<Procedure<?>> optProc = procedureExecutor.getProcedures().stream()
+926       Optional<Procedure<MasterProcedureEnv>> optProc = procedureExecutor.getProcedures().stream()
927           .filter(p -> p instanceof InitMetaProcedure).findAny();
928       if (optProc.isPresent()) {
929         initMetaProc = (InitMetaProcedure) optProc.get();
@@ -3210,566 +3210,567 @@
3202       cpHost.preGetProcedures();
3203     }
3204
-3205     final List<Procedure<?>> procList = this.procedureExecutor.getProcedures();
-3206
-3207     if (cpHost != null) {
-3208       cpHost.postGetProcedures(procList);
-3209     }
-3210
-3211     return procList;
-3212   }
-3213
-3214   @Override
-3215   public List<LockedResource> getLocks() throws IOException {
-3216     if (cpHost != null) {
-3217       cpHost.preGetLocks();
-3218     }
-3219
-3220     MasterProcedureScheduler procedureScheduler =
-3221         procedureExecutor.getEnvironment().getProcedureScheduler();
-3222
-3223     final List<LockedResource> lockedResources = procedureScheduler.getLocks();
-3224
-3225     if (cpHost != null) {
-3226       cpHost.postGetLocks(lockedResources);
-3227     }
-3228
-3229     return lockedResources;
-3230   }
-3231
-3232   /**
-3233    * Returns the list of table descriptors that match the specified request
-3234    * @param namespace the namespace to query, or null if querying for all
-3235    * @param regex The regular expression to match against, or null if querying for all
-3236    * @param tableNameList the list of table names, or null if querying for all
-3237    * @param includeSysTables False to match only against userspace tables
-3238    * @return the list of table descriptors
-3239    */
-3240   public List<TableDescriptor> listTableDescriptors(final String namespace, final String regex,
-3241       final List<TableName> tableNameList, final boolean includeSysTables)
-3242   throws IOException {
-3243     List<TableDescriptor> htds = new ArrayList<>();
-3244     if (cpHost != null) {
-3245       cpHost.preGetTableDescriptors(tableNameList, htds, regex);
-3246     }
-3247     htds = getTableDescriptors(htds, namespace, regex, tableNameList, includeSysTables);
-3248     if (cpHost != null) {
-3249       cpHost.postGetTableDescriptors(tableNameList, htds, regex);
-3250     }
-3251     return htds;
-3252   }
-3253
-3254   /**
-3255    * Returns the list of table names that match the specified request
-3256    * @param regex The regular expression to match against, or null if querying for all
-3257    * @param namespace the namespace to query, or null if querying for all
-3258    * @param includeSysTables False to match only against userspace tables
-3259    * @return the list of table names
-3260    */
-3261   public List<TableName> listTableNames(final String namespace, final String regex,
-3262       final boolean includeSysTables) throws IOException {
-3263     List<TableDescriptor> htds = new ArrayList<>();
-3264     if (cpHost != null) {
-3265       cpHost.preGetTableNames(htds, regex);
-3266     }
-3267     htds = getTableDescriptors(htds, namespace, regex, null, includeSysTables);
-3268     if (cpHost != null) {
-3269       cpHost.postGetTableNames(htds, regex);
-3270     }
-3271     List<TableName> result = new ArrayList<>(htds.size());
-3272     for (TableDescriptor htd : htds) result.add(htd.getTableName());
-3273     return result;
-3274   }
-3275
-3276   /**
-3277    * @return list of table descriptors after filtering by regex and whether to include system
-3278    *         tables, etc.
-3279    * @throws IOException
-3280    */
-3281   private List<TableDescriptor> getTableDescriptors(final List<TableDescriptor> htds,
-3282       final String namespace, final String regex, final List<TableName> tableNameList,
-3283       final boolean includeSysTables)
-3284   throws IOException {
-3285     if (tableNameList == null || tableNameList.isEmpty()) {
-3286       // request for all TableDescriptors
-3287       Collection<TableDescriptor> allHtds;
-3288       if (namespace != null && namespace.length() > 0) {
-3289         // Do a check on the namespace existence. Will fail if does not exist.
-3290         this.clusterSchemaService.getNamespace(namespace);
-3291         allHtds =
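
The master methods above all follow the same coprocessor "hook sandwich": if a coprocessor host is installed, run a pre-hook (which may veto by throwing), compute the result, then let a post-hook observe or filter it. A hedged, simplified sketch of that wrapping; the Hooks interface and withHooks helper are stand-ins, not HBase APIs:

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.function.UnaryOperator;

public class HookSandwich {
  // Optional host: the pre-hook may veto by throwing, the post-hook may filter.
  interface Hooks<T> {
    void pre(List<T> seed) throws IOException;
    void post(List<T> result) throws IOException;
  }

  static <T> List<T> withHooks(Hooks<T> cpHost, UnaryOperator<List<T>> body)
      throws IOException {
    List<T> items = new ArrayList<>();
    if (cpHost != null) {
      cpHost.pre(items); // e.g. preGetTableDescriptors
    }
    items = body.apply(items); // the master's own lookup
    if (cpHost != null) {
      cpHost.post(items); // e.g. postGetTableDescriptors
    }
    return items;
  }
}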
[08/51] [partial] hbase-site git commit: Published site at b4759ce6e72f50ccd9d410bd5917dc5a515414f1.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/df8fd1d3/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.TableSkewCostFunction.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.TableSkewCostFunction.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.TableSkewCostFunction.html index 233dba3..91b9055 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.TableSkewCostFunction.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.TableSkewCostFunction.html @@ -540,1205 +540,1204 @@
532       sm.getRegionMetrics().forEach((byte[] regionName, RegionMetrics rm) -> {
533         Deque<BalancerRegionLoad> rLoads = oldLoads.get(Bytes.toString(regionName));
534         if (rLoads == null) {
-535           // There was nothing there
-536           rLoads = new ArrayDeque<>();
-537         } else if (rLoads.size() >= numRegionLoadsToRemember) {
-538           rLoads.remove();
-539         }
-540         rLoads.add(new BalancerRegionLoad(rm));
-541         loads.put(Bytes.toString(regionName), rLoads);
-542       });
-543     });
-544
-545     for (CostFromRegionLoadFunction cost : regionLoadFunctions) {
-546       cost.setLoads(loads);
-547     }
-548   }
-549
-550   protected void initCosts(Cluster cluster) {
-551     for (CostFunction c : costFunctions) {
-552       c.init(cluster);
-553     }
-554   }
-555
-556   protected void updateCostsWithAction(Cluster cluster, Action action) {
-557     for (CostFunction c : costFunctions) {
-558       c.postAction(action);
-559     }
-560   }
-561
-562   /**
-563    * Get the names of the cost functions
-564    */
-565   public String[] getCostFunctionNames() {
-566     if (costFunctions == null) return null;
-567     String[] ret = new String[costFunctions.length];
-568     for (int i = 0; i < costFunctions.length; i++) {
-569       CostFunction c = costFunctions[i];
-570       ret[i] = c.getClass().getSimpleName();
-571     }
-572
-573     return ret;
-574   }
-575
-576   /**
-577    * This is the main cost function. It will compute a cost associated with a proposed cluster
-578    * state. All different costs will be combined with their multipliers to produce a double cost.
-579    *
-580    * @param cluster The state of the cluster
-581    * @param previousCost the previous cost. This is used as an early out.
-582    * @return a double of a cost associated with the proposed cluster state. This cost is an
-583    *         aggregate of all individual cost functions.
-584    */
-585   protected double computeCost(Cluster cluster, double previousCost) {
-586     double total = 0;
-587
-588     for (int i = 0; i < costFunctions.length; i++) {
-589       CostFunction c = costFunctions[i];
-590       this.tempFunctionCosts[i] = 0.0;
-591
-592       if (c.getMultiplier() <= 0) {
-593         continue;
-594       }
-595
-596       Float multiplier = c.getMultiplier();
-597       Double cost = c.cost();
-598
-599       this.tempFunctionCosts[i] = multiplier * cost;
-600       total += this.tempFunctionCosts[i];
-601
-602       if (total > previousCost) {
-603         break;
-604       }
-605     }
-606
-607     return total;
-608   }
-609
-610   /** Generates a candidate action to be applied to the cluster for cost function search */
-611   abstract static class CandidateGenerator {
-612     abstract Cluster.Action generate(Cluster cluster);
-613
-614     /**
-615      * From a list of regions pick a random one. Null can be returned which
-616      * {@link StochasticLoadBalancer#balanceCluster(Map)} recognize as signal to try a region move
-617      * rather than swap.
-618      *
-619      * @param cluster        The state of the cluster
-620      * @param server         index of the server
-621      * @param chanceOfNoSwap Chance that this will decide to try a move rather
-622      *                       than a swap.
-623      * @return a random {@link RegionInfo} or null if an asymmetrical move is
-624      *         suggested.
-625      */
-626     protected int pickRandomRegion(Cluster cluster, int server, double chanceOfNoSwap) {
-627       // Check to see if this is just a move.
-628       if (cluster.regionsPerServer[server].length == 0 || RANDOM.nextFloat() < chanceOfNoSwap) {
-629         // signal a move only.
-630         return -1;
-631       }
-632       int rand = RANDOM.nextInt(cluster.regionsPerServer[server].length);
-633       return cluster.regionsPerServer[server][rand];
-634
-635     }
-636     protected int pickRandomServer(Cluster cluster) {
-637       if (cluster.numServers < 1) {
-638         return -1;
-639       }
-640
-641       return RANDOM.nextInt(cluster.numServers);
-642     }
-643
-644     protected int pickRandomRack(Cluster cluster) {
-645       if (cluster.numRacks < 1) {
-646         return -1;
-647       }
-648
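
The computeCost Javadoc above describes the balancer's aggregation rule: each enabled cost function contributes multiplier * cost, and previousCost serves as an early out, since a candidate state whose running total already exceeds the previous best can never be accepted. A hedged, self-contained sketch of just that arithmetic, with plain arrays standing in for the CostFunction objects:

public class WeightedCost {
  // Skip disabled functions, accumulate multiplier * cost, and bail out as soon
  // as the running total already exceeds the previous best candidate.
  static double computeCost(double[] multipliers, double[] costs, double previousCost) {
    double total = 0;
    for (int i = 0; i < multipliers.length; i++) {
      if (multipliers[i] <= 0) {
        continue; // disabled cost function
      }
      total += multipliers[i] * costs[i];
      if (total > previousCost) {
        break; // early out: this candidate state can no longer win
      }
    }
    return total;
  }
}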
[08/51] [partial] hbase-site git commit: Published site at e66a6603e36ecd67237ca16acd5e2de03f0d372d.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0c6f447e/apidocs/org/apache/hadoop/hbase/class-use/CellScanner.html -- diff --git a/apidocs/org/apache/hadoop/hbase/class-use/CellScanner.html b/apidocs/org/apache/hadoop/hbase/class-use/CellScanner.html index dae577c..6b2e30c 100644 --- a/apidocs/org/apache/hadoop/hbase/class-use/CellScanner.html +++ b/apidocs/org/apache/hadoop/hbase/class-use/CellScanner.html @@ -1,10 +1,10 @@
This hunk switches the generated page from the Chinese locale back to English; only boilerplate strings change.
-接口 org.apache.hadoop.hbase.CellScanner的使用 (Apache HBase 3.0.0-SNAPSHOT API)  [English: Uses of Interface org.apache.hadoop.hbase.CellScanner]
+Uses of Interface org.apache.hadoop.hbase.CellScanner (Apache HBase 3.0.0-SNAPSHOT API)
The remaining changed lines are the localized navigation labels (Overview, Package, Class, Use, Tree, Deprecated, Index, Help, Prev, Next, Frames, No Frames, All Classes) and the page heading "Uses of Interface org.apache.hadoop.hbase.CellScanner".