[06/51] [partial] hbase-site git commit: Published site at 620d70d6186fb800299bcc62ad7179fccfd1be41.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/aa3fb87f/devapidocs/src-html/org/apache/hadoop/hbase/thrift2/client/ThriftConnection.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/thrift2/client/ThriftConnection.html b/devapidocs/src-html/org/apache/hadoop/hbase/thrift2/client/ThriftConnection.html new file mode 100644 index 000..fee41a8 --- /dev/null +++ b/devapidocs/src-html/org/apache/hadoop/hbase/thrift2/client/ThriftConnection.html @@ -0,0 +1,394 @@
+001/** +002 * +003 * Licensed to the Apache Software Foundation (ASF) under one +004 * or more contributor license agreements. See the NOTICE file +005 * distributed with this work for additional information +006 * regarding copyright ownership. The ASF licenses this file +007 * to you under the Apache License, Version 2.0 (the +008 * "License"); you may not use this file except in compliance +009 * with the License. You may obtain a copy of the License at +010 * +011 * http://www.apache.org/licenses/LICENSE-2.0 +012 * +013 * Unless required by applicable law or agreed to in writing, software +014 * distributed under the License is distributed on an "AS IS" BASIS, +015 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +016 * See the License for the specific language governing permissions and +017 * limitations under the License. +018 */
+019package org.apache.hadoop.hbase.thrift2.client;
+020 +021import static org.apache.hadoop.hbase.ipc.RpcClient.DEFAULT_SOCKET_TIMEOUT_CONNECT; +022import static org.apache.hadoop.hbase.ipc.RpcClient.SOCKET_TIMEOUT_CONNECT; +023 +024import java.io.IOException; +025import java.lang.reflect.Constructor; +026import java.util.HashMap; +027import java.util.Map; +028import java.util.concurrent.ExecutorService; +029 +030import org.apache.commons.lang3.NotImplementedException; +031import org.apache.hadoop.conf.Configuration; +032import org.apache.hadoop.hbase.HConstants; +033import org.apache.hadoop.hbase.TableName; +034import org.apache.hadoop.hbase.client.Admin; +035import org.apache.hadoop.hbase.client.BufferedMutator; +036import org.apache.hadoop.hbase.client.BufferedMutatorParams; +037import org.apache.hadoop.hbase.client.Connection; +038import org.apache.hadoop.hbase.client.RegionLocator; +039import org.apache.hadoop.hbase.client.Table; +040import org.apache.hadoop.hbase.client.TableBuilder; +041import org.apache.hadoop.hbase.security.User; +042import org.apache.hadoop.hbase.thrift.Constants; +043import org.apache.hadoop.hbase.thrift2.generated.THBaseService; +044import org.apache.hadoop.hbase.util.Pair; +045import org.apache.http.client.HttpClient; +046import org.apache.http.client.config.RequestConfig; +047import org.apache.http.client.utils.HttpClientUtils; +048import org.apache.http.impl.client.HttpClientBuilder; +049import org.apache.thrift.protocol.TBinaryProtocol; +050import org.apache.thrift.protocol.TCompactProtocol; +051import org.apache.thrift.protocol.TProtocol; +052import org.apache.thrift.transport.TFramedTransport; +053import org.apache.thrift.transport.THttpClient; +054import org.apache.thrift.transport.TSocket; +055import org.apache.thrift.transport.TTransport; +056import org.apache.thrift.transport.TTransportException; +057import org.apache.yetus.audience.InterfaceAudience; +058 +059import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; +060
+061@InterfaceAudience.Private +062public class ThriftConnection implements Connection { +063 private Configuration conf; +064 private User user; +065 // For HTTP protocol +066 private HttpClient httpClient; +067 private boolean httpClientCreated = false; +068 private boolean isClosed = false; +069 +070 private String host; +071 private int port; +072 private boolean isFramed = false; +073 private boolean isCompact = false; +074 +075 private ThriftClientBuilder clientBuilder; +076 +077 private int operationTimeout; +078 private int connectTimeout; +079 +080 public ThriftConnection(Configuration conf, ExecutorService pool, final User user) +081 throws IOException { +082this.conf = conf; +083this.user = user; +084this.host = conf.get(Constants.HBASE_THRIFT_SERVER_NAME); +085this.port = conf.getInt(Constants.HBASE_THRIFT_SERVER_PORT, -1); +086Preconditions.checkArgument(port > 0); +087Preconditions.checkArgument(host != null); +088this.isFramed = conf.getBoolean(Constants.FRAMED_CONF_KEY, Constants.FRAMED_CONF_DEFAULT); +089this.isCompact = conf.getBoolean(Constants.COMPACT_CONF_KEY, Constants.COMPACT_CONF_DEFAULT); +090this.operationTimeout = conf.getInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT, +091 HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT); +092this.connectTimeout = conf.getInt(SOCKET_TIMEOUT_CONNECT,
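The ThriftConnection constructor above resolves the Thrift2 server from configuration (Constants.HBASE_THRIFT_SERVER_NAME / HBASE_THRIFT_SERVER_PORT) and validates both before any client is built. A minimal usage sketch, assuming an HBase 2.x client where ConnectionFactory honors hbase.client.connection.impl; the host, port, table, and row values here are hypothetical:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class ThriftConnectionExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Ask ConnectionFactory for the Thrift2-backed Connection instead of the RPC one.
    conf.set("hbase.client.connection.impl",
        "org.apache.hadoop.hbase.thrift2.client.ThriftConnection");
    // Location of a running Thrift2 server (hypothetical values).
    conf.set("hbase.thrift.server.name", "thrift-host.example.com");
    conf.setInt("hbase.thrift.server.port", 9090);
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(TableName.valueOf("t1"))) {
      // Reads and writes go over the Thrift2 protocol from here on.
      table.get(new Get(Bytes.toBytes("row1")));
    }
  }
}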
[06/51] [partial] hbase-site git commit: Published site at 281d6429e55149cc4c05430dcc1d1dc136d8b245.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/901d593a/devapidocs/org/apache/hadoop/hbase/regionserver/SplitLogWorker.TaskExecutor.Status.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/SplitLogWorker.TaskExecutor.Status.html b/devapidocs/org/apache/hadoop/hbase/regionserver/SplitLogWorker.TaskExecutor.Status.html index 1126570..cd96401 100644 --- a/devapidocs/org/apache/hadoop/hbase/regionserver/SplitLogWorker.TaskExecutor.Status.html +++ b/devapidocs/org/apache/hadoop/hbase/regionserver/SplitLogWorker.TaskExecutor.Status.html @@ -122,7 +122,7 @@ var activeTableTab = "activeTableTab"; -public static enum SplitLogWorker.TaskExecutor.Status +public static enum SplitLogWorker.TaskExecutor.Status extends Enum<SplitLogWorker.TaskExecutor.Status> @@ -216,7 +216,7 @@ the order they are declared. DONE -public static final SplitLogWorker.TaskExecutor.Status DONE +public static final SplitLogWorker.TaskExecutor.Status DONE @@ -225,7 +225,7 @@ the order they are declared. ERR -public static final SplitLogWorker.TaskExecutor.Status ERR +public static final SplitLogWorker.TaskExecutor.Status ERR @@ -234,7 +234,7 @@ the order they are declared. RESIGNED -public static final SplitLogWorker.TaskExecutor.Status RESIGNED +public static final SplitLogWorker.TaskExecutor.Status RESIGNED @@ -243,7 +243,7 @@ the order they are declared. PREEMPTED -public static final SplitLogWorker.TaskExecutor.Status PREEMPTED +public static final SplitLogWorker.TaskExecutor.Status PREEMPTED
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/901d593a/devapidocs/org/apache/hadoop/hbase/regionserver/SplitLogWorker.TaskExecutor.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/SplitLogWorker.TaskExecutor.html b/devapidocs/org/apache/hadoop/hbase/regionserver/SplitLogWorker.TaskExecutor.html index 8ae8dd4..36ae80d 100644 --- a/devapidocs/org/apache/hadoop/hbase/regionserver/SplitLogWorker.TaskExecutor.html +++ b/devapidocs/org/apache/hadoop/hbase/regionserver/SplitLogWorker.TaskExecutor.html @@ -110,7 +110,7 @@ var activeTableTab = "activeTableTab"; @FunctionalInterface -public static interface SplitLogWorker.TaskExecutor +public static interface SplitLogWorker.TaskExecutor Objects implementing this interface actually do the task that has been acquired by a SplitLogWorker. Since there isn't a water-tight guarantee that two workers will not be executing the same task therefore it @@ -180,7 +180,7 @@ public static interface exec -SplitLogWorker.TaskExecutor.Status exec(String name, +SplitLogWorker.TaskExecutor.Status exec(String name, CancelableProgressable p)
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/901d593a/devapidocs/org/apache/hadoop/hbase/regionserver/SplitLogWorker.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/SplitLogWorker.html b/devapidocs/org/apache/hadoop/hbase/regionserver/SplitLogWorker.html index 322dc6b..36f1ac8 100644 --- a/devapidocs/org/apache/hadoop/hbase/regionserver/SplitLogWorker.html +++ b/devapidocs/org/apache/hadoop/hbase/regionserver/SplitLogWorker.html @@ -248,7 +248,7 @@ implements Runnable run() -private static SplitLogWorker.TaskExecutor.Status +(package private) static SplitLogWorker.TaskExecutor.Status splitLog(String name, CancelableProgressable p, org.apache.hadoop.conf.Configuration conf, @@ -396,12 +396,12 @@ implements Runnable splitLog -private static SplitLogWorker.TaskExecutor.Status splitLog(String name, - CancelableProgressable p, - org.apache.hadoop.conf.Configuration conf, - RegionServerServices server, -
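Because TaskExecutor is a @FunctionalInterface whose single method exec(String, CancelableProgressable) returns one of the four Status values above, task logic can be supplied as a lambda. A minimal sketch, assuming the 2.x-era classes named in the diff; real executors split a WAL here and must be prepared to return RESIGNED or PREEMPTED since, as the class comment notes, two workers may race on the same task:

import org.apache.hadoop.hbase.regionserver.SplitLogWorker;
import org.apache.hadoop.hbase.util.CancelableProgressable;

public class TaskExecutorSketch {
  // A trivial executor: report progress once, then declare the task done.
  static final SplitLogWorker.TaskExecutor NOOP = (name, progress) -> {
    if (!progress.progress()) {
      // Progress rejected: assume another worker preempted the task.
      return SplitLogWorker.TaskExecutor.Status.PREEMPTED;
    }
    return SplitLogWorker.TaskExecutor.Status.DONE;
  };
}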
[06/51] [partial] hbase-site git commit: Published site at 466fa920fee572fe20db3b77ebf539dc304d5f31.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2bf59208/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.DeleteNamespaceProcedureBiConsumer.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.DeleteNamespaceProcedureBiConsumer.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.DeleteNamespaceProcedureBiConsumer.html index 736388b..197b99d 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.DeleteNamespaceProcedureBiConsumer.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.DeleteNamespaceProcedureBiConsumer.html @@ -26,3624 +26,3599 @@ 018package org.apache.hadoop.hbase.client; 019 020import static org.apache.hadoop.hbase.TableName.META_TABLE_NAME; -021 -022import com.google.protobuf.Message; -023import com.google.protobuf.RpcChannel; -024import java.io.IOException; -025import java.util.ArrayList; -026import java.util.Arrays; -027import java.util.Collections; -028import java.util.EnumSet; -029import java.util.HashMap; -030import java.util.List; -031import java.util.Map; -032import java.util.Optional; -033import java.util.Set; -034import java.util.concurrent.CompletableFuture; -035import java.util.concurrent.ConcurrentHashMap; -036import java.util.concurrent.TimeUnit; -037import java.util.concurrent.atomic.AtomicReference; -038import java.util.function.BiConsumer; -039import java.util.function.Function; -040import java.util.function.Supplier; -041import java.util.regex.Pattern; -042import java.util.stream.Collectors; -043import java.util.stream.Stream; -044import org.apache.commons.io.IOUtils; -045import org.apache.hadoop.conf.Configuration; -046import org.apache.hadoop.hbase.AsyncMetaTableAccessor; -047import org.apache.hadoop.hbase.CacheEvictionStats; -048import org.apache.hadoop.hbase.CacheEvictionStatsAggregator; -049import org.apache.hadoop.hbase.ClusterMetrics; -050import org.apache.hadoop.hbase.ClusterMetrics.Option; -051import org.apache.hadoop.hbase.ClusterMetricsBuilder; -052import org.apache.hadoop.hbase.HConstants; -053import org.apache.hadoop.hbase.HRegionLocation; -054import org.apache.hadoop.hbase.MetaTableAccessor; -055import org.apache.hadoop.hbase.MetaTableAccessor.QueryType; -056import org.apache.hadoop.hbase.NamespaceDescriptor; -057import org.apache.hadoop.hbase.RegionLocations; -058import org.apache.hadoop.hbase.RegionMetrics; -059import org.apache.hadoop.hbase.RegionMetricsBuilder; -060import org.apache.hadoop.hbase.ServerName; -061import org.apache.hadoop.hbase.TableExistsException; -062import org.apache.hadoop.hbase.TableName; -063import org.apache.hadoop.hbase.TableNotDisabledException; -064import org.apache.hadoop.hbase.TableNotEnabledException; -065import org.apache.hadoop.hbase.TableNotFoundException; -066import org.apache.hadoop.hbase.UnknownRegionException; -067import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.AdminRequestCallerBuilder; -068import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.MasterRequestCallerBuilder; -069import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.ServerRequestCallerBuilder; -070import org.apache.hadoop.hbase.client.Scan.ReadType; -071import org.apache.hadoop.hbase.client.replication.ReplicationPeerConfigUtil; -072import org.apache.hadoop.hbase.client.replication.TableCFs; -073import org.apache.hadoop.hbase.client.security.SecurityCapability; -074import org.apache.hadoop.hbase.exceptions.DeserializationException; -075import 
org.apache.hadoop.hbase.ipc.HBaseRpcController; -076import org.apache.hadoop.hbase.quotas.QuotaFilter; -077import org.apache.hadoop.hbase.quotas.QuotaSettings; -078import org.apache.hadoop.hbase.quotas.QuotaTableUtil; -079import org.apache.hadoop.hbase.replication.ReplicationException; -080import org.apache.hadoop.hbase.replication.ReplicationPeerConfig; -081import org.apache.hadoop.hbase.replication.ReplicationPeerDescription; -082import org.apache.hadoop.hbase.replication.SyncReplicationState; -083import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils; -084import org.apache.hadoop.hbase.snapshot.RestoreSnapshotException; -085import org.apache.hadoop.hbase.snapshot.SnapshotCreationException; -086import org.apache.hadoop.hbase.util.Bytes; -087import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; -088import org.apache.hadoop.hbase.util.ForeignExceptionUtil; -089import org.apache.yetus.audience.InterfaceAudience; -090import org.slf4j.Logger; -091import org.slf4j.LoggerFactory; -092 -093import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; -094import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; -095import org.apache.hbase.thirdparty.com.google.protobuf.RpcCallback; -096import
[06/51] [partial] hbase-site git commit: Published site at e4b6b4afb933a961f543537875f87a2dc62d3757.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/849d84a8/testdevapidocs/org/apache/hadoop/hbase/coprocessor/package-summary.html -- diff --git a/testdevapidocs/org/apache/hadoop/hbase/coprocessor/package-summary.html b/testdevapidocs/org/apache/hadoop/hbase/coprocessor/package-summary.html index f17e275..ec3348b 100644 --- a/testdevapidocs/org/apache/hadoop/hbase/coprocessor/package-summary.html +++ b/testdevapidocs/org/apache/hadoop/hbase/coprocessor/package-summary.html @@ -479,58 +479,62 @@ -TestRegionObserverBypass +TestRegionCoprocessorHost -TestRegionObserverBypass.TestCoprocessor +TestRegionObserverBypass +TestRegionObserverBypass.TestCoprocessor + + + TestRegionObserverBypass.TestCoprocessor2 Calls through to TestCoprocessor. - + TestRegionObserverBypass.TestCoprocessor3 Calls through to TestCoprocessor. - + TestRegionObserverForAddingMutationsFromCoprocessors - + TestRegionObserverForAddingMutationsFromCoprocessors.TestDeleteCellCoprocessor - + TestRegionObserverForAddingMutationsFromCoprocessors.TestDeleteFamilyCoprocessor - + TestRegionObserverForAddingMutationsFromCoprocessors.TestDeleteRowCoprocessor - + TestRegionObserverForAddingMutationsFromCoprocessors.TestMultiMutationCoprocessor - + TestRegionObserverForAddingMutationsFromCoprocessors.TestWALObserver - + TestRegionObserverInterface - + TestRegionObserverInterface.EvenOnlyCompactor - + TestRegionObserverPreFlushAndPreCompact Test that we fail if a Coprocessor tries to return a null scanner out @@ -539,131 +543,131 @@ CompactionLifeCycleTracker, CompactionRequest) - + TestRegionObserverPreFlushAndPreCompact.TestRegionObserver Coprocessor that returns null when preCompact or preFlush is called. - + TestRegionObserverScannerOpenHook - + TestRegionObserverScannerOpenHook.CompactionCompletionNotifyingRegion - + TestRegionObserverScannerOpenHook.EmptyRegionObsever Do the default logic in RegionObserver interface. - + TestRegionObserverScannerOpenHook.NoDataFilter - + TestRegionObserverScannerOpenHook.NoDataFromCompaction Don't allow any data to be written out in the compaction by creating a custom StoreScanner. - + TestRegionObserverScannerOpenHook.NoDataFromFlush Don't allow any data in a flush by creating a custom StoreScanner. - + TestRegionObserverScannerOpenHook.NoDataFromScan Don't return any data from a scan by creating a custom StoreScanner. - + TestRegionObserverStacking - + TestRegionObserverStacking.ObserverA - + TestRegionObserverStacking.ObserverB - + TestRegionObserverStacking.ObserverC - + TestRegionServerCoprocessorEndpoint - + TestRegionServerCoprocessorEndpoint.DummyRegionServerEndpoint - + TestRegionServerCoprocessorExceptionWithAbort Tests unhandled exceptions thrown by coprocessors running on a regionserver. - + TestRegionServerCoprocessorExceptionWithAbort.BuggyRegionObserver - + TestRegionServerCoprocessorExceptionWithAbort.FailedInitializationObserver - + TestRegionServerCoprocessorExceptionWithRemove Tests unhandled exceptions thrown by coprocessors running on regionserver. - + TestRegionServerCoprocessorExceptionWithRemove.BuggyRegionObserver - + TestRowProcessorEndpoint Verifies ProcessEndpoint works. - + TestRowProcessorEndpoint.RowProcessorEndpoint<S extends com.google.protobuf.Message, T extends com.google.protobuf.Message> This class defines two RowProcessors: IncrementCounterProcessor and FriendsOfFriendsProcessor.
- + TestRowProcessorEndpoint.RowProcessorEndpoint.FriendsOfFriendsProcessor - + TestRowProcessorEndpoint.RowProcessorEndpoint.IncrementCounterProcessor - + TestRowProcessorEndpoint.RowProcessorEndpoint.RowSwapProcessor - + TestRowProcessorEndpoint.RowProcessorEndpoint.TimeoutProcessor - + TestSecureExport - + TestWALObserver Tests invocation of the http://git-wip-us.apache.org/repos/asf/hbase-site/blob/849d84a8/testdevapidocs/org/apache/hadoop/hbase/coprocessor/package-tree.html -- diff --git a/testdevapidocs/org/apache/hadoop/hbase/coprocessor/package-tree.html b/testdevapidocs/org/apache/hadoop/hbase/coprocessor/package-tree.html index b6f3005..98006bc 100644 --- a/testdevapidocs/org/apache/hadoop/hbase/coprocessor/package-tree.html +++ b/testdevapidocs/org/apache/hadoop/hbase/coprocessor/package-tree.html @@ -240,6 +240,7 @@ org.apache.hadoop.hbase.coprocessor.TestPostIncrementAndAppendBeforeWAL.ChangeCellWithDifferntColumnFamilyObserver (implements org.apache.hadoop.hbase.coprocessor.RegionCoprocessor, org.apache.hadoop.hbase.coprocessor.RegionObserver) org.apache.hadoop.hbase.coprocessor.TestPostIncrementAndAppendBeforeWAL.ChangeCellWithNotExistColumnFamilyObserver (implements
[06/51] [partial] hbase-site git commit: Published site at 3ab895979b643a2980bcdb7fee2078f14b614210.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/mapreduce/class-use/MutationSerialization.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/mapreduce/class-use/MutationSerialization.html b/devapidocs/org/apache/hadoop/hbase/mapreduce/class-use/MutationSerialization.html index a7ab356..eefc000 100644 --- a/devapidocs/org/apache/hadoop/hbase/mapreduce/class-use/MutationSerialization.html +++ b/devapidocs/org/apache/hadoop/hbase/mapreduce/class-use/MutationSerialization.html @@ -120,6 +120,6 @@ -Copyright © 2007–2018 The Apache Software Foundation. All rights reserved. +Copyright © 2007–2019 The Apache Software Foundation. All rights reserved.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/mapreduce/class-use/PutCombiner.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/mapreduce/class-use/PutCombiner.html b/devapidocs/org/apache/hadoop/hbase/mapreduce/class-use/PutCombiner.html index 8ad5ca0..475a882 100644 --- a/devapidocs/org/apache/hadoop/hbase/mapreduce/class-use/PutCombiner.html +++ b/devapidocs/org/apache/hadoop/hbase/mapreduce/class-use/PutCombiner.html @@ -120,6 +120,6 @@ -Copyright © 2007–2018 The Apache Software Foundation. All rights reserved. +Copyright © 2007–2019 The Apache Software Foundation. All rights reserved.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/mapreduce/class-use/PutSortReducer.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/mapreduce/class-use/PutSortReducer.html b/devapidocs/org/apache/hadoop/hbase/mapreduce/class-use/PutSortReducer.html index d246f74..56dd2a5 100644 --- a/devapidocs/org/apache/hadoop/hbase/mapreduce/class-use/PutSortReducer.html +++ b/devapidocs/org/apache/hadoop/hbase/mapreduce/class-use/PutSortReducer.html @@ -120,6 +120,6 @@ -Copyright © 2007–2018 The Apache Software Foundation. All rights reserved. +Copyright © 2007–2019 The Apache Software Foundation. All rights reserved.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/mapreduce/class-use/RegionSizeCalculator.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/mapreduce/class-use/RegionSizeCalculator.html b/devapidocs/org/apache/hadoop/hbase/mapreduce/class-use/RegionSizeCalculator.html index 61b7bde..e08521b 100644 --- a/devapidocs/org/apache/hadoop/hbase/mapreduce/class-use/RegionSizeCalculator.html +++ b/devapidocs/org/apache/hadoop/hbase/mapreduce/class-use/RegionSizeCalculator.html @@ -164,6 +164,6 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods. -Copyright © 2007–2018 The Apache Software Foundation. All rights reserved. +Copyright © 2007–2019 The Apache Software Foundation. All rights reserved.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/mapreduce/class-use/ResultSerialization.Result94Deserializer.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/mapreduce/class-use/ResultSerialization.Result94Deserializer.html b/devapidocs/org/apache/hadoop/hbase/mapreduce/class-use/ResultSerialization.Result94Deserializer.html index 06e0de7..72b0648 100644 --- a/devapidocs/org/apache/hadoop/hbase/mapreduce/class-use/ResultSerialization.Result94Deserializer.html +++ b/devapidocs/org/apache/hadoop/hbase/mapreduce/class-use/ResultSerialization.Result94Deserializer.html @@ -120,6 +120,6 @@ -Copyright © 2007–2018 The Apache Software Foundation. All rights reserved. +Copyright © 2007–2019 The Apache Software Foundation. All rights reserved.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/mapreduce/class-use/ResultSerialization.ResultDeserializer.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/mapreduce/class-use/ResultSerialization.ResultDeserializer.html b/devapidocs/org/apache/hadoop/hbase/mapreduce/class-use/ResultSerialization.ResultDeserializer.html index b380dc1..dbd9375 100644 --- a/devapidocs/org/apache/hadoop/hbase/mapreduce/class-use/ResultSerialization.ResultDeserializer.html +++ b/devapidocs/org/apache/hadoop/hbase/mapreduce/class-use/ResultSerialization.ResultDeserializer.html @@ -120,6 +120,6 @@ -Copyright
[06/51] [partial] hbase-site git commit: Published site at 7820ba1dbdba58b1002cdfde08eb21aa7a0bb6da.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/09ea0d5f/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html index 0f5a095..50bf692 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html @@ -78,8712 +78,8714 @@ 070import java.util.concurrent.locks.ReadWriteLock; 071import java.util.concurrent.locks.ReentrantReadWriteLock; 072import java.util.function.Function; -073import org.apache.hadoop.conf.Configuration; -074import org.apache.hadoop.fs.FileStatus; -075import org.apache.hadoop.fs.FileSystem; -076import org.apache.hadoop.fs.LocatedFileStatus; -077import org.apache.hadoop.fs.Path; -078import org.apache.hadoop.hbase.Cell; -079import org.apache.hadoop.hbase.CellBuilderType; -080import org.apache.hadoop.hbase.CellComparator; -081import org.apache.hadoop.hbase.CellComparatorImpl; -082import org.apache.hadoop.hbase.CellScanner; -083import org.apache.hadoop.hbase.CellUtil; -084import org.apache.hadoop.hbase.CompareOperator; -085import org.apache.hadoop.hbase.CompoundConfiguration; -086import org.apache.hadoop.hbase.DoNotRetryIOException; -087import org.apache.hadoop.hbase.DroppedSnapshotException; -088import org.apache.hadoop.hbase.ExtendedCellBuilderFactory; -089import org.apache.hadoop.hbase.HConstants; -090import org.apache.hadoop.hbase.HConstants.OperationStatusCode; -091import org.apache.hadoop.hbase.HDFSBlocksDistribution; -092import org.apache.hadoop.hbase.KeyValue; -093import org.apache.hadoop.hbase.KeyValueUtil; -094import org.apache.hadoop.hbase.NamespaceDescriptor; -095import org.apache.hadoop.hbase.NotServingRegionException; -096import org.apache.hadoop.hbase.PrivateCellUtil; -097import org.apache.hadoop.hbase.RegionTooBusyException; -098import org.apache.hadoop.hbase.Tag; -099import org.apache.hadoop.hbase.TagUtil; -100import org.apache.hadoop.hbase.UnknownScannerException; -101import org.apache.hadoop.hbase.client.Append; -102import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; -103import org.apache.hadoop.hbase.client.CompactionState; -104import org.apache.hadoop.hbase.client.Delete; -105import org.apache.hadoop.hbase.client.Durability; -106import org.apache.hadoop.hbase.client.Get; -107import org.apache.hadoop.hbase.client.Increment; -108import org.apache.hadoop.hbase.client.IsolationLevel; -109import org.apache.hadoop.hbase.client.Mutation; -110import org.apache.hadoop.hbase.client.PackagePrivateFieldAccessor; -111import org.apache.hadoop.hbase.client.Put; -112import org.apache.hadoop.hbase.client.RegionInfo; -113import org.apache.hadoop.hbase.client.RegionInfoBuilder; -114import org.apache.hadoop.hbase.client.RegionReplicaUtil; -115import org.apache.hadoop.hbase.client.Result; -116import org.apache.hadoop.hbase.client.RowMutations; -117import org.apache.hadoop.hbase.client.Scan; -118import org.apache.hadoop.hbase.client.TableDescriptor; -119import org.apache.hadoop.hbase.client.TableDescriptorBuilder; -120import org.apache.hadoop.hbase.conf.ConfigurationManager; -121import org.apache.hadoop.hbase.conf.PropagatingConfigurationObserver; -122import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; -123import org.apache.hadoop.hbase.coprocessor.RegionObserver.MutationType; -124import 
org.apache.hadoop.hbase.errorhandling.ForeignExceptionSnare; -125import org.apache.hadoop.hbase.exceptions.FailedSanityCheckException; -126import org.apache.hadoop.hbase.exceptions.TimeoutIOException; -127import org.apache.hadoop.hbase.exceptions.UnknownProtocolException; -128import org.apache.hadoop.hbase.filter.ByteArrayComparable; -129import org.apache.hadoop.hbase.filter.FilterWrapper; -130import org.apache.hadoop.hbase.filter.IncompatibleFilterException; -131import org.apache.hadoop.hbase.io.HFileLink; -132import org.apache.hadoop.hbase.io.HeapSize; -133import org.apache.hadoop.hbase.io.TimeRange; -134import org.apache.hadoop.hbase.io.hfile.BlockCache; -135import org.apache.hadoop.hbase.io.hfile.HFile; -136import org.apache.hadoop.hbase.ipc.CallerDisconnectedException; -137import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils; -138import org.apache.hadoop.hbase.ipc.RpcCall; -139import org.apache.hadoop.hbase.ipc.RpcServer; -140import org.apache.hadoop.hbase.mob.MobFileCache; -141import org.apache.hadoop.hbase.monitoring.MonitoredTask; -142import org.apache.hadoop.hbase.monitoring.TaskMonitor; -143import org.apache.hadoop.hbase.quotas.RegionServerSpaceQuotaManager; -144import org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl.WriteEntry; -145import
[06/51] [partial] hbase-site git commit: Published site at c448604ceb987d113913f0583452b2abce04db0d.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4f8b8424/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.HFileScannerImpl.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.HFileScannerImpl.html b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.HFileScannerImpl.html index 79cb21b..d8d391b 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.HFileScannerImpl.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.HFileScannerImpl.html @@ -378,1508 +378,1510 @@ 370 371 @Override 372 public void returnBlock(HFileBlock block) { -373BlockCache blockCache = this.cacheConf.getBlockCache(); -374if (blockCache != null && block != null) { -375 BlockCacheKey cacheKey = new BlockCacheKey(this.getFileContext().getHFileName(), -376 block.getOffset(), this.isPrimaryReplicaReader(), block.getBlockType()); -377 blockCache.returnBlock(cacheKey, block); -378} -379 } -380 /** -381 * @return the first key in the file. May be null if file has no entries. Note -382 * that this is not the first row key, but rather the byte form of the -383 * first KeyValue. -384 */ -385 @Override -386 public Optional<Cell> getFirstKey() { -387if (dataBlockIndexReader == null) { -388 throw new BlockIndexNotLoadedException(); -389} -390return dataBlockIndexReader.isEmpty() ? Optional.empty() -391: Optional.of(dataBlockIndexReader.getRootBlockKey(0)); -392 } -393 -394 /** -395 * TODO left from {@link HFile} version 1: move this to StoreFile after Ryan's -396 * patch goes in to eliminate {@link KeyValue} here. -397 * -398 * @return the first row key, or null if the file is empty. -399 */ -400 @Override -401 public Optional<byte[]> getFirstRowKey() { -402// We have to copy the row part to form the row key alone -403return getFirstKey().map(CellUtil::cloneRow); -404 } -405 -406 /** -407 * TODO left from {@link HFile} version 1: move this to StoreFile after -408 * Ryan's patch goes in to eliminate {@link KeyValue} here. -409 * -410 * @return the last row key, or null if the file is empty. -411 */ -412 @Override -413 public Optional<byte[]> getLastRowKey() { -414// We have to copy the row part to form the row key alone -415return getLastKey().map(CellUtil::cloneRow); -416 } -417 -418 /** @return number of KV entries in this HFile */ -419 @Override -420 public long getEntries() { -421return trailer.getEntryCount(); -422 } -423 -424 /** @return comparator */ -425 @Override -426 public CellComparator getComparator() { -427return comparator; -428 } -429 -430 /** @return compression algorithm */ -431 @Override -432 public Compression.Algorithm getCompressionAlgorithm() { -433return compressAlgo; -434 } -435 -436 /** -437 * @return the total heap size of data and meta block indexes in bytes. Does -438 * not take into account non-root blocks of a multilevel data index. -439 */ -440 @Override -441 public long indexSize() { -442return (dataBlockIndexReader != null ? dataBlockIndexReader.heapSize() : 0) -443+ ((metaBlockIndexReader != null) ? metaBlockIndexReader.heapSize() -444: 0); -445 } -446 -447 @Override -448 public String getName() { -449return name; -450 } -451 -452 @Override -453 public HFileBlockIndex.BlockIndexReader getDataBlockIndexReader() { -454return dataBlockIndexReader; -455 } -456 -457 @Override -458 public FixedFileTrailer getTrailer() { -459return trailer; -460 } -461 -462 @Override -463 public boolean isPrimaryReplicaReader() { -464return primaryReplicaReader; -465 } -466 -467 @Override -468 public FileInfo loadFileInfo() throws IOException { -469return fileInfo; -470 } -471 -472 /** -473 * An exception thrown when an operation requiring a scanner to be seeked -474 * is invoked on a scanner that is not seeked. -475 */ -476 @SuppressWarnings("serial") -477 public static class NotSeekedException extends IllegalStateException { -478public NotSeekedException() { -479 super("Not seeked to a key/value"); -480} -481 } -482 -483 protected static class HFileScannerImpl implements HFileScanner { -484private ByteBuff blockBuffer; -485protected final boolean cacheBlocks; -486protected final boolean pread; -487protected final boolean isCompaction; -488private int currKeyLen; -489private int currValueLen; -490private int currMemstoreTSLen; -491private long currMemstoreTS; -492// Updated but never read? -493protected AtomicInteger blockFetches = new AtomicInteger(0); -494protected final HFile.Reader reader; -495private int currTagsLen; -496// buffer backed keyonlyKV -497private
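The reader methods above (getEntries, getFirstRowKey, getLastRowKey) are enough to inspect an HFile without opening a scanner. A minimal sketch, assuming an HBase 2.x-era HFile.createReader signature and CacheConfig.DISABLED; the input path comes from the command line:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.util.Bytes;

public class HFileInspect {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    FileSystem fs = FileSystem.get(conf);
    Path path = new Path(args[0]); // path to an existing HFile
    // true = primary replica reader, which feeds into BlockCacheKey above.
    try (HFile.Reader reader = HFile.createReader(fs, path, CacheConfig.DISABLED, true, conf)) {
      System.out.println("entries: " + reader.getEntries());
      reader.getFirstRowKey().map(Bytes::toStringBinary)
          .ifPresent(k -> System.out.println("first row key: " + k));
      reader.getLastRowKey().map(Bytes::toStringBinary)
          .ifPresent(k -> System.out.println("last row key: " + k));
    }
  }
}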
[06/51] [partial] hbase-site git commit: Published site at 8bf966c8e936dec4d83bcbe85c5aab543f14a0df.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/27555316/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.IntegrityFixSuggester.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.IntegrityFixSuggester.html b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.IntegrityFixSuggester.html index a957d31..62f81b6 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.IntegrityFixSuggester.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.IntegrityFixSuggester.html @@ -142,5192 +142,5186 @@ 134import org.apache.hadoop.hbase.wal.WAL; 135import org.apache.hadoop.hbase.wal.WALFactory; 136import org.apache.hadoop.hbase.wal.WALSplitter; -137import org.apache.hadoop.hbase.zookeeper.MetaTableLocator; -138import org.apache.hadoop.hbase.zookeeper.ZKUtil; -139import org.apache.hadoop.hbase.zookeeper.ZKWatcher; -140import org.apache.hadoop.hbase.zookeeper.ZNodePaths; -141import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException; -142import org.apache.hadoop.ipc.RemoteException; -143import org.apache.hadoop.security.UserGroupInformation; -144import org.apache.hadoop.util.ReflectionUtils; -145import org.apache.hadoop.util.Tool; -146import org.apache.hadoop.util.ToolRunner; -147import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; -148import org.apache.hbase.thirdparty.com.google.common.collect.Sets; -149import org.apache.yetus.audience.InterfaceAudience; -150import org.apache.yetus.audience.InterfaceStability; -151import org.apache.zookeeper.KeeperException; -152import org.slf4j.Logger; -153import org.slf4j.LoggerFactory; -154 -155import org.apache.hbase.thirdparty.com.google.common.base.Joiner; -156import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; -157import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList; -158import org.apache.hbase.thirdparty.com.google.common.collect.Lists; -159import org.apache.hbase.thirdparty.com.google.common.collect.Multimap; -160import org.apache.hbase.thirdparty.com.google.common.collect.Ordering; -161import org.apache.hbase.thirdparty.com.google.common.collect.TreeMultimap; -162 -163import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; -164import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface; -165 -166/** -167 * HBaseFsck (hbck) is a tool for checking and repairing region consistency and -168 * table integrity problems in a corrupted HBase. This tool was written for hbase-1.x. It does not -169 * work with hbase-2.x; it can read state but is not allowed to change state; i.e. effect 'repair'. -170 * See hbck2 (HBASE-19121) for a hbck tool for hbase2. -171 * -172 * <p> -173 * Region consistency checks verify that hbase:meta, region deployment on region -174 * servers and the state of data in HDFS (.regioninfo files) all are in -175 * accordance. -176 * <p> -177 * Table integrity checks verify that all possible row keys resolve to exactly -178 * one region of a table. This means there are no individual degenerate -179 * or backwards regions; no holes between regions; and that there are no -180 * overlapping regions. -181 * <p> -182 * The general repair strategy works in two phases: -183 * <ol> -184 * <li> Repair Table Integrity on HDFS. (merge or fabricate regions) -185 * <li> Repair Region Consistency with hbase:meta and assignments -186 * </ol> -187 * <p> -188 * For table integrity repairs, the tables' region directories are scanned -189 * for .regioninfo files. Each table's integrity is then verified. If there -190 * are any orphan regions (regions with no .regioninfo files) or holes, new -191 * regions are fabricated. Backwards regions are sidelined as well as empty -192 * degenerate (endkey==startkey) regions. If there are any overlapping regions, -193 * a new region is created and all data is merged into the new region. -194 * <p> -195 * Table integrity repairs deal solely with HDFS and could potentially be done -196 * offline -- the hbase region servers or master do not need to be running. -197 * This phase can eventually be used to completely reconstruct the hbase:meta table in -198 * an offline fashion. -199 * <p> -200 * Region consistency requires three conditions -- 1) valid .regioninfo file -201 * present in an HDFS region dir, 2) valid row with .regioninfo data in META, -202 * and 3) a region is deployed only at the regionserver that was assigned to -203 * with proper state in the master. -204 * <p> -205 * Region consistency repairs require hbase to be online so that hbck can -206 * contact the HBase master and region servers. The hbck#connect() method must -207 * first be called successfully. Much of the region consistency information -208 * is transient and less risky to repair. -209 * <p> -210 * If
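Per the class comment, hbck against hbase-2.x is read-only; the Tool/ToolRunner imports above are how the command line drives it. The simplest programmatic entry point is the class's own main method. A sketch: -details is a real hbck flag, but treat the wiring as illustrative:

import org.apache.hadoop.hbase.util.HBaseFsck;

public class HbckReport {
  public static void main(String[] args) throws Exception {
    // Roughly equivalent to `hbase hbck -details` from the shell: a read-only
    // report of region consistency and table integrity problems.
    HBaseFsck.main(new String[] { "-details" });
  }
}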
[06/51] [partial] hbase-site git commit: Published site at 1acbd36c903b048141866b143507bfce124a5c5f.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/5299e667/devapidocs/src-html/org/apache/hadoop/hbase/TableName.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/TableName.html b/devapidocs/src-html/org/apache/hadoop/hbase/TableName.html index 10f7ae8..99abe5f 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/TableName.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/TableName.html @@ -88,462 +88,467 @@ 080 public static final TableName META_TABLE_NAME = 081 valueOf(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR, "meta"); 082 -083 /** The Namespace table's name. */ -084 public static final TableName NAMESPACE_TABLE_NAME = -085 valueOf(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR, "namespace"); -086 -087 public static final String OLD_META_STR = ".META."; -088 public static final String OLD_ROOT_STR = "-ROOT-"; -089 -090 /** One globally disallowed name */ -091 public static final String DISALLOWED_TABLE_NAME = "zookeeper"; -092 -093 /** -094 * @return True if <code>tn</code> is the hbase:meta table name. -095 */ -096 public static boolean isMetaTableName(final TableName tn) { -097return tn.equals(TableName.META_TABLE_NAME); -098 } -099 -100 /** -101 * TableName for old -ROOT- table. It is used to read/process old WALs which have -102 * ROOT edits. -103 */ -104 public static final TableName OLD_ROOT_TABLE_NAME = getADummyTableName(OLD_ROOT_STR); +083 /** +084 * The Namespace table's name. +085 * @deprecated We have folded the data in namespace table into meta table, so do not use it any +086 * more. +087 */ +088 @Deprecated +089 public static final TableName NAMESPACE_TABLE_NAME = +090 valueOf(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR, "namespace"); +091 +092 public static final String OLD_META_STR = ".META."; +093 public static final String OLD_ROOT_STR = "-ROOT-"; +094 +095 /** One globally disallowed name */ +096 public static final String DISALLOWED_TABLE_NAME = "zookeeper"; +097 +098 /** +099 * @return True if <code>tn</code> is the hbase:meta table name. +100 */ +101 public static boolean isMetaTableName(final TableName tn) { +102return tn.equals(TableName.META_TABLE_NAME); +103 } +104 105 /** -106 * TableName for old .META. table. Used in testing. -107 */ -108 public static final TableName OLD_META_TABLE_NAME = getADummyTableName(OLD_META_STR); -109 -110 private final byte[] name; -111 private final String nameAsString; -112 private final byte[] namespace; -113 private final String namespaceAsString; -114 private final byte[] qualifier; -115 private final String qualifierAsString; -116 private final boolean systemTable; -117 private final int hashCode; -118 -119 /** -120 * Check passed byte array, "tableName", is legal user-space table name. -121 * @return Returns passed <code>tableName</code> param -122 * @throws IllegalArgumentException if passed a tableName is null or -123 * is made of other than 'word' characters or underscores: i.e. -124 * <code>[\p{IsAlphabetic}\p{Digit}.-:]</code>. The ':' is used to delimit the namespace -125 * from the table name and can be used for nothing else. -126 * -127 * Namespace names can only contain 'word' characters -128 * <code>[\p{IsAlphabetic}\p{Digit}]</code> or '_' -129 * -130 * Qualifier names can only contain 'word' characters -131 * <code>[\p{IsAlphabetic}\p{Digit}]</code> or '_', '.' or '-'. -132 * The name may not start with '.' or '-'. -133 * -134 * Valid fully qualified table names: -135 * foo:bar, namespace=&gt;foo, table=&gt;bar -136 * org:foo.bar, namespace=org, table=&gt;foo.bar -137 */ -138 public static byte [] isLegalFullyQualifiedTableName(final byte[] tableName) { -139if (tableName == null || tableName.length <= 0) { -140 throw new IllegalArgumentException("Name is null or empty"); -141} -142 -143int namespaceDelimIndex = -144 org.apache.hbase.thirdparty.com.google.common.primitives.Bytes.lastIndexOf(tableName, -145(byte) NAMESPACE_DELIM); -146if (namespaceDelimIndex < 0) { -147 isLegalTableQualifierName(tableName); -148} else { -149 isLegalNamespaceName(tableName, 0, namespaceDelimIndex); -150 isLegalTableQualifierName(tableName, namespaceDelimIndex + 1, tableName.length); -151} -152return tableName; -153 } -154 -155 public static byte [] isLegalTableQualifierName(final byte[] qualifierName) { -156 isLegalTableQualifierName(qualifierName, 0, qualifierName.length, false); -157return qualifierName; +106 * TableName for old -ROOT- table. It is used to read/process old WALs which have +107 * ROOT edits. +108 */ +109 public static final TableName OLD_ROOT_TABLE_NAME = getADummyTableName(OLD_ROOT_STR); +110 /** +111 * TableName for old .META. table. Used in testing. +112 */ +113 public static final TableName
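The validation above is what TableName.valueOf runs through, with ':' as the only namespace delimiter. A short usage sketch of the public static API shown:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.util.Bytes;

public class TableNameExample {
  public static void main(String[] args) {
    // ':' splits namespace from qualifier, as enforced above.
    TableName t = TableName.valueOf("org:foo.bar");
    System.out.println(t.getNamespaceAsString()); // org
    System.out.println(t.getQualifierAsString()); // foo.bar

    // Throws IllegalArgumentException: qualifiers may not start with '.'.
    TableName.isLegalFullyQualifiedTableName(Bytes.toBytes("org:.bad"));
  }
}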
[06/51] [partial] hbase-site git commit: Published site at 130057f13774f6b213cdb06952c805a29d59396e.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/68eae623/testdevapidocs/src-html/org/apache/hadoop/hbase/io/encoding/TestDataBlockEncoders.html -- diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/io/encoding/TestDataBlockEncoders.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/io/encoding/TestDataBlockEncoders.html index 1222951..6f09c2f 100644 --- a/testdevapidocs/src-html/org/apache/hadoop/hbase/io/encoding/TestDataBlockEncoders.html +++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/io/encoding/TestDataBlockEncoders.html @@ -51,389 +51,414 @@ 043import org.apache.hadoop.hbase.Tag; 044import org.apache.hadoop.hbase.io.ByteArrayOutputStream; 045import org.apache.hadoop.hbase.io.compress.Compression; -046import org.apache.hadoop.hbase.io.hfile.HFileContext; -047import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder; -048import org.apache.hadoop.hbase.nio.SingleByteBuff; -049import org.apache.hadoop.hbase.testclassification.IOTests; -050import org.apache.hadoop.hbase.testclassification.LargeTests; -051import org.apache.hadoop.hbase.util.Bytes; -052import org.apache.hadoop.hbase.util.RedundantKVGenerator; -053import org.junit.ClassRule; -054import org.junit.Test; -055import org.junit.experimental.categories.Category; -056import org.junit.runner.RunWith; -057import org.junit.runners.Parameterized; -058import org.junit.runners.Parameterized.Parameters; -059import org.slf4j.Logger; -060import org.slf4j.LoggerFactory; -061 -062/** -063 * Test all of the data block encoding algorithms for correctness. Most of the -064 * class generate data which will test different branches in code. -065 */ -066@Category({IOTests.class, LargeTests.class}) -067@RunWith(Parameterized.class) -068public class TestDataBlockEncoders { -069 -070 @ClassRule -071 public static final HBaseClassTestRule CLASS_RULE = -072 HBaseClassTestRule.forClass(TestDataBlockEncoders.class); -073 -074 private static final Logger LOG = LoggerFactory.getLogger(TestDataBlockEncoders.class); +046import org.apache.hadoop.hbase.io.compress.Compression.Algorithm; +047import org.apache.hadoop.hbase.io.hfile.HFileContext; +048import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder; +049import org.apache.hadoop.hbase.nio.SingleByteBuff; +050import org.apache.hadoop.hbase.testclassification.IOTests; +051import org.apache.hadoop.hbase.testclassification.LargeTests; +052import org.apache.hadoop.hbase.util.Bytes; +053import org.apache.hadoop.hbase.util.RedundantKVGenerator; +054import org.junit.Assert; +055import org.junit.ClassRule; +056import org.junit.Test; +057import org.junit.experimental.categories.Category; +058import org.junit.runner.RunWith; +059import org.junit.runners.Parameterized; +060import org.junit.runners.Parameterized.Parameters; +061import org.slf4j.Logger; +062import org.slf4j.LoggerFactory; +063 +064/** +065 * Test all of the data block encoding algorithms for correctness. Most of the +066 * class generate data which will test different branches in code. 
+067 */ +068@Category({IOTests.class, LargeTests.class}) +069@RunWith(Parameterized.class) +070public class TestDataBlockEncoders { +071 +072 @ClassRule +073 public static final HBaseClassTestRule CLASS_RULE = +074 HBaseClassTestRule.forClass(TestDataBlockEncoders.class); 075 -076 private static int NUMBER_OF_KV = 1; -077 private static int NUM_RANDOM_SEEKS = 1000; -078 -079 private static int ENCODED_DATA_OFFSET = HConstants.HFILEBLOCK_HEADER_SIZE -080+ DataBlockEncoding.ID_SIZE; -081 static final byte[] HFILEBLOCK_DUMMY_HEADER = new byte[HConstants.HFILEBLOCK_HEADER_SIZE]; -082 -083 private RedundantKVGenerator generator = new RedundantKVGenerator(); -084 private Random randomizer = new Random(42L); -085 -086 private final boolean includesMemstoreTS; -087 private final boolean includesTags; -088 private final boolean useOffheapData; -089 -090 @Parameters -091 public static Collection<Object[]> parameters() { -092return HBaseTestingUtility.memStoreTSTagsAndOffheapCombination(); -093 } -094 -095 public TestDataBlockEncoders(boolean includesMemstoreTS, boolean includesTag, -096 boolean useOffheapData) { -097this.includesMemstoreTS = includesMemstoreTS; -098this.includesTags = includesTag; -099this.useOffheapData = useOffheapData; -100 } -101 -102 private HFileBlockEncodingContext getEncodingContext(Compression.Algorithm algo, -103 DataBlockEncoding encoding) { -104DataBlockEncoder encoder = encoding.getEncoder(); -105HFileContext meta = new HFileContextBuilder() -106 .withHBaseCheckSum(false) -107 .withIncludesMvcc(includesMemstoreTS) -108 .withIncludesTags(includesTags) -109 .withCompression(algo).build(); -110if (encoder != null) { -111 return
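The getEncodingContext helper above hinges on HFileContextBuilder and on DataBlockEncoding.getEncoder(), which returns null for the NONE encoding (hence the null check). The same pattern pulled out of the test, as a standalone sketch with arbitrary flag values:

import org.apache.hadoop.hbase.io.compress.Compression;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.io.hfile.HFileContext;
import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;

public class EncodingContextSketch {
  public static void main(String[] args) {
    HFileContext meta = new HFileContextBuilder()
        .withHBaseCheckSum(false)
        .withIncludesMvcc(true)    // arbitrary flag values for this sketch
        .withIncludesTags(false)
        .withCompression(Compression.Algorithm.NONE)
        .build();
    // NONE has no encoder, so getEncoder() would return null for it.
    DataBlockEncoder encoder = DataBlockEncoding.FAST_DIFF.getEncoder();
    System.out.println("context=" + meta + ", encoder=" + encoder);
  }
}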
[06/51] [partial] hbase-site git commit: Published site at d5e4faacc354c1bc4d93efa71ca97ee3a056123e.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b5e107c3/testdevapidocs/org/apache/hadoop/hbase/regionserver/package-summary.html -- diff --git a/testdevapidocs/org/apache/hadoop/hbase/regionserver/package-summary.html b/testdevapidocs/org/apache/hadoop/hbase/regionserver/package-summary.html index 8a925d1..a6c6c1a 100644 --- a/testdevapidocs/org/apache/hadoop/hbase/regionserver/package-summary.html +++ b/testdevapidocs/org/apache/hadoop/hbase/regionserver/package-summary.html @@ -1039,452 +1039,464 @@ -TestRegionServerAccounting +TestRegionServerAbortTimeout + + + +TestRegionServerAbortTimeout.SleepWhenCloseCoprocessor + + + +TestRegionServerAbortTimeout.TestAbortTimeoutTask +TestRegionServerAccounting + + + TestRegionServerCrashDisableWAL Testcase for HBASE-20742 - + TestRegionServerHostname Tests for the hostname specification by region server - + TestRegionServerMetrics - + TestRegionServerNoMaster Tests on the region server, without the master. - + TestRegionServerOnlineConfigChange Verify that the Online config Changes on the HRegionServer side are actually happening. - + TestRegionServerReadRequestMetrics - + TestRegionServerReadRequestMetrics.ScanRegionCoprocessor - + TestRegionServerRegionSpaceUseReport Test class for isolated (non-cluster) tests surrounding the report of Region space use to the Master by RegionServers. - + TestRegionServerReportForDuty - + TestRegionServerReportForDuty.LogCapturer LogCapturer is similar to GenericTestUtils.LogCapturer except that this implementation has a default appender to the root logger. - + TestRegionServerReportForDuty.MyRegionServer - + TestRegionServerReportForDuty.NeverInitializedMaster This test HMaster class will always throw ServerNotRunningYetException if checked. - + TestRegionSplitPolicy - + TestRemoveRegionMetrics - + TestResettingCounters - + TestReversibleScanners Test cases against ReversibleKeyValueScanner - + TestRowPrefixBloomFilter Test TestRowPrefixBloomFilter - + TestRowTooBig Test case to check HRS throws RowTooBigException when row size exceeds configured limits. - + TestRpcSchedulerFactory A silly test that does nothing but make sure an rpcscheduler factory makes what it says it is going to make. - + TestRSKilledWhenInitializing Tests that a regionserver that dies after reporting for duty gets removed from list of online regions. - + TestRSKilledWhenInitializing.RegisterAndDieRegionServer A RegionServer that reports for duty and then immediately dies if it is the first to receive the response to a reportForDuty. - + TestRSStatusServlet Tests for the region server status page and its template. - + TestScanner Test of a long-lived scanner validating as we go. - + TestScannerHeartbeatMessages Here we test to make sure that scans return the expected Results when the server is sending the Client heartbeat messages. - + TestScannerHeartbeatMessages.HeartbeatHRegion Custom HRegion class that instantiates RegionScanners with configurable sleep times between fetches of row Results and/or column family cells. - + TestScannerHeartbeatMessages.HeartbeatHRegionServer Custom HRegionServer instance that instantiates TestScannerHeartbeatMessages.HeartbeatRPCServices in place of RSRpcServices to allow us to toggle support for heartbeat messages - + TestScannerHeartbeatMessages.HeartbeatKVHeap Custom KV Heap that can be configured to sleep/wait in between retrievals of column family cells. 
- + TestScannerHeartbeatMessages.HeartbeatRegionScanner Custom RegionScanner that can be configured to sleep between retrievals of row Results and/or column family cells - + TestScannerHeartbeatMessages.HeartbeatReversedKVHeap Custom reversed KV Heap that can be configured to sleep in between retrievals of column family cells. - + TestScannerHeartbeatMessages.HeartbeatReversedRegionScanner Custom ReversedRegionScanner that can be configured to sleep between retrievals of row Results and/or column family cells - + TestScannerHeartbeatMessages.HeartbeatRPCServices Custom RSRpcServices instance that allows heartbeat support to be toggled - + TestScannerHeartbeatMessages.SparseCellFilter - + TestScannerHeartbeatMessages.SparseRowFilter - + TestScannerRetriableFailure - + TestScannerRetriableFailure.FaultyScannerObserver - + TestScannerWithBulkload - + TestScannerWithCorruptHFile Tests a scanner on a corrupt hfile. - + TestScannerWithCorruptHFile.CorruptHFileCoprocessor - + TestScanWithBloomError Test a multi-column scanner when there is a Bloom filter false-positive. - + TestSCVFWithMiniCluster - + TestSecureBulkLoadManager - + TestSeekOptimizations Test
[06/51] [partial] hbase-site git commit: Published site at 3fe8649b2c9ba1271c25e8f476548907e4c7a90d.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8f09a71d/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.PushType.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.PushType.html b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.PushType.html index 9b964f6..98ef11a 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.PushType.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.PushType.html @@ -105,7 +105,7 @@ 097 * will first be initialized to the oldest file's tracker (which is stored in the trailer), using the 098 * method {@link ProcedureStoreTracker#resetTo(ProcedureStoreTracker, boolean)}, and then merge it 099 * with the tracker of every newer wal file, using the -100 * {@link ProcedureStoreTracker#setDeletedIfModifiedInBoth(ProcedureStoreTracker, boolean)}. +100 * {@link ProcedureStoreTracker#setDeletedIfModifiedInBoth(ProcedureStoreTracker)}. 101 * If we find out 102 * that all the modified procedures for the oldest wal file are modified or deleted in newer wal 103 * files, then we can delete it. This is because that, every time we call @@ -1181,244 +1181,243 @@ 1173} 1174 1175// compute the holding tracker. -1176// - the first WAL is used for the 'updates' -1177// - the global tracker is passed in first to decide which procedures are not -1178//exist anymore, so we can mark them as deleted in holdingCleanupTracker. -1179//Only global tracker have the whole picture here. -1180// - the other WALs are scanned to remove procs already updated in a newer wal. -1181//If it is updated in a newer wal, we can mark it as deleted in holdingCleanupTracker -1182//But, we can not delete it if it was shown deleted in the newer wal, as said -1183//above. -1184// TODO: exit early if holdingCleanupTracker.isEmpty() -1185 holdingCleanupTracker.resetTo(logs.getFirst().getTracker(), true); -1186//Passing in the global tracker, we can delete the procedures not in the global -1187//tracker, because they are deleted in the later logs -1188 holdingCleanupTracker.setDeletedIfModifiedInBoth(storeTracker, true); -1189for (int i = 1, size = logs.size() - 1; i < size; ++i) { -1190 // Set deleteIfNotExists to false since a single log's tracker is passed in. -1191 // Since a specific procedure may not show up in the log at all (not executed or -1192 // updated during the time), we can not delete the procedure just because this log -1193 // doesn't have the info of the procedure. We can delete the procedure only if, -1194 // in this log's tracker, it was cleanly shown that the procedure is modified or deleted -1195 // in the corresponding BitSetNode. -1196 holdingCleanupTracker.setDeletedIfModifiedInBoth(logs.get(i).getTracker(), false); -1197} -1198 } -1199 -1200 /** -1201 * Remove all logs with logId <= {@code lastLogId}. -1202 */ -1203 private void removeAllLogs(long lastLogId, String why) { -1204if (logs.size() <= 1) { -1205 return; -1206} -1207 -1208LOG.info("Remove all state logs with ID less than {}, since {}", lastLogId, why); -1209 -1210boolean removed = false; -1211while (logs.size() > 1) { -1212 ProcedureWALFile log = logs.getFirst(); -1213 if (lastLogId < log.getLogId()) { -1214break; -1215 } -1216 removeLogFile(log, walArchiveDir); -1217 removed = true; -1218} -1219 -1220if (removed) { -1221 buildHoldingCleanupTracker(); -1222} -1223 } -1224 -1225 private boolean removeLogFile(final ProcedureWALFile log, final Path walArchiveDir) { -1226try { -1227 LOG.trace("Removing log={}", log); -1228 log.removeFile(walArchiveDir); -1229 logs.remove(log); -1230 LOG.debug("Removed log={}, activeLogs={}", log, logs); -1231 assert logs.size() > 0 : "expected at least one log"; -1232} catch (IOException e) { -1233 LOG.error("Unable to remove log: " + log, e); -1234 return false; -1235} -1236return true; -1237 } -1238 -1239 // == -1240 // FileSystem Log Files helpers -1241 // == -1242 public Path getWALDir() { -1243return this.walDir; -1244 } -1245 -1246 @VisibleForTesting -1247 Path getWalArchiveDir() { -1248return this.walArchiveDir; -1249 } -1250 -1251 public FileSystem getFileSystem() { -1252return this.fs; -1253 } -1254 -1255 protected Path getLogFilePath(final long logId) throws IOException { -1256return new Path(walDir, String.format(LOG_PREFIX + "%020d.log", logId)); -1257
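The cleanup rule the comments above describe (the oldest WAL file is deletable once every procedure it modified is also seen as modified or deleted in newer files) reduces to bit-set arithmetic. A toy model only, using plain java.util.BitSet rather than the real ProcedureStoreTracker API:

import java.util.BitSet;

public class HoldingCleanupModel {
  public static void main(String[] args) {
    // Procedure ids modified by the oldest WAL file.
    BitSet oldestModified = new BitSet();
    oldestModified.set(1);
    oldestModified.set(2);

    // Procedure ids modified or deleted across all newer WAL files.
    BitSet newer = new BitSet();
    newer.set(1);
    newer.set(2);
    newer.set(7);

    // Analogue of setDeletedIfModifiedInBoth: drop superseded entries.
    BitSet stillHeld = (BitSet) oldestModified.clone();
    stillHeld.andNot(newer);

    // Empty => nothing is held back, so removeAllLogs may drop the oldest file.
    System.out.println("oldest WAL deletable: " + stillHeld.isEmpty());
  }
}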
[06/51] [partial] hbase-site git commit: Published site at 7adf590106826b9e4432cfeee06acdc0ccff8c6e.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/425db230/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestHRegion.HRegionForTesting.html -- diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestHRegion.HRegionForTesting.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestHRegion.HRegionForTesting.html index ed3db7a..156dabb 100644 --- a/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestHRegion.HRegionForTesting.html +++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestHRegion.HRegionForTesting.html
@@ -5542,785 +5542,825 @@
5534  }
5535
5536  @Test
-5537  public void testWriteRequestsCounter() throws IOException {
-5538   byte[] fam = Bytes.toBytes("info");
-5539   byte[][] families = { fam };
-5540   this.region = initHRegion(tableName, method, CONF, families);
+5537  public void testReverseScanWhenPutCellsAfterOpenReverseScan() throws Exception {
+5538   byte[] cf1 = Bytes.toBytes("CF1");
+5539   byte[][] families = { cf1 };
+5540   byte[] col = Bytes.toBytes("C");
5541
-5542   Assert.assertEquals(0L, region.getWriteRequestsCount());
-5543
-5544   Put put = new Put(row);
-5545   put.addColumn(fam, fam, fam);
-5546
-5547   Assert.assertEquals(0L, region.getWriteRequestsCount());
-5548   region.put(put);
-5549   Assert.assertEquals(1L, region.getWriteRequestsCount());
-5550   region.put(put);
-5551   Assert.assertEquals(2L, region.getWriteRequestsCount());
-5552   region.put(put);
-5553   Assert.assertEquals(3L, region.getWriteRequestsCount());
-5554
-5555   region.delete(new Delete(row));
-5556   Assert.assertEquals(4L, region.getWriteRequestsCount());
-5557  }
-5558
-5559  @Test
-5560  public void testOpenRegionWrittenToWAL() throws Exception {
-5561   final ServerName serverName = ServerName.valueOf(name.getMethodName(), 100, 42);
-5562   final RegionServerServices rss = spy(TEST_UTIL.createMockRegionServerService(serverName));
-5563
-5564   HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name.getMethodName()));
-5565   htd.addFamily(new HColumnDescriptor(fam1));
-5566   htd.addFamily(new HColumnDescriptor(fam2));
-5567
-5568   HRegionInfo hri = new HRegionInfo(htd.getTableName(),
-5569     HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY);
-5570
-5571   // open the region w/o rss and wal and flush some files
-5572   region =
-5573     HBaseTestingUtility.createRegionAndWAL(hri, TEST_UTIL.getDataTestDir(), TEST_UTIL
-5574       .getConfiguration(), htd);
-5575   assertNotNull(region);
-5576
-5577   // create a file in fam1 for the region before opening in OpenRegionHandler
-5578   region.put(new Put(Bytes.toBytes("a")).addColumn(fam1, fam1, fam1));
-5579   region.flush(true);
-5580   HBaseTestingUtility.closeRegionAndWAL(region);
+5542   HBaseConfiguration conf = new HBaseConfiguration();
+5543   this.region = initHRegion(tableName, method, conf, families);
+5544
+5545   Put put = new Put(Bytes.toBytes("16"));
+5546   put.addColumn(cf1, col, Bytes.toBytes("val"));
+5547   region.put(put);
+5548   Put put2 = new Put(Bytes.toBytes("15"));
+5549   put2.addColumn(cf1, col, Bytes.toBytes("val"));
+5550   region.put(put2);
+5551
+5552   // Create a reverse scan
+5553   Scan scan = new Scan(Bytes.toBytes("16"));
+5554   scan.setReversed(true);
+5555   RegionScannerImpl scanner = region.getScanner(scan);
+5556
+5557   // Put a lot of cells that have sequenceIDs greater than the readPt of the reverse scan
+5558   for (int i = 10; i < 20; i++) {
+5559     Put p = new Put(Bytes.toBytes("" + i));
+5560     p.addColumn(cf1, col, Bytes.toBytes("" + i));
+5561     region.put(p);
+5562   }
+5563   List<Cell> currRow = new ArrayList<>();
+5564   boolean hasNext;
+5565   do {
+5566     hasNext = scanner.next(currRow);
+5567   } while (hasNext);
+5568
+5569   assertEquals(2, currRow.size());
+5570   assertEquals("16", Bytes.toString(currRow.get(0).getRowArray(),
+5571     currRow.get(0).getRowOffset(), currRow.get(0).getRowLength()));
+5572   assertEquals("15", Bytes.toString(currRow.get(1).getRowArray(),
+5573     currRow.get(1).getRowOffset(), currRow.get(1).getRowLength()));
+5574  }
+5575
+5576  @Test
+5577  public void testWriteRequestsCounter() throws IOException {
+5578   byte[] fam = Bytes.toBytes("info");
+5579   byte[][] families = { fam };
+5580   this.region = initHRegion(tableName, method, CONF, families);
5581
-5582   ArgumentCaptor<WALEdit> editCaptor = ArgumentCaptor.forClass(WALEdit.class);
+5582   Assert.assertEquals(0L, region.getWriteRequestsCount());
5583
-5584   // capture append() calls
-5585   WAL wal = mockWAL();
-5586   when(rss.getWAL((HRegionInfo) any())).thenReturn(wal);
-5587
-5588   region = HRegion.openHRegion(hri, htd, rss.getWAL(hri),
-5589
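The test above drives HRegion directly; from the client side the same reverse-scan behavior (descending row order, read point fixed at scanner open) is set up like this. A minimal sketch against the public client API; the table name "t1" and the rows are made up.

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class ReverseScanExample {
  public static void main(String[] args) throws IOException {
    try (Connection conn = ConnectionFactory.createConnection();
         Table table = conn.getTable(TableName.valueOf("t1"))) {
      // Start at row "16" inclusive and walk backwards: "16", "15", ...
      Scan scan = new Scan().withStartRow(Bytes.toBytes("16"), true);
      scan.setReversed(true);
      try (ResultScanner scanner = table.getScanner(scan)) {
        for (Result r : scanner) {
          System.out.println(Bytes.toString(r.getRow()));
        }
      }
    }
  }
}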
[06/51] [partial] hbase-site git commit: Published site at 5fbb227deb365fe812d433fe39b85ac4b0ddee20.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c9ebe686/devapidocs/org/apache/hadoop/hbase/util/Bytes.Converter.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/util/Bytes.Converter.html b/devapidocs/org/apache/hadoop/hbase/util/Bytes.Converter.html new file mode 100644 index 000..a8a22f1 --- /dev/null +++ b/devapidocs/org/apache/hadoop/hbase/util/Bytes.Converter.html @@ -0,0 +1,369 @@
[New Javadoc page for the package-private abstract static class org.apache.hadoop.hbase.util.Bytes.Converter (enclosing class: Bytes; direct known subclasses: Bytes.ConverterHolder.PureJavaConverter, Bytes.ConverterHolder.UnsafeConverter). The page lists one constructor, Converter(), and six package-private abstract methods: int putInt(byte[] bytes, int offset, int val), int putLong(byte[] bytes, int offset, long val), int putShort(byte[] bytes, int offset, short val), int toInt(byte[] bytes, int offset, int length), long toLong(byte[] bytes, int offset, int length) and short toShort(byte[] bytes, int offset, int length). Standard Javadoc navigation, tab scripting and inherited java.lang.Object method links omitted.]
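The Converter abstraction exists so Bytes can switch between a pure-Java byte-shuffling implementation and an Unsafe-based one. A hedged sketch of what the pure-Java putInt/toInt pair plausibly looks like (big-endian, matching Bytes' wire format; simplified, without the bounds checks the real class performs, and the class name is made up):

// Simplified big-endian int codec in the style of a pure-Java Converter.
final class PureJavaIntCodec {
  // Write 'val' at 'offset' and return the offset just past it.
  static int putInt(byte[] bytes, int offset, int val) {
    for (int i = offset + 3; i >= offset; i--) {
      bytes[i] = (byte) val;   // lowest byte lands at the highest index
      val >>>= 8;
    }
    return offset + 4;
  }

  // Read 4 big-endian bytes starting at 'offset'.
  static int toInt(byte[] bytes, int offset) {
    int n = 0;
    for (int i = offset; i < offset + 4; i++) {
      n = (n << 8) | (bytes[i] & 0xff);
    }
    return n;
  }
}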
[06/51] [partial] hbase-site git commit: Published site at 821e4d7de2d576189f4288d1c2acf9e9a9471f5c.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/323b17d9/testdevapidocs/src-html/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.StepHook.html -- diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.StepHook.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.StepHook.html index 8c7d5c9..0c87f49 100644 --- a/testdevapidocs/src-html/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.StepHook.html +++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.StepHook.html
@@ -387,7 +387,7 @@
379   */
380  public static void testRecoveryAndDoubleExecution(
381    final ProcedureExecutor<MasterProcedureEnv> procExec, final long procId,
-382    final int numSteps, final boolean expectExecRunning) throws Exception {
+382    final int lastStep, final boolean expectExecRunning) throws Exception {
383   ProcedureTestingUtility.waitProcedure(procExec, procId);
384   assertEquals(false, procExec.isRunning());
385
@@ -405,201 +405,204 @@
397   // fix would be to get all visited states of the procedure and then check if the user-specified
398   // state is in that list. The current assumption of sequential progression of steps/states is
399   // made at multiple places, so we keep the while condition below for simplicity.
-400   Procedure proc = procExec.getProcedure(procId);
+400   Procedure<?> proc = procExec.getProcedure(procId);
401   int stepNum = proc instanceof StateMachineProcedure ?
402     ((StateMachineProcedure<?, ?>) proc).getCurrentStateId() : 0;
-403   while (stepNum < numSteps) {
-404     LOG.info("Restart " + stepNum + " exec state=" + proc);
-405     ProcedureTestingUtility.assertProcNotYetCompleted(procExec, procId);
-406     restartMasterProcedureExecutor(procExec);
-407     ProcedureTestingUtility.waitProcedure(procExec, procId);
-408     // Old proc object is stale, need to get the new one after ProcedureExecutor restart
-409     proc = procExec.getProcedure(procId);
-410     stepNum = proc instanceof StateMachineProcedure ?
-411       ((StateMachineProcedure<?, ?>) proc).getCurrentStateId() : stepNum + 1;
-412   }
-413
-414   assertEquals(expectExecRunning, procExec.isRunning());
-415  }
+403   for (;;) {
+404     if (stepNum == lastStep) {
+405       break;
+406     }
+407     LOG.info("Restart " + stepNum + " exec state=" + proc);
+408     ProcedureTestingUtility.assertProcNotYetCompleted(procExec, procId);
+409     restartMasterProcedureExecutor(procExec);
+410     ProcedureTestingUtility.waitProcedure(procExec, procId);
+411     // Old proc object is stale, need to get the new one after ProcedureExecutor restart
+412     proc = procExec.getProcedure(procId);
+413     stepNum = proc instanceof StateMachineProcedure ?
+414       ((StateMachineProcedure<?, ?>) proc).getCurrentStateId() : stepNum + 1;
+415   }
416
-417  /**
-418   * Run through all procedure flow states TWICE while also restarting
-419   * procedure executor at each step; i.e. force a reread of procedure store.
-420   *
-421   * <p>It does
-422   * <ol><li>Execute step N - kill the executor before store update
-423   * <li>Restart executor/store
-424   * <li>Executes hook for each step twice
-425   * <li>Execute step N - and then save to store
-426   * </ol>
-427   *
-428   * <p>This is a good test for finding state that needs persisting and steps that are not
-429   * idempotent. Use this version of the test when the order in which flow steps are executed is
-430   * not start to finish; where the procedure may vary the flow steps dependent on circumstance
-431   * found.
-432   * @see #testRecoveryAndDoubleExecution(ProcedureExecutor, long, int, boolean)
-433   */
-434  public static void testRecoveryAndDoubleExecution(
-435    final ProcedureExecutor<MasterProcedureEnv> procExec, final long procId, final StepHook hook)
-436    throws Exception {
-437   ProcedureTestingUtility.waitProcedure(procExec, procId);
-438   assertEquals(false, procExec.isRunning());
-439   for (int i = 0; !procExec.isFinished(procId); ++i) {
-440     LOG.info("Restart " + i + " exec state=" + procExec.getProcedure(procId));
-441     if (hook != null) {
-442       assertTrue(hook.execute(i));
-443     }
-444     restartMasterProcedureExecutor(procExec);
-445     ProcedureTestingUtility.waitProcedure(procExec, procId);
-446   }
-447   assertEquals(true, procExec.isRunning());
-448   ProcedureTestingUtility.assertProcNotFailed(procExec, procId);
-449  }
-450
-451  public static void testRecoveryAndDoubleExecution(
-452    final ProcedureExecutor<MasterProcedureEnv> procExec, final long procId) throws Exception {
-453   testRecoveryAndDoubleExecution(procExec, procId, null);
-454  }
-455
-456  /**
-457   * Hook which
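The kill-restart-reexecute pattern these helpers implement generalizes beyond HBase. A minimal, hypothetical harness (the names ToyRecoveryHarness, runWithCrashes and the step model are all made up) showing why each step must be idempotent: a crash before the progress is persisted means the same step runs again after restart.

import java.util.List;
import java.util.function.IntConsumer;

// Toy recovery harness: simulate a crash before every persisted step and
// resume from the last persisted position.
final class ToyRecoveryHarness {
  static void runWithCrashes(List<IntConsumer> steps) {
    int persistedStep = 0;                 // what a real store would hold
    while (persistedStep < steps.size()) {
      // Execute the step, then "crash" before persisting progress ...
      steps.get(persistedStep).accept(persistedStep);
      // ... so after "restart" the very same step is executed a second time.
      steps.get(persistedStep).accept(persistedStep);
      persistedStep++;                     // now persist the progress
    }
  }

  public static void main(String[] args) {
    runWithCrashes(List.of(
        i -> System.out.println("step " + i + " (must be idempotent)"),
        i -> System.out.println("step " + i + " (must be idempotent)")));
  }
}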
[06/51] [partial] hbase-site git commit: Published site at fa5fa6ecdd071b72b58971058ff3ab9d28c3e709.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d1341859/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALFactory.Providers.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALFactory.Providers.html b/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALFactory.Providers.html index d69bb8c..92967f2 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALFactory.Providers.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALFactory.Providers.html
@@ -88,428 +88,404 @@
080
081  public static final String WAL_PROVIDER = "hbase.wal.provider";
082  static final String DEFAULT_WAL_PROVIDER = Providers.defaultProvider.name();
-083  public static final String WAL_PROVIDER_CLASS = "hbase.wal.provider.class";
-084  static final Class<? extends WALProvider> DEFAULT_WAL_PROVIDER_CLASS = AsyncFSWALProvider.class;
+083
+084  public static final String META_WAL_PROVIDER = "hbase.wal.meta_provider";
085
-086  public static final String META_WAL_PROVIDER = "hbase.wal.meta_provider";
-087  public static final String META_WAL_PROVIDER_CLASS = "hbase.wal.meta_provider.class";
-088
-089  final String factoryId;
-090  private final WALProvider provider;
-091  // The meta updates are written to a different wal. If this
-092  // regionserver holds meta regions, then this ref will be non-null.
-093  // lazily initialized; most RegionServers don't deal with META
-094  private final AtomicReference<WALProvider> metaProvider = new AtomicReference<>();
-095
-096  /**
-097   * Configuration-specified WAL Reader used when a custom reader is requested
-098   */
-099  private final Class<? extends AbstractFSWALProvider.Reader> logReaderClass;
-100
-101  /**
-102   * How long to attempt opening in-recovery wals
-103   */
-104  private final int timeoutMillis;
-105
-106  private final Configuration conf;
-107
-108  // Used for the singleton WALFactory, see below.
-109  private WALFactory(Configuration conf) {
-110   // this code is duplicated here so we can keep our members final.
-111   // until we've moved reader/writer construction down into providers, this initialization must
-112   // happen prior to provider initialization, in case they need to instantiate a reader/writer.
-113   timeoutMillis = conf.getInt("hbase.hlog.open.timeout", 300000);
-114   /* TODO Both of these are probably specific to the fs wal provider */
-115   logReaderClass = conf.getClass("hbase.regionserver.hlog.reader.impl", ProtobufLogReader.class,
-116     AbstractFSWALProvider.Reader.class);
-117   this.conf = conf;
-118   // end required early initialization
-119
-120   // this instance can't create wals, just reader/writers.
-121   provider = null;
-122   factoryId = SINGLETON_ID;
-123  }
-124
-125  @VisibleForTesting
-126  Providers getDefaultProvider() {
-127   return Providers.defaultProvider;
-128  }
-129
-130  @VisibleForTesting
-131  /*
-132   * @param clsKey config key for provider classname
-133   * @param key config key for provider enum
-134   * @param defaultValue default value for provider enum
-135   * @return Class which extends WALProvider
-136   */
-137  public Class<? extends WALProvider> getProviderClass(String clsKey, String key,
-138    String defaultValue) {
-139   String clsName = conf.get(clsKey);
-140   if (clsName == null || clsName.isEmpty()) {
-141     clsName = conf.get(key, defaultValue);
-142   }
-143   if (clsName != null && !clsName.isEmpty()) {
-144     try {
-145       return (Class<? extends WALProvider>) Class.forName(clsName);
-146     } catch (ClassNotFoundException exception) {
-147       // try with enum key next
-148     }
-149   }
-150   try {
-151     Providers provider = Providers.valueOf(conf.get(key, defaultValue));
-152
-153     // AsyncFSWALProvider is not guaranteed to work on all Hadoop versions, when it's chosen as
-154     // the default and we can't use it, we want to fall back to FSHLog which we know works on
-155     // all versions.
-156     if (provider == getDefaultProvider() && provider.clazz == AsyncFSWALProvider.class
-157         && !AsyncFSWALProvider.load()) {
-158       // AsyncFSWAL has better performance in most cases, and also uses less resources, we will
-159       // try to use it if possible. It deeply hacks into the internal of DFSClient so will be
-160       // easily broken when upgrading hadoop.
-161       LOG.warn("Failed to load AsyncFSWALProvider, falling back to FSHLogProvider");
-162       return FSHLogProvider.class;
-163     }
-164
-165     // N.b. If the user specifically requested AsyncFSWALProvider but their environment doesn't
-166     // support using it (e.g. AsyncFSWALProvider.load() == false), we should let this fail and
-167     // not fall back to FSHLogProvider.
-168     return provider.clazz;
-169   } catch (IllegalArgumentException exception) {
-170     // Fall back to them
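The resolution order getProviderClass implements, explicit class name first, then the enum key, with a guarded fallback only when the default async provider cannot be loaded, can be shown with plain lookups. A hedged sketch: Map stands in for Configuration, 'asyncLoadable' stands in for AsyncFSWALProvider.load(), and the class and enum here are simplifications, not the real WALFactory API.

import java.util.Map;

final class ProviderResolution {
  enum Provider { asyncfs, filesystem }   // loosely mirrors WALFactory.Providers

  static String resolve(Map<String, String> conf, String clsKey, String key,
      String defaultValue, boolean asyncLoadable) {
    String clsName = conf.get(clsKey);    // explicit class name wins outright
    if (clsName != null && !clsName.isEmpty()) {
      return clsName;
    }
    Provider p = Provider.valueOf(conf.getOrDefault(key, defaultValue));
    Provider def = Provider.valueOf(defaultValue);
    if (p == def && p == Provider.asyncfs && !asyncLoadable) {
      // Only the *default* choice falls back silently; an explicit request
      // for asyncfs is allowed to fail in the real code.
      return "FSHLogProvider";
    }
    return p.name();
  }

  public static void main(String[] args) {
    // Default asyncfs, but the environment can't load it -> fall back.
    System.out.println(resolve(Map.of(), "hbase.wal.provider.class",
        "hbase.wal.provider", "asyncfs", false));   // prints FSHLogProvider
  }
}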
[06/51] [partial] hbase-site git commit: Published site at 6bc7089f9e0793efc9bdd46a84f5ccd9bc4579ad.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/419d0338/testdevapidocs/org/apache/hadoop/hbase/procedure2/
[Batch of one-line Javadoc updates in the class-signature ("extends ...") area of the following test pages under the path above: TestProcedureRecovery.TestMultiStepProcedure.html, TestProcedureRecovery.TestSingleStepProcedure.html (extends org.apache.hadoop.hbase.procedure2.SequentialProcedure), TestProcedureRecovery.TestStateMachineProcedure.html (extends org.apache.hadoop.hbase.procedure2.StateMachineProcedure), TestProcedureReplayOrder.TestProcedure.html (extends org.apache.hadoop.hbase.procedure2.Procedure), TestProcedureReplayOrder.TestSingleStepProcedure.html, TestProcedureReplayOrder.TestTwoStepProcedure.html and TestProcedureSchedulerConcurrency.TestProcedureWithEvent.html. No other content on those pages changed.]
[06/51] [partial] hbase-site git commit: Published site at d7e08317d2f214e4cca7b67578aba0ed7a567d54.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/37cf49a6/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.MutationBatchOperation.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.MutationBatchOperation.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.MutationBatchOperation.html index 566f410..da040ad 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.MutationBatchOperation.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.MutationBatchOperation.html
@@ -341,8361 +341,8425 @@
333  private final int rowLockWaitDuration;
334  static final int DEFAULT_ROWLOCK_WAIT_DURATION = 30000;
335
-336  // The internal wait duration to acquire a lock before read/update
-337  // from the region. It is not per row. The purpose of this wait time
-338  // is to avoid waiting a long time while the region is busy, so that
-339  // we can release the IPC handler soon enough to improve the
-340  // availability of the region server. It can be adjusted by
-341  // tuning configuration "hbase.busy.wait.duration".
-342  final long busyWaitDuration;
-343  static final long DEFAULT_BUSY_WAIT_DURATION = HConstants.DEFAULT_HBASE_RPC_TIMEOUT;
-344
-345  // If updating multiple rows in one call, wait longer,
-346  // i.e. waiting for busyWaitDuration * # of rows. However,
-347  // we can limit the max multiplier.
-348  final int maxBusyWaitMultiplier;
-349
-350  // Max busy wait duration. There is no point to wait longer than the RPC
-351  // purge timeout, when a RPC call will be terminated by the RPC engine.
-352  final long maxBusyWaitDuration;
-353
-354  // Max cell size. If nonzero, the maximum allowed size for any given cell
-355  // in bytes
-356  final long maxCellSize;
-357
-358  // Number of mutations for minibatch processing.
-359  private final int miniBatchSize;
+336  private Path regionDir;
+337  private FileSystem walFS;
+338
+339  // The internal wait duration to acquire a lock before read/update
+340  // from the region. It is not per row. The purpose of this wait time
+341  // is to avoid waiting a long time while the region is busy, so that
+342  // we can release the IPC handler soon enough to improve the
+343  // availability of the region server. It can be adjusted by
+344  // tuning configuration "hbase.busy.wait.duration".
+345  final long busyWaitDuration;
+346  static final long DEFAULT_BUSY_WAIT_DURATION = HConstants.DEFAULT_HBASE_RPC_TIMEOUT;
+347
+348  // If updating multiple rows in one call, wait longer,
+349  // i.e. waiting for busyWaitDuration * # of rows. However,
+350  // we can limit the max multiplier.
+351  final int maxBusyWaitMultiplier;
+352
+353  // Max busy wait duration. There is no point to wait longer than the RPC
+354  // purge timeout, when a RPC call will be terminated by the RPC engine.
+355  final long maxBusyWaitDuration;
+356
+357  // Max cell size. If nonzero, the maximum allowed size for any given cell
+358  // in bytes
+359  final long maxCellSize;
360
-361  // negative number indicates infinite timeout
-362  static final long DEFAULT_ROW_PROCESSOR_TIMEOUT = 60 * 1000L;
-363  final ExecutorService rowProcessorExecutor = Executors.newCachedThreadPool();
-364
-365  private final ConcurrentHashMap<RegionScanner, Long> scannerReadPoints;
-366
-367  /**
-368   * The sequence ID that was encountered when this region was opened.
-369   */
-370  private long openSeqNum = HConstants.NO_SEQNUM;
-371
-372  /**
-373   * The default setting for whether to enable on-demand CF loading for
-374   * scan requests to this region. Requests can override it.
-375   */
-376  private boolean isLoadingCfsOnDemandDefault = false;
-377
-378  private final AtomicInteger majorInProgress = new AtomicInteger(0);
-379  private final AtomicInteger minorInProgress = new AtomicInteger(0);
+361  // Number of mutations for minibatch processing.
+362  private final int miniBatchSize;
+363
+364  // negative number indicates infinite timeout
+365  static final long DEFAULT_ROW_PROCESSOR_TIMEOUT = 60 * 1000L;
+366  final ExecutorService rowProcessorExecutor = Executors.newCachedThreadPool();
+367
+368  private final ConcurrentHashMap<RegionScanner, Long> scannerReadPoints;
+369
+370  /**
+371   * The sequence ID that was encountered when this region was opened.
+372   */
+373  private long openSeqNum = HConstants.NO_SEQNUM;
+374
+375  /**
+376   * The default setting for whether to enable on-demand CF loading for
+377   * scan requests to this region. Requests can override it.
+378   */
+379  private boolean isLoadingCfsOnDemandDefault = false;
380
-381  //
-382  // Context: During replay we want to ensure that we do not lose any data. So, we
-383  // have to be conservative in how we replay wals. For each store, we calculate
-384  // the maxSeqId up to which the
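The three busy-wait fields interact in one small formula: a multi-row call may wait busyWaitDuration per row, capped by maxBusyWaitMultiplier rows' worth and by maxBusyWaitDuration. A hedged sketch of that arithmetic; the class and method names are made up, and in the real code this check lives inside HRegion's lock acquisition.

final class BusyWait {
  // How long to wait for the region lock on a batch of 'rows' mutations.
  static long busyWaitMillis(long busyWaitDuration, int maxBusyWaitMultiplier,
      long maxBusyWaitDuration, int rows) {
    // Wait busyWaitDuration per row, but never more than
    // maxBusyWaitMultiplier rows' worth, and never past the RPC purge timeout.
    return Math.min(maxBusyWaitDuration,
        busyWaitDuration * Math.min(rows, maxBusyWaitMultiplier));
  }
}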
[06/51] [partial] hbase-site git commit: Published site at 8eaaa63114a64bcaeaf0ed9bdd88615ee22255c1.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f6f9d4f3/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RegionServerServices.RegionStateTransitionContext.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RegionServerServices.RegionStateTransitionContext.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RegionServerServices.RegionStateTransitionContext.html index acc491f..e6c6561 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RegionServerServices.RegionStateTransitionContext.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RegionServerServices.RegionStateTransitionContext.html
@@ -26,256 +26,255 @@
018  */
019 package org.apache.hadoop.hbase.regionserver;
020
-021 import java.io.IOException;
-022 import java.util.Collection;
-023 import java.util.List;
-024 import java.util.Map.Entry;
-025 import java.util.concurrent.ConcurrentMap;
-026
+021 import com.google.protobuf.Service;
+022 import java.io.IOException;
+023 import java.util.Collection;
+024 import java.util.List;
+025 import java.util.Map.Entry;
+026 import java.util.concurrent.ConcurrentMap;
027 import org.apache.hadoop.hbase.Abortable;
028 import org.apache.hadoop.hbase.Server;
-029 import org.apache.hadoop.hbase.TableName;
-030 import org.apache.hadoop.hbase.client.RegionInfo;
-031 import org.apache.hadoop.hbase.client.locking.EntityLock;
-032 import org.apache.hadoop.hbase.executor.ExecutorService;
-033 import org.apache.hadoop.hbase.ipc.RpcServerInterface;
-034 import org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager;
-035 import org.apache.hadoop.hbase.quotas.RegionServerSpaceQuotaManager;
-036 import org.apache.hadoop.hbase.quotas.RegionSizeStore;
-037 import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequester;
-038 import org.apache.hadoop.hbase.regionserver.throttle.ThroughputController;
-039 import org.apache.hadoop.hbase.wal.WAL;
-040 import org.apache.yetus.audience.InterfaceAudience;
-041 import org.apache.zookeeper.KeeperException;
+029 import org.apache.hadoop.hbase.TableDescriptors;
+030 import org.apache.hadoop.hbase.TableName;
+031 import org.apache.hadoop.hbase.client.RegionInfo;
+032 import org.apache.hadoop.hbase.client.locking.EntityLock;
+033 import org.apache.hadoop.hbase.executor.ExecutorService;
+034 import org.apache.hadoop.hbase.ipc.RpcServerInterface;
+035 import org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager;
+036 import org.apache.hadoop.hbase.quotas.RegionServerSpaceQuotaManager;
+037 import org.apache.hadoop.hbase.quotas.RegionSizeStore;
+038 import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequester;
+039 import org.apache.hadoop.hbase.regionserver.throttle.ThroughputController;
+040 import org.apache.hadoop.hbase.wal.WAL;
+041 import org.apache.yetus.audience.InterfaceAudience;
042
043 import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode;
044
-045 import com.google.protobuf.Service;
-046
-047 /**
-048  * A curated subset of services provided by {@link HRegionServer}.
-049  * For use internally only. Passed to Managers, Services and Chores so we can pass
-050  * less than a full-on HRegionServer at test-time. Be judicious adding API. Changes cause
-051  * ripples through the code base.
-052  */
-053 @InterfaceAudience.Private
-054 public interface RegionServerServices extends Server, MutableOnlineRegions, FavoredNodesForRegion {
-055
-056  /** @return the WAL for a particular region. Pass null for getting the
-057   * default (common) WAL */
-058  WAL getWAL(RegionInfo regionInfo) throws IOException;
-059
-060  /** @return the List of WALs that are used by this server
-061   *  Doesn't include the meta WAL
-062   */
-063  List<WAL> getWALs() throws IOException;
-064
-065  /**
-066   * @return Implementation of {@link FlushRequester} or null. Usually it will not be null unless
-067   *         during initialization.
-068   */
-069  FlushRequester getFlushRequester();
-070
-071  /**
-072   * @return Implementation of {@link CompactionRequester} or null. Usually it will not be null
-073   *         unless during initialization.
-074   */
-075  CompactionRequester getCompactionRequestor();
-076
-077  /**
-078   * @return the RegionServerAccounting for this Region Server
-079   */
-080  RegionServerAccounting getRegionServerAccounting();
-081
-082  /**
-083   * @return RegionServer's instance of {@link RegionServerRpcQuotaManager}
-084   */
-085  RegionServerRpcQuotaManager getRegionServerRpcQuotaManager();
-086
-087  /**
-088   * @return RegionServer's instance of {@link SecureBulkLoadManager}
-089   */
-090  SecureBulkLoadManager getSecureBulkLoadManager();
-091
-092  /**
-093   * @return RegionServer's instance of {@link RegionServerSpaceQuotaManager}
-094   */
-095  RegionServerSpaceQuotaManager
[06/51] [partial] hbase-site git commit: Published site at cd161d976ef47b84e904f2d54bac65d2f3417c2a.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/fa1bebf8/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.WorkerMonitor.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.WorkerMonitor.html b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.WorkerMonitor.html index 2c14c50..43c66a8 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.WorkerMonitor.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.WorkerMonitor.html
@@ -46,2104 +46,2113 @@
038 import java.util.concurrent.atomic.AtomicLong;
039 import java.util.stream.Collectors;
040 import java.util.stream.Stream;
-041 import org.apache.hadoop.conf.Configuration;
-042 import org.apache.hadoop.hbase.HConstants;
-043 import org.apache.hadoop.hbase.exceptions.IllegalArgumentIOException;
-044 import org.apache.hadoop.hbase.log.HBaseMarkers;
-045 import org.apache.hadoop.hbase.procedure2.Procedure.LockState;
-046 import org.apache.hadoop.hbase.procedure2.store.ProcedureStore;
-047 import org.apache.hadoop.hbase.procedure2.store.ProcedureStore.ProcedureIterator;
-048 import org.apache.hadoop.hbase.procedure2.util.StringUtils;
-049 import org.apache.hadoop.hbase.security.User;
-050 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-051 import org.apache.hadoop.hbase.util.IdLock;
-052 import org.apache.hadoop.hbase.util.NonceKey;
-053 import org.apache.hadoop.hbase.util.Threads;
-054 import org.apache.yetus.audience.InterfaceAudience;
-055 import org.slf4j.Logger;
-056 import org.slf4j.LoggerFactory;
-057
-058 import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-059 import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
-060
-061 import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.ProcedureState;
-062
-063 /**
-064  * Thread Pool that executes the submitted procedures.
-065  * The executor has a ProcedureStore associated.
-066  * Each operation is logged and on restart the pending procedures are resumed.
-067  *
-068  * Unless the Procedure code throws an error (e.g. invalid user input)
-069  * the procedure will complete (at some point in time). On restart the pending
-070  * procedures are resumed and the ones that failed will be rolled back.
-071  *
-072  * The user can add procedures to the executor via submitProcedure(proc),
-073  * check for the finished state via isFinished(procId),
-074  * and get the result via getResult(procId).
-075  */
-076 @InterfaceAudience.Private
-077 public class ProcedureExecutor<TEnvironment> {
-078  private static final Logger LOG = LoggerFactory.getLogger(ProcedureExecutor.class);
-079
-080  public static final String CHECK_OWNER_SET_CONF_KEY = "hbase.procedure.check.owner.set";
-081  private static final boolean DEFAULT_CHECK_OWNER_SET = false;
-082
-083  public static final String WORKER_KEEP_ALIVE_TIME_CONF_KEY =
-084    "hbase.procedure.worker.keep.alive.time.msec";
-085  private static final long DEFAULT_WORKER_KEEP_ALIVE_TIME = TimeUnit.MINUTES.toMillis(1);
-086
-087  /**
-088   * {@link #testing} is non-null when ProcedureExecutor is being tested. Tests will try to
-089   * break PE having it fail at various junctures. When non-null, testing is set to an instance of
-090   * the below internal {@link Testing} class with flags set for the particular test.
-091   */
-092  Testing testing = null;
-093
-094  /**
-095   * Class with parameters describing how to fail/die when in testing-context.
-096   */
-097  public static class Testing {
-098   protected boolean killIfHasParent = true;
-099   protected boolean killIfSuspended = false;
-100
-101   /**
-102    * Kill the PE BEFORE we store state to the WAL. Good for figuring out if a Procedure is
-103    * persisting all the state it needs to recover after a crash.
-104    */
-105   protected boolean killBeforeStoreUpdate = false;
-106   protected boolean toggleKillBeforeStoreUpdate = false;
-107
-108   /**
-109    * Set when we want to fail AFTER state has been stored into the WAL. Rarely used. HBASE-20978
-110    * is about a case where memory-state was being set after store to WAL where a crash could
-111    * cause us to get stuck. This flag allows killing at what was a vulnerable time.
-112    */
-113   protected boolean killAfterStoreUpdate = false;
-114   protected boolean toggleKillAfterStoreUpdate = false;
-115
-116   protected boolean shouldKillBeforeStoreUpdate() {
-117     final boolean kill = this.killBeforeStoreUpdate;
-118     if (this.toggleKillBeforeStoreUpdate) {
-119       this.killBeforeStoreUpdate = !kill;
-120       LOG.warn("Toggle KILL before store update to: " + this.killBeforeStoreUpdate);
-121     }
-122     return kill;
-123   }
-124
-125   protected boolean shouldKillBeforeStoreUpdate(boolean
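The toggle flags above make the executor alternate between "crash before persisting" and "persist normally", so every store-update point gets exercised both ways across a run. A runnable distillation of just that toggling behavior (the class name ToggleKill is made up; the real flags live on ProcedureExecutor.Testing):

final class ToggleKill {
  boolean killBeforeStoreUpdate = true;
  boolean toggleKillBeforeStoreUpdate = true;

  boolean shouldKillBeforeStoreUpdate() {
    boolean kill = killBeforeStoreUpdate;
    if (toggleKillBeforeStoreUpdate) {
      killBeforeStoreUpdate = !kill;   // next call gives the opposite answer
    }
    return kill;
  }

  public static void main(String[] args) {
    ToggleKill t = new ToggleKill();
    for (int i = 0; i < 4; i++) {
      System.out.println("update " + i + " kills? " + t.shouldKillBeforeStoreUpdate());
    }
  }
}

Run as-is it prints true, false, true, false: every other store update "crashes", which is exactly the alternation the Testing flags are designed to produce.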
[06/51] [partial] hbase-site git commit: Published site at c6a65ba63fce85ac7c4b62b96ef2bbe6c35d2f00.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/293abb17/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.DefaultRackManager.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.DefaultRackManager.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.DefaultRackManager.html index c372545..af3b364 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.DefaultRackManager.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.DefaultRackManager.html
@@ -1279,322 +1279,339 @@
1271  List<RegionInfo> lastFewRegions = new ArrayList<>();
1272  // assign the remaining by going through the list and try to assign to servers one-by-one
1273  int serverIdx = RANDOM.nextInt(numServers);
-1274  for (RegionInfo region : unassignedRegions) {
+1274  OUTER : for (RegionInfo region : unassignedRegions) {
1275   boolean assigned = false;
-1276   for (int j = 0; j < numServers; j++) { // try all servers one by one
+1276   INNER : for (int j = 0; j < numServers; j++) { // try all servers one by one
1277    ServerName serverName = servers.get((j + serverIdx) % numServers);
1278    if (!cluster.wouldLowerAvailability(region, serverName)) {
1279     List<RegionInfo> serverRegions =
1280       assignments.computeIfAbsent(serverName, k -> new ArrayList<>());
-1281     serverRegions.add(region);
-1282     cluster.doAssignRegion(region, serverName);
-1283     serverIdx = (j + serverIdx + 1) % numServers; // remain from next server
-1284     assigned = true;
-1285     break;
-1286    }
-1287   }
-1288   if (!assigned) {
-1289    lastFewRegions.add(region);
-1290   }
-1291  }
-1292  // just sprinkle the rest of the regions on random regionservers. The balanceCluster will
-1293  // make it optimal later. we can end up with this if numReplicas > numServers.
-1294  for (RegionInfo region : lastFewRegions) {
-1295   int i = RANDOM.nextInt(numServers);
-1296   ServerName server = servers.get(i);
-1297   List<RegionInfo> serverRegions = assignments.computeIfAbsent(server, k -> new ArrayList<>());
-1298   serverRegions.add(region);
-1299   cluster.doAssignRegion(region, server);
-1300  }
-1301  return assignments;
-1302 }
-1303
-1304 protected Cluster createCluster(List<ServerName> servers, Collection<RegionInfo> regions) {
-1305  // Get the snapshot of the current assignments for the regions in question, and then create
-1306  // a cluster out of it. Note that we might have replicas already assigned to some servers
-1307  // earlier. So we want to get the snapshot to see those assignments, but this will only contain
-1308  // replicas of the regions that are passed (for performance).
-1309  Map<ServerName, List<RegionInfo>> clusterState = getRegionAssignmentsByServer(regions);
-1310
-1311  for (ServerName server : servers) {
-1312   if (!clusterState.containsKey(server)) {
-1313    clusterState.put(server, EMPTY_REGION_LIST);
-1314   }
-1315  }
-1316  return new Cluster(regions, clusterState, null, this.regionFinder,
-1317    rackManager);
-1318 }
-1319
-1320 private List<ServerName> findIdleServers(List<ServerName> servers) {
-1321  return this.services.getServerManager()
-1322    .getOnlineServersListWithPredicator(servers, IDLE_SERVER_PREDICATOR);
-1323 }
-1324
-1325 /**
-1326  * Used to assign a single region to a random server.
-1327  */
-1328 @Override
-1329 public ServerName randomAssignment(RegionInfo regionInfo, List<ServerName> servers)
-1330   throws HBaseIOException {
-1331  metricsBalancer.incrMiscInvocations();
-1332  if (servers != null && servers.contains(masterServerName)) {
-1333   if (shouldBeOnMaster(regionInfo)) {
-1334    return masterServerName;
-1335   }
-1336   if (!LoadBalancer.isTablesOnMaster(getConf())) {
-1337    // Guarantee we do not put any regions on master
-1338    servers = new ArrayList<>(servers);
-1339    servers.remove(masterServerName);
-1340   }
-1341  }
-1342
-1343  int numServers = servers == null ? 0 : servers.size();
-1344  if (numServers == 0) {
-1345   LOG.warn("Wanted to retain assignment but no servers to assign to");
-1346   return null;
-1347  }
-1348  if (numServers == 1) { // Only one server, nothing fancy we can do here
-1349   return servers.get(0);
-1350  }
-1351  List<ServerName> idleServers = findIdleServers(servers);
-1352  if (idleServers.size() == 1) {
-1353   return idleServers.get(0);
-1354  }
-1355  final List<ServerName> finalServers = idleServers.isEmpty() ?
-1356    servers : idleServers;
-1357  List<RegionInfo> regions = Lists.newArrayList(regionInfo);
-1358  Cluster cluster =
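The retain/round-robin loop above reduces to: walk the server list from a random start, take the first server that does not lower availability, and sprinkle anything left over on random servers for the balancer to fix later. A compact, generic sketch of that shape (wouldLowerAvailability is a made-up predicate standing in for the Cluster method; servers is assumed non-empty):

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Random;
import java.util.function.BiPredicate;

final class RoundRobinAssign {
  static <R, S> Map<S, List<R>> assign(List<R> regions, List<S> servers,
      BiPredicate<R, S> wouldLowerAvailability, Random rnd) {
    Map<S, List<R>> assignments = new HashMap<>();
    List<R> leftovers = new ArrayList<>();
    int serverIdx = rnd.nextInt(servers.size());
    for (R region : regions) {
      boolean assigned = false;
      for (int j = 0; j < servers.size(); j++) {      // try servers one by one
        S server = servers.get((j + serverIdx) % servers.size());
        if (!wouldLowerAvailability.test(region, server)) {
          assignments.computeIfAbsent(server, k -> new ArrayList<>()).add(region);
          serverIdx = (j + serverIdx + 1) % servers.size(); // resume from next
          assigned = true;
          break;
        }
      }
      if (!assigned) {
        leftovers.add(region);   // e.g. more replicas than servers
      }
    }
    for (R region : leftovers) { // sprinkle; a later balance pass fixes this
      assignments.computeIfAbsent(servers.get(rnd.nextInt(servers.size())),
          k -> new ArrayList<>()).add(region);
    }
    return assignments;
  }
}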
[06/51] [partial] hbase-site git commit: Published site at 7c1fad4992a169a35b4457e6f4afcb30d04406e9.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/74f60271/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.WorkerThread.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.WorkerThread.html b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.WorkerThread.html index d11176a..2c14c50 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.WorkerThread.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.WorkerThread.html
@@ -982,1050 +982,1168 @@
974  }
975
976  /**
-977   * Add a new root-procedure to the executor.
-978   * @param proc the new procedure to execute.
-979   * @param nonceKey the registered unique identifier for this operation from the client or process.
-980   * @return the procedure id, that can be used to monitor the operation
-981   */
-982  @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="NP_NULL_ON_SOME_PATH",
-983    justification = "FindBugs is blind to the check-for-null")
-984  public long submitProcedure(Procedure<TEnvironment> proc, NonceKey nonceKey) {
-985   Preconditions.checkArgument(lastProcId.get() >= 0);
-986
-987   prepareProcedure(proc);
-988
-989   final Long currentProcId;
-990   if (nonceKey != null) {
-991    currentProcId = nonceKeysToProcIdsMap.get(nonceKey);
-992    Preconditions.checkArgument(currentProcId != null,
-993      "Expected nonceKey=" + nonceKey + " to be reserved, use registerNonce(); proc=" + proc);
-994   } else {
-995    currentProcId = nextProcId();
-996   }
-997
-998   // Initialize the procedure
-999   proc.setNonceKey(nonceKey);
-1000  proc.setProcId(currentProcId.longValue());
-1001
-1002  // Commit the transaction
-1003  store.insert(proc, null);
-1004  LOG.debug("Stored {}", proc);
-1005
-1006  // Add the procedure to the executor
-1007  return pushProcedure(proc);
-1008 }
-1009
-1010 /**
-1011  * Add a set of new root-procedures to the executor.
-1012  * @param procs the new procedures to execute.
-1013  */
-1014 // TODO: Do we need to take nonces here?
-1015 public void submitProcedures(Procedure<TEnvironment>[] procs) {
-1016  Preconditions.checkArgument(lastProcId.get() >= 0);
-1017  if (procs == null || procs.length <= 0) {
-1018   return;
-1019  }
-1020
-1021  // Prepare procedure
-1022  for (int i = 0; i < procs.length; ++i) {
-1023   prepareProcedure(procs[i]).setProcId(nextProcId());
-1024  }
-1025
-1026  // Commit the transaction
-1027  store.insert(procs);
-1028  if (LOG.isDebugEnabled()) {
-1029   LOG.debug("Stored " + Arrays.toString(procs));
-1030  }
-1031
-1032  // Add the procedure to the executor
-1033  for (int i = 0; i < procs.length; ++i) {
-1034   pushProcedure(procs[i]);
-1035  }
-1036 }
-1037
-1038 private Procedure<TEnvironment> prepareProcedure(Procedure<TEnvironment> proc) {
-1039  Preconditions.checkArgument(proc.getState() == ProcedureState.INITIALIZING);
-1040  Preconditions.checkArgument(!proc.hasParent(), "unexpected parent", proc);
-1041  if (this.checkOwnerSet) {
-1042   Preconditions.checkArgument(proc.hasOwner(), "missing owner");
-1043  }
-1044  return proc;
-1045 }
-1046
-1047 private long pushProcedure(Procedure<TEnvironment> proc) {
-1048  final long currentProcId = proc.getProcId();
+977   * Bypass a procedure. If the procedure is set to bypass, all the logic in
+978   * execute/rollback will be ignored and it will return success regardless.
+979   * It is used to recover buggy stuck procedures, releasing the lock resources
+980   * and letting other procedures run. Bypassing one procedure (and its ancestors, which will
+981   * be bypassed automatically) may leave the cluster in a middle state, e.g. a region
+982   * not assigned, or some hdfs files left behind. After getting rid of those stuck procedures,
+983   * the operators may have to do some clean up on hdfs or schedule some assign procedures
+984   * to let the region come online. DO AT YOUR OWN RISK.
+985   * <p>
+986   * A procedure can be bypassed only if
+987   * 1. The procedure is in state of RUNNABLE, WAITING, WAITING_TIMEOUT
+988   *    or it is a root procedure without any child.
+989   * 2. No other worker thread is executing it.
+990   * 3. No child procedure has been submitted.
+991   *
+992   * <p>
+993   * If all the requirements are met, the procedure and its ancestors will be
+994   * bypassed and persisted to WAL.
+995   *
+996   * <p>
+997   * If the procedure is in WAITING state, this will set it to RUNNABLE and add it to the run queue.
+998   * TODO: What about WAITING_TIMEOUT?
+999   * @param id the procedure id
+1000  * @param lockWait time to wait for the lock
+1001  * @param force if force is set to true, we will bypass the procedure even if it is executing.
+1002  *   This is for procedures which
[06/51] [partial] hbase-site git commit: Published site at 3afe9fb7e6ebfa71187cbe131558a83fae61cecd.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/424d7e41/testdevapidocs/org/apache/hadoop/hbase/client/TestClientNoCluster.RegionServerStoppedOnScannerOpenConnection.html -- diff --git a/testdevapidocs/org/apache/hadoop/hbase/client/TestClientNoCluster.RegionServerStoppedOnScannerOpenConnection.html b/testdevapidocs/org/apache/hadoop/hbase/client/TestClientNoCluster.RegionServerStoppedOnScannerOpenConnection.html index f163123..6ab4ef2 100644
[Javadoc method-index update: the page gains two methods, org.apache.hadoop.hbase.client.Hbck getHbck() and getHbck(org.apache.hadoop.hbase.ServerName), raising the method count from 59 to 61. The remaining index rows (getMaster, getNewRpcRetryingCallerFactory, getNonceGenerator, getNumberOfCachedRegionLocations, getRegionLocation, getRegionLocator, getRpcClient, getRpcControllerFactory, getRpcRetryingCallerFactory, getStatisticsTracker, getTable, getTableBuilder, getTableState, hasCellBlockSupport, injectNonceGeneratorForTesting, isAborted, isClosed, isMasterRunning (deprecated), isTableAvailable, isTableDisabled, isTableEnabled and the locateRegion overloads) only shift position. Tab and navigation scripting omitted.]
[06/51] [partial] hbase-site git commit: Published site at a452487a9b82bfd33bc10683c3f8b8ae74d58883.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0cf79db0/devapidocs/src-html/org/apache/hadoop/hbase/filter/ColumnCountGetFilter.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/filter/ColumnCountGetFilter.html b/devapidocs/src-html/org/apache/hadoop/hbase/filter/ColumnCountGetFilter.html index c6137d0..4ca69da 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/filter/ColumnCountGetFilter.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/filter/ColumnCountGetFilter.html
@@ -29,118 +29,133 @@
021
022 import java.io.IOException;
023 import java.util.ArrayList;
-024
-025 import org.apache.hadoop.hbase.Cell;
-026 import org.apache.yetus.audience.InterfaceAudience;
-027 import org.apache.hadoop.hbase.exceptions.DeserializationException;
-028 import org.apache.hadoop.hbase.shaded.protobuf.generated.FilterProtos;
-029
-030 import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
-031 import org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException;
-032
-033 /**
-034  * Simple filter that returns first N columns on row only.
-035  * This filter was written to test filters in Get and as soon as it gets
-036  * its quota of columns, {@link #filterAllRemaining()} returns true. This
-037  * makes this filter unsuitable as a Scan filter.
-038  */
-039 @InterfaceAudience.Public
-040 public class ColumnCountGetFilter extends FilterBase {
-041  private int limit = 0;
-042  private int count = 0;
-043
-044  public ColumnCountGetFilter(final int n) {
-045   Preconditions.checkArgument(n >= 0, "limit must be positive %s", n);
-046   this.limit = n;
-047  }
-048
-049  public int getLimit() {
-050   return limit;
-051  }
-052
-053  @Override
-054  public boolean filterRowKey(Cell cell) throws IOException {
-055   // Impl in FilterBase might do unnecessary copy for Off heap backed Cells.
-056   if (filterAllRemaining()) return true;
-057   return false;
-058  }
-059
-060  @Override
-061  public boolean filterAllRemaining() {
-062   return this.count > this.limit;
-063  }
-064
-065  @Deprecated
-066  @Override
-067  public ReturnCode filterKeyValue(final Cell c) {
-068   return filterCell(c);
-069  }
-070
-071  @Override
-072  public ReturnCode filterCell(final Cell c) {
-073   this.count++;
-074   return filterAllRemaining() ? ReturnCode.NEXT_COL : ReturnCode.INCLUDE_AND_NEXT_COL;
-075  }
-076
-077  @Override
-078  public void reset() {
-079   this.count = 0;
-080  }
-081
-082  public static Filter createFilterFromArguments(ArrayList<byte []> filterArguments) {
-083   Preconditions.checkArgument(filterArguments.size() == 1,
-084     "Expected 1 but got: %s", filterArguments.size());
-085   int limit = ParseFilter.convertByteArrayToInt(filterArguments.get(0));
-086   return new ColumnCountGetFilter(limit);
-087  }
-088
-089  /**
-090   * @return The filter serialized using pb
-091   */
-092  @Override
-093  public byte [] toByteArray() {
-094   FilterProtos.ColumnCountGetFilter.Builder builder =
-095     FilterProtos.ColumnCountGetFilter.newBuilder();
-096   builder.setLimit(this.limit);
-097   return builder.build().toByteArray();
-098  }
-099
-100  /**
-101   * @param pbBytes A pb serialized {@link ColumnCountGetFilter} instance
-102   * @return An instance of {@link ColumnCountGetFilter} made from <code>bytes</code>
-103   * @throws org.apache.hadoop.hbase.exceptions.DeserializationException
-104   * @see #toByteArray
-105   */
-106  public static ColumnCountGetFilter parseFrom(final byte [] pbBytes)
-107    throws DeserializationException {
-108   FilterProtos.ColumnCountGetFilter proto;
-109   try {
-110    proto = FilterProtos.ColumnCountGetFilter.parseFrom(pbBytes);
-111   } catch (InvalidProtocolBufferException e) {
-112    throw new DeserializationException(e);
-113   }
-114   return new ColumnCountGetFilter(proto.getLimit());
-115  }
-116
-117  /**
-118   * @param o the other filter to compare with
-119   * @return true if and only if the fields of the filter that are serialized
-120   * are equal to the corresponding fields in other. Used for testing.
-121   */
-122  @Override
-123  boolean areSerializedFieldsEqual(Filter o) {
-124   if (o == this) return true;
-125   if (!(o instanceof ColumnCountGetFilter)) return false;
-126
-127   ColumnCountGetFilter other = (ColumnCountGetFilter)o;
-128   return this.getLimit() == other.getLimit();
-129  }
-130
-131  @Override
-132  public String toString() {
-133   return this.getClass().getSimpleName() + " " + this.limit;
-134  }
-135 }
+024 import java.util.Objects;
+025
+026 import org.apache.hadoop.hbase.Cell;
+027 import org.apache.yetus.audience.InterfaceAudience;
+028 import org.apache.hadoop.hbase.exceptions.DeserializationException;
+029 import org.apache.hadoop.hbase.shaded.protobuf.generated.FilterProtos;
+030
+031 import
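Because filterAllRemaining() trips as soon as the column quota is hit, this filter is meant for Get rather than Scan, as the class javadoc says. A minimal usage sketch against the public client API; the table name "t1" and row "row1" are made up.

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.filter.ColumnCountGetFilter;
import org.apache.hadoop.hbase.util.Bytes;

public class ColumnCountGetExample {
  public static void main(String[] args) throws IOException {
    // Fetch at most the first 5 columns of one row.
    try (Connection conn = ConnectionFactory.createConnection();
         Table table = conn.getTable(TableName.valueOf("t1"))) {
      Get get = new Get(Bytes.toBytes("row1"));
      get.setFilter(new ColumnCountGetFilter(5));  // quota: first 5 columns
      Result result = table.get(get);
      System.out.println(result.size() + " cells returned");
    }
  }
}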
[06/51] [partial] hbase-site git commit: Published site at 6a5b4f2a5c188f8eef4f2250b8b7db7dd1e750e4.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1ff05a18/testdevapidocs/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure.html b/testdevapidocs/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure.html
index 5d2c1df..7959886 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure.html
@@ -18,7 +18,7 @@
-var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10};
+var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10};
 var tabs = {65535:["t0","All Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -114,7 +114,7 @@
-public class TestCreateTableProcedure
+public class TestCreateTableProcedure
 extends TestTableDDLProcedureBase
@@ -166,10 +166,6 @@
-private static org.slf4j.Logger LOG          (field summary row removed)
 org.junit.rules.TestName name
@@ -222,38 +218,34 @@
-void testMRegions()                          (method summary row removed)
 void testOnHDFSFailure()
 void testRecoveryAndDoubleExecution()
 void testRollbackAndDoubleExecution()
 private void testRollbackAndDoubleExecution(org.apache.hadoop.hbase.client.TableDescriptorBuilder builder)
 void testRollbackAndDoubleExecutionOnMobTable()
 void testSimpleCreate()
 private void testSimpleCreate(org.apache.hadoop.hbase.TableName tableName, byte[][] splitKeys)
 void testSimpleCreateWithSplits()
@@ -292,16 +284,7 @@
 CLASS_RULE
-public static final HBaseClassTestRule CLASS_RULE
-
-LOG
-private static final org.slf4j.Logger LOG    (field detail removed)
+public static final HBaseClassTestRule CLASS_RULE
@@ -310,7 +293,7 @@
 F1
-private static final java.lang.String F1
+private static final java.lang.String F1
 See Also: Constant Field Values
@@ -323,7 +306,7 @@
 F2
-private static final java.lang.String F2
+private static final java.lang.String F2
 See Also: Constant Field Values
@@ -336,7 +319,7 @@
 name
-public org.junit.rules.TestName name
+public org.junit.rules.TestName name
@@ -353,7 +336,7 @@
 TestCreateTableProcedure
-public TestCreateTableProcedure()
+public TestCreateTableProcedure()
@@ -370,7 +353,7 @@
 testSimpleCreate
-public void testSimpleCreate() throws java.lang.Exception
+public void testSimpleCreate() throws java.lang.Exception
@@ -384,7 +367,7 @@
 testSimpleCreateWithSplits
-public void testSimpleCreateWithSplits() throws java.lang.Exception
+public void testSimpleCreateWithSplits() throws java.lang.Exception
@@ -398,7 +381,7 @@
 testSimpleCreate
-private void testSimpleCreate(org.apache.hadoop.hbase.TableName tableName,
+private void testSimpleCreate(org.apache.hadoop.hbase.TableName tableName,
         byte[][] splitKeys) throws java.lang.Exception
@@ -413,7 +396,7 @@
 testCreateWithoutColumnFamily
-public void testCreateWithoutColumnFamily() throws java.lang.Exception
+public void testCreateWithoutColumnFamily() throws java.lang.Exception
@@ -427,7 +410,7 @@
 testCreateExisting
-public void testCreateExisting() throws java.lang.Exception
+public void testCreateExisting() throws java.lang.Exception
@@ -441,7 +424,7 @@
 testRecoveryAndDoubleExecution
-public void testRecoveryAndDoubleExecution() throws
+public void testRecoveryAndDoubleExecution() throws
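The diff above only shows the regenerated Javadoc, so for orientation here is a minimal, hypothetical sketch of the builder pattern the testSimpleCreate(TableName, byte[][]) helper implies: a two-family table descriptor plus split keys. The class name, family values, and helpers are invented; only TableName, TableDescriptorBuilder, ColumnFamilyDescriptorBuilder, and Bytes are real HBase client APIs.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

// Hypothetical sketch of the descriptor/split-key setup used by the tests above.
public class CreateTableSketch {
  private static final String F1 = "f1"; // mirrors the F1/F2 constants in the page
  private static final String F2 = "f2";

  static TableDescriptor twoFamilyTable(TableName tableName) {
    return TableDescriptorBuilder.newBuilder(tableName)
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of(F1))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of(F2))
        .build();
  }

  static byte[][] splitKeys() {
    // Three split points => four regions when the table is created.
    return new byte[][] { Bytes.toBytes("a"), Bytes.toBytes("b"), Bytes.toBytes("c") };
  }
}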
[06/51] [partial] hbase-site git commit: Published site at 63f2d3cbdc8151f5f61f33e0a078c51b9ac076a5.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7ae6a80c/devapidocs/org/apache/hadoop/hbase/procedure2/class-use/StateMachineProcedure.Flow.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/procedure2/class-use/StateMachineProcedure.Flow.html b/devapidocs/org/apache/hadoop/hbase/procedure2/class-use/StateMachineProcedure.Flow.html
index 1a5a90b..bc44c38 100644
--- a/devapidocs/org/apache/hadoop/hbase/procedure2/class-use/StateMachineProcedure.Flow.html
+++ b/devapidocs/org/apache/hadoop/hbase/procedure2/class-use/StateMachineProcedure.Flow.html
@@ -115,6 +115,16 @@
+private StateMachineProcedure.Flow
+TransitRegionStateProcedure.confirmClosed(MasterProcedureEnv env, RegionStateNode regionNode)
+
+private StateMachineProcedure.Flow
+TransitRegionStateProcedure.confirmOpened(MasterProcedureEnv env, RegionStateNode regionNode)
 protected StateMachineProcedure.Flow
 GCMergedRegionsProcedure.executeFromState(MasterProcedureEnv env, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.GCMergedRegionsState state)
@@ -132,10 +142,17 @@
 protected StateMachineProcedure.Flow
 MoveRegionProcedure.executeFromState(MasterProcedureEnv env,
-        org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.MoveRegionState state)
+        org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.MoveRegionState state)
+Deprecated.
+
+protected StateMachineProcedure.Flow
+TransitRegionStateProcedure.executeFromState(MasterProcedureEnv env,
+        org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RegionStateTransitionState state)
 protected StateMachineProcedure.Flow
 SplitTableRegionProcedure.executeFromState(MasterProcedureEnv env, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.SplitTableRegionState state)

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7ae6a80c/devapidocs/org/apache/hadoop/hbase/procedure2/class-use/StateMachineProcedure.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/procedure2/class-use/StateMachineProcedure.html b/devapidocs/org/apache/hadoop/hbase/procedure2/class-use/StateMachineProcedure.html
index ce402ec..dd470ad 100644
--- a/devapidocs/org/apache/hadoop/hbase/procedure2/class-use/StateMachineProcedure.html
+++ b/devapidocs/org/apache/hadoop/hbase/procedure2/class-use/StateMachineProcedure.html
@@ -131,7 +131,9 @@
 class MoveRegionProcedure
-Procedure that implements a RegionPlan.
+Deprecated.
+Do not use any more.
@@ -140,6 +142,12 @@
 The procedure to split a region in a table.
+class TransitRegionStateProcedure
+The procedure to deal with the state transition of a region.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7ae6a80c/devapidocs/org/apache/hadoop/hbase/procedure2/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/procedure2/package-tree.html b/devapidocs/org/apache/hadoop/hbase/procedure2/package-tree.html
index bd09b84..22decce 100644
--- a/devapidocs/org/apache/hadoop/hbase/procedure2/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/procedure2/package-tree.html
@@ -216,11 +216,11 @@
 java.lang.Enum<E> (implements java.lang.Comparable<T>, java.io.Serializable)
-org.apache.hadoop.hbase.procedure2.RootProcedureState.State
+org.apache.hadoop.hbase.procedure2.StateMachineProcedure.Flow
 org.apache.hadoop.hbase.procedure2.LockedResourceType
-org.apache.hadoop.hbase.procedure2.LockType
 org.apache.hadoop.hbase.procedure2.Procedure.LockState
-org.apache.hadoop.hbase.procedure2.StateMachineProcedure.Flow
+org.apache.hadoop.hbase.procedure2.RootProcedureState.State
+org.apache.hadoop.hbase.procedure2.LockType

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7ae6a80c/devapidocs/org/apache/hadoop/hbase/quotas/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/quotas/package-tree.html b/devapidocs/org/apache/hadoop/hbase/quotas/package-tree.html
index c5b39c0..f767f0e 100644
--- a/devapidocs/org/apache/hadoop/hbase/quotas/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/quotas/package-tree.html
@@ -229,13 +229,13 @@
 java.lang.Enum
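Since the rows above are bare signatures, a small sketch may help read them: every executeFromState implementation returns StateMachineProcedure.Flow to tell the procedure executor whether another state follows. The demo enum and class below are invented; Flow and its two values are the real API.

import org.apache.hadoop.hbase.procedure2.StateMachineProcedure.Flow;

// Invented two-state example of the executeFromState contract listed above.
public class FlowContractSketch {
  enum DemoState { PREPARE, CONFIRM }

  static Flow executeFromState(DemoState state) {
    switch (state) {
      case PREPARE:
        // ... do the work for this state ...
        return Flow.HAS_MORE_STATE; // executor will call again with the next state
      case CONFIRM:
      default:
        return Flow.NO_MORE_STATE;  // state machine is finished
    }
  }
}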
[06/51] [partial] hbase-site git commit: Published site at 092efb42749bf7fc6ad338c96aae8e7b9d3a2c74.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f3d62514/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.WriterThread.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.WriterThread.html b/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.WriterThread.html
index 63e4b46..514f830 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.WriterThread.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.WriterThread.html
@@ -468,15 +468,15 @@
  * creating it if necessary.
  * @param logEntry
  * @param fileNameBeingSplit the file being split currently. Used to generate tmp file name.
- * @param conf
- * @return Path to file into which to dump split log edits.
- * @throws IOException
- */
-@SuppressWarnings("deprecation")
-@VisibleForTesting
-static Path getRegionSplitEditsPath(final Entry logEntry, String fileNameBeingSplit,
-    Configuration conf)
-    throws IOException {
+ * @param tmpDirName of the directory used to sideline old recovered edits file
+ * @param conf
+ * @return Path to file into which to dump split log edits.
+ * @throws IOException
+ */
+@SuppressWarnings("deprecation")
+@VisibleForTesting
+static Path getRegionSplitEditsPath(final Entry logEntry, String fileNameBeingSplit,
+    String tmpDirName, Configuration conf) throws IOException {
   FileSystem fs = FileSystem.get(conf);
   Path rootDir = FSUtils.getRootDir(conf);
   Path tableDir = FSUtils.getTableDir(rootDir, logEntry.getKey().getTableName());
@@ -491,7 +491,7 @@
     return null;
   }
   if (fs.exists(dir) && fs.isFile(dir)) {
-    Path tmp = new Path("/tmp");
+    Path tmp = new Path(tmpDirName);
     if (!fs.exists(tmp)) {
       fs.mkdirs(tmp);
     }
@@ -1520,411 +1520,413 @@
  * @return a path with a writer for that path. caller should close.
  */
 WriterAndPath createWAP(byte[] region, Entry entry) throws IOException {
-  Path regionedits = getRegionSplitEditsPath(entry,
-      fileBeingSplit.getPath().getName(), conf);
   if (regionedits == null) {
     return null;
   }
   FileSystem rootFs = FileSystem.get(conf);
   if (rootFs.exists(regionedits)) {
     LOG.warn("Found old edits file. It could be the "
         + "result of a previous failed split attempt. Deleting " + regionedits + ", length="
         + rootFs.getFileStatus(regionedits).getLen());
     if (!rootFs.delete(regionedits, false)) {
       LOG.warn("Failed delete of old {}", regionedits);
     }
   }
   Writer w = createWriter(regionedits);
   LOG.debug("Creating writer path={}", regionedits);
   return new WriterAndPath(regionedits, w, entry.getKey().getSequenceId());
 }

 void filterCellByStore(Entry logEntry) {
   Map<byte[], Long> maxSeqIdInStores =
       regionMaxSeqIdInStores.get(Bytes.toString(logEntry.getKey().getEncodedRegionName()));
   if (MapUtils.isEmpty(maxSeqIdInStores)) {
     return;
   }
   // Create the array list for the cells that aren't filtered.
   // We make the assumption that most cells will be kept.
   ArrayList<Cell> keptCells = new ArrayList<>(logEntry.getEdit().getCells().size());
   for (Cell cell : logEntry.getEdit().getCells()) {
     if (CellUtil.matchingFamily(cell, WALEdit.METAFAMILY)) {
       keptCells.add(cell);
     } else {
       byte[] family = CellUtil.cloneFamily(cell);
       Long maxSeqId = maxSeqIdInStores.get(family);
       // Do not skip cell even if maxSeqId is null. Maybe we are in a rolling upgrade,
       // or the master was crashed before and we can not get the information.
       if (maxSeqId == null || maxSeqId.longValue() < logEntry.getKey().getSequenceId()) {
         keptCells.add(cell);
       }
     }
   }

   // Anything in the keptCells array list is still live.
   // So rather than removing the cells from the array list
   // which would be an O(n^2) operation, we just replace the list
   logEntry.getEdit().setCells(keptCells);
 }

 @Override
 public void append(RegionEntryBuffer buffer) throws IOException {
   appendBuffer(buffer, true);
 }

 WriterAndPath appendBuffer(RegionEntryBuffer buffer, boolean reusable) throws IOException {
   List<Entry> entries = buffer.entryBuffer;
   if (entries.isEmpty()) {
     LOG.warn("got an empty buffer, skipping");
     return null;
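The keep-or-skip rule inside filterCellByStore above is easy to miss in the diff, so here is a standalone rendering of just that rule, with deliberately simplified types (String families and a plain long sequence id instead of Cell/byte[]); everything else in this class is invented.

import java.util.ArrayList;
import java.util.List;
import java.util.Map;

public class SeqIdFilterSketch {
  // Keep a family's edit if no max flushed seqId is recorded for it, or if
  // this edit is newer than what was already flushed.
  static List<String> keptFamilies(Map<String, Long> maxSeqIdInStores,
      List<String> families, long entrySeqId) {
    List<String> kept = new ArrayList<>(families.size());
    for (String family : families) {
      Long maxSeqId = maxSeqIdInStores.get(family);
      // Do not skip even when maxSeqId is null: we may be mid rolling
      // upgrade, or the master crashed and the information is gone.
      if (maxSeqId == null || maxSeqId.longValue() < entrySeqId) {
        kept.add(family);
      }
    }
    return kept;
  }
}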
[06/51] [partial] hbase-site git commit: Published site at 613d831429960348dc42c3bdb6ea5d31be15c81c.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7cf6034b/testapidocs/org/apache/hadoop/hbase/MiniHBaseCluster.html
--
diff --git a/testapidocs/org/apache/hadoop/hbase/MiniHBaseCluster.html b/testapidocs/org/apache/hadoop/hbase/MiniHBaseCluster.html
index 00c8bf0..1e87652 100644
--- a/testapidocs/org/apache/hadoop/hbase/MiniHBaseCluster.html
+++ b/testapidocs/org/apache/hadoop/hbase/MiniHBaseCluster.html
@@ -18,7 +18,7 @@
-var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":42,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10};
+var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":42,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":10,"i65":10,"i66":10,"i67":10,"i68":10};
 var tabs = {65535:["t0","All Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"],32:["t6","Deprecated Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -119,7 +119,7 @@
 @InterfaceAudience.Public
-public class MiniHBaseCluster
+public class MiniHBaseCluster
 extends HBaseCluster
 This class creates a single process HBase cluster, with a thread for each server. The master uses
 the 'default' FileSystem. The RegionServers,
@@ -416,38 +416,45 @@
+void killNameNode(ServerName serverName)
+    Kills the namenode process if this is a distributed cluster, otherwise, this causes master to
+    exit doing basic clean up only.
 void killRegionServer(ServerName serverName)
     Kills the region server process if this is a distributed cluster, otherwise this causes the
     region server to exit doing basic clean up only.
 void killZkNode(ServerName serverName)
     Kills the zookeeper node process if this is a distributed cluster, otherwise, this causes
     master to exit doing basic clean up only.
 void shutdown()
     Shut down the mini HBase cluster
 void startDataNode(ServerName serverName)
     Starts a new datanode on the given hostname or if this is a mini/local cluster, silently logs
     warning message.
 JVMClusterUtil.MasterThread startMaster()
     Starts a master thread running
 void startMaster(String hostname, int port)
@@ -455,13 +462,20 @@
+void startNameNode(ServerName serverName)
+    Starts a new namenode on the given hostname or if this is a mini/local cluster, silently logs
+    warning message.
 JVMClusterUtil.RegionServerThread startRegionServer()
     Starts a region server thread running
 void startRegionServer(String hostname, int port)
@@ -469,13 +483,13 @@
 JVMClusterUtil.RegionServerThread startRegionServerAndWait(long timeout)
     Starts a region server thread and waits until it's processed by master.
 void startZkNode(String hostname, int port)
@@ -483,120 +497,140 @@
 void stopDataNode(ServerName serverName)
     Stops the datanode if this is a distributed cluster, otherwise silently logs warning message.
 JVMClusterUtil.MasterThread stopMaster(int serverNumber)
     Shut down the specified master cleanly
 JVMClusterUtil.MasterThread stopMaster(int serverNumber, boolean shutdownFS)
     Shut down the specified master cleanly
 void stopMaster(ServerName serverName)
     Stops the given master, by attempting a gradual stop.
+void stopNameNode(ServerName serverName)
+    Stops the namenode if this is a distributed cluster, otherwise silently logs warning message.
 JVMClusterUtil.RegionServerThread stopRegionServer(int serverNumber)
     Shut down the specified region server cleanly
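The new namenode methods sit next to the existing lifecycle calls, so a short usage sketch of that lifecycle may help. This assumes the common HBaseTestingUtility entry point and a one-server mini cluster; it is a smoke-test sketch, not the project's own test code.

import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.MiniHBaseCluster;

public class MiniClusterSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtility util = new HBaseTestingUtility();
    // Start a single-process cluster (one master, one region server).
    MiniHBaseCluster cluster = util.startMiniCluster(1);
    // Add a second region server thread, as listed in the summary above.
    cluster.startRegionServer();
    // Tear everything down when done.
    util.shutdownMiniCluster();
  }
}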
[06/51] [partial] hbase-site git commit: Published site at ba5d1c1f28301adc99019d9d6c4a04fac98ae511.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/804782f0/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html
index 95f2a65..073d0d0 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html
@@ -931,7 +931,7 @@
 InitMetaProcedure initMetaProc = null;
 if (assignmentManager.getRegionStates().getRegionState(RegionInfoBuilder.FIRST_META_REGIONINFO)
     .isOffline()) {
-  Optional<Procedure<?>> optProc = procedureExecutor.getProcedures().stream()
+  Optional<Procedure<MasterProcedureEnv>> optProc = procedureExecutor.getProcedures().stream()
     .filter(p -> p instanceof InitMetaProcedure).findAny();
   if (optProc.isPresent()) {
     initMetaProc = (InitMetaProcedure) optProc.get();
@@ -3210,566 +3210,567 @@
   cpHost.preGetProcedures();
 }

 final List<Procedure<?>> procList = this.procedureExecutor.getProcedures();

 if (cpHost != null) {
   cpHost.postGetProcedures(procList);
 }

 return procList;
}

@Override
public List<LockedResource> getLocks() throws IOException {
  if (cpHost != null) {
    cpHost.preGetLocks();
  }

  MasterProcedureScheduler procedureScheduler =
      procedureExecutor.getEnvironment().getProcedureScheduler();

  final List<LockedResource> lockedResources = procedureScheduler.getLocks();

  if (cpHost != null) {
    cpHost.postGetLocks(lockedResources);
  }

  return lockedResources;
}

/**
 * Returns the list of table descriptors that match the specified request
 * @param namespace the namespace to query, or null if querying for all
 * @param regex The regular expression to match against, or null if querying for all
 * @param tableNameList the list of table names, or null if querying for all
 * @param includeSysTables False to match only against userspace tables
 * @return the list of table descriptors
 */
public List<TableDescriptor> listTableDescriptors(final String namespace, final String regex,
    final List<TableName> tableNameList, final boolean includeSysTables)
    throws IOException {
  List<TableDescriptor> htds = new ArrayList<>();
  if (cpHost != null) {
    cpHost.preGetTableDescriptors(tableNameList, htds, regex);
  }
  htds = getTableDescriptors(htds, namespace, regex, tableNameList, includeSysTables);
  if (cpHost != null) {
    cpHost.postGetTableDescriptors(tableNameList, htds, regex);
  }
  return htds;
}

/**
 * Returns the list of table names that match the specified request
 * @param regex The regular expression to match against, or null if querying for all
 * @param namespace the namespace to query, or null if querying for all
 * @param includeSysTables False to match only against userspace tables
 * @return the list of table names
 */
public List<TableName> listTableNames(final String namespace, final String regex,
    final boolean includeSysTables) throws IOException {
  List<TableDescriptor> htds = new ArrayList<>();
  if (cpHost != null) {
    cpHost.preGetTableNames(htds, regex);
  }
  htds = getTableDescriptors(htds, namespace, regex, null, includeSysTables);
  if (cpHost != null) {
    cpHost.postGetTableNames(htds, regex);
  }
  List<TableName> result = new ArrayList<>(htds.size());
  for (TableDescriptor htd : htds) {
    result.add(htd.getTableName());
  }
  return result;
}

/**
 * @return list of table descriptors after filtering by regex and whether to include system
 *         tables, etc.
 * @throws IOException
 */
private List<TableDescriptor> getTableDescriptors(final List<TableDescriptor> htds,
    final String namespace, final String regex, final List<TableName> tableNameList,
    final boolean includeSysTables)
    throws IOException {
  if (tableNameList == null || tableNameList.isEmpty()) {
    // request for all TableDescriptors
    Collection<TableDescriptor> allHtds;
    if (namespace != null && namespace.length() > 0) {
      // Do a check on the namespace existence. Will fail if does not exist.
      this.clusterSchemaService.getNamespace(namespace);
      allHtds = tableDescriptors.getByNamespace(namespace).values();
    } else {
      allHtds =
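listTableDescriptors and listTableNames above follow the same coprocessor bracketing: pre-hook, compute, post-hook, return. Below is a generic rendering of that shape, with an invented CpHost stand-in instead of the real MasterCoprocessorHost API.

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

public class HookBracketSketch {
  interface CpHost {                       // stand-in, not the real API
    void preList() throws IOException;     // may veto by throwing
    void postList(List<String> names) throws IOException; // may observe or filter
  }

  static List<String> listNames(CpHost cpHost) throws IOException {
    if (cpHost != null) {
      cpHost.preList();
    }
    List<String> names = new ArrayList<>();
    // ... fill names from the table registry ...
    if (cpHost != null) {
      cpHost.postList(names);
    }
    return names;
  }
}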
[06/51] [partial] hbase-site git commit: Published site at b4759ce6e72f50ccd9d410bd5917dc5a515414f1.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/df8fd1d3/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.html
index 233dba3..91b9055 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.html
@@ -540,1205 +540,1204 @@
 sm.getRegionMetrics().forEach((byte[] regionName, RegionMetrics rm) -> {
   Deque<BalancerRegionLoad> rLoads = oldLoads.get(Bytes.toString(regionName));
   if (rLoads == null) {
     // There was nothing there
     rLoads = new ArrayDeque<>();
   } else if (rLoads.size() >= numRegionLoadsToRemember) {
     rLoads.remove();
   }
   rLoads.add(new BalancerRegionLoad(rm));
   loads.put(Bytes.toString(regionName), rLoads);
   });
 });

 for (CostFromRegionLoadFunction cost : regionLoadFunctions) {
   cost.setLoads(loads);
 }

 protected void initCosts(Cluster cluster) {
   for (CostFunction c : costFunctions) {
     c.init(cluster);
   }
 }

 protected void updateCostsWithAction(Cluster cluster, Action action) {
   for (CostFunction c : costFunctions) {
     c.postAction(action);
   }
 }

 /**
  * Get the names of the cost functions
  */
 public String[] getCostFunctionNames() {
   if (costFunctions == null) return null;
   String[] ret = new String[costFunctions.length];
   for (int i = 0; i < costFunctions.length; i++) {
     CostFunction c = costFunctions[i];
     ret[i] = c.getClass().getSimpleName();
   }

   return ret;
 }

 /**
  * This is the main cost function. It will compute a cost associated with a proposed cluster
  * state. All different costs will be combined with their multipliers to produce a double cost.
  *
  * @param cluster The state of the cluster
  * @param previousCost the previous cost. This is used as an early out.
  * @return a double of a cost associated with the proposed cluster state. This cost is an
  *         aggregate of all individual cost functions.
  */
 protected double computeCost(Cluster cluster, double previousCost) {
   double total = 0;

   for (int i = 0; i < costFunctions.length; i++) {
     CostFunction c = costFunctions[i];
     this.tempFunctionCosts[i] = 0.0;

     if (c.getMultiplier() <= 0) {
       continue;
     }

     Float multiplier = c.getMultiplier();
     Double cost = c.cost();

     this.tempFunctionCosts[i] = multiplier * cost;
     total += this.tempFunctionCosts[i];

     if (total > previousCost) {
       break;
     }
   }

   return total;
 }

 /** Generates a candidate action to be applied to the cluster for cost function search */
 abstract static class CandidateGenerator {
   abstract Cluster.Action generate(Cluster cluster);

   /**
    * From a list of regions pick a random one. Null can be returned, which
    * {@link StochasticLoadBalancer#balanceCluster(Map)} recognizes as a signal to try a region
    * move rather than a swap.
    *
    * @param cluster        The state of the cluster
    * @param server         index of the server
    * @param chanceOfNoSwap Chance that this will decide to try a move rather than a swap.
    * @return a random {@link RegionInfo} or null if an asymmetrical move is suggested.
    */
   protected int pickRandomRegion(Cluster cluster, int server, double chanceOfNoSwap) {
     // Check to see if this is just a move.
     if (cluster.regionsPerServer[server].length == 0 || RANDOM.nextFloat() < chanceOfNoSwap) {
       // signal a move only.
       return -1;
     }
     int rand = RANDOM.nextInt(cluster.regionsPerServer[server].length);
     return cluster.regionsPerServer[server][rand];
   }

   protected int pickRandomServer(Cluster cluster) {
     if (cluster.numServers < 1) {
       return -1;
     }

     return RANDOM.nextInt(cluster.numServers);
   }

   protected int pickRandomRack(Cluster cluster) {
     if (cluster.numRacks < 1) {
       return -1;
     }

     return RANDOM.nextInt(cluster.numRacks);
   }

   protected int
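computeCost above combines per-function costs with multipliers and bails out early once the candidate state is already worse than the previous cost. Here is a self-contained rendering of that shape, with an invented CostFn stand-in for the balancer's CostFunction.

public class WeightedCostSketch {
  interface CostFn {
    float multiplier(); // <= 0 means the function is disabled
    double cost();      // the function's current cost for the cluster state
  }

  static double computeCost(CostFn[] fns, double previousCost) {
    double total = 0;
    for (CostFn fn : fns) {
      if (fn.multiplier() <= 0) {
        continue;                  // skip disabled cost functions
      }
      total += fn.multiplier() * fn.cost();
      if (total > previousCost) {
        break;                     // early out: already worse than before
      }
    }
    return total;
  }
}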
[06/51] [partial] hbase-site git commit: Published site at e66a6603e36ecd67237ca16acd5e2de03f0d372d.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0c6f447e/apidocs/org/apache/hadoop/hbase/class-use/DoNotRetryIOException.html
--
diff --git a/apidocs/org/apache/hadoop/hbase/class-use/DoNotRetryIOException.html b/apidocs/org/apache/hadoop/hbase/class-use/DoNotRetryIOException.html
index a652985..0a58d3f 100644
--- a/apidocs/org/apache/hadoop/hbase/class-use/DoNotRetryIOException.html
+++ b/apidocs/org/apache/hadoop/hbase/class-use/DoNotRetryIOException.html
@@ -1,10 +1,10 @@
-类 org.apache.hadoop.hbase.DoNotRetryIOException的使用 (Apache HBase 3.0.0-SNAPSHOT API)
+Uses of Class org.apache.hadoop.hbase.DoNotRetryIOException (Apache HBase 3.0.0-SNAPSHOT API)
@@ -12,7 +12,7 @@
-您的浏览器已禁用 JavaScript。
+JavaScript is disabled on your browser.
-跳过导航链接
+Skip navigation links
-概览 -程序包 -类 -使用 -树 -已过时 -索引 -帮助
+Overview +Package +Class +Use +Tree +Deprecated +Index +Help
-上一个 -下一个
+Prev +Next
-框架 -无框架
+Frames +NoFrames
-所有类
+All Classes
-类的使用 org.apache.hadoop.hbase.DoNotRetryIOException
+Uses of Class org.apache.hadoop.hbase.DoNotRetryIOException
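The page above only tracks where DoNotRetryIOException is used, but its contract is worth illustrating: it marks failures that retrying cannot fix, so clients should rethrow instead of looping. The retry wrapper below is invented; only the exception class is real HBase API.

import org.apache.hadoop.hbase.DoNotRetryIOException;

public class RetrySketch {
  interface Op { void run() throws Exception; }

  static void withRetries(Op op, int maxAttempts) throws Exception {
    for (int attempt = 1; ; attempt++) {
      try {
        op.run();
        return;
      } catch (DoNotRetryIOException e) {
        throw e;                    // never retry these
      } catch (Exception e) {
        if (attempt >= maxAttempts) {
          throw e;                  // retry budget exhausted
        }
      }
    }
  }
}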