Revert "HBASE-14614 Procedure v2 - Core Assignment Manager (Matteo Bertozzi)" Revert a mistaken commit!!!
This reverts commit dc1065a85da3098f4defbe008b08057e34d94d4f.

Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/a3c5a744
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/a3c5a744
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/a3c5a744

Branch: refs/heads/master
Commit: a3c5a74487fa47b6cb8b2132e7fdc5f0f4f7f314
Parents: dc1065a
Author: Michael Stack <[email protected]>
Authored: Wed May 24 23:31:36 2017 -0700
Committer: Michael Stack <[email protected]>
Committed: Wed May 24 23:31:36 2017 -0700

----------------------------------------------------------------------
 .../org/apache/hadoop/hbase/ClusterStatus.java | 8 +-
 .../org/apache/hadoop/hbase/HRegionInfo.java | 14 -
 .../apache/hadoop/hbase/MetaTableAccessor.java | 15 +-
 .../hbase/client/ConnectionImplementation.java | 12 -
 .../client/ShortCircuitMasterConnection.java | 12 -
 .../hadoop/hbase/ipc/NettyRpcDuplexHandler.java | 4 +-
 .../apache/hadoop/hbase/ipc/RpcConnection.java | 6 +-
 .../apache/hadoop/hbase/master/RegionState.java | 22 +-
 .../hbase/shaded/protobuf/ProtobufUtil.java | 122 +-
 .../hbase/shaded/protobuf/RequestConverter.java | 16 +-
 .../shaded/protobuf/ResponseConverter.java | 13 +
 .../hbase/zookeeper/MetaTableLocator.java | 7 +-
 .../org/apache/hadoop/hbase/ProcedureInfo.java | 6 +-
 .../master/MetricsAssignmentManagerSource.java | 20 +-
 .../MetricsAssignmentManagerSourceImpl.java | 36 +-
 .../procedure2/AbstractProcedureScheduler.java | 40 +-
 .../hadoop/hbase/procedure2/Procedure.java | 206 +-
 .../hadoop/hbase/procedure2/ProcedureEvent.java | 2 +-
 .../hbase/procedure2/ProcedureExecutor.java | 223 +-
 .../procedure2/ProcedureInMemoryChore.java | 6 +-
 .../hbase/procedure2/ProcedureScheduler.java | 3 +-
 .../procedure2/RemoteProcedureDispatcher.java | 375 -
 .../hbase/procedure2/SequentialProcedure.java | 9 +-
 .../hbase/procedure2/StateMachineProcedure.java | 28 +-
 .../procedure2/store/NoopProcedureStore.java | 4 +-
 .../hbase/procedure2/store/ProcedureStore.java | 3 +-
 .../procedure2/store/wal/ProcedureWALFile.java | 18 +-
 .../store/wal/ProcedureWALFormatReader.java | 38 +-
 .../procedure2/store/wal/WALProcedureStore.java | 55 +-
 .../hbase/procedure2/util/DelayedUtil.java | 6 +-
 .../hbase/procedure2/TestProcedureToString.java | 4 +-
 .../protobuf/generated/AccessControlProtos.java | 102 +-
 .../shaded/protobuf/generated/AdminProtos.java | 17247 +++++++----------
 .../generated/MasterProcedureProtos.java | 9360 ++-------
 .../shaded/protobuf/generated/MasterProtos.java | 7786 +++-----
 .../shaded/protobuf/generated/QuotaProtos.java | 134 +-
 .../generated/RegionServerStatusProtos.java | 1679 +-
 .../protobuf/generated/SnapshotProtos.java | 22 +-
 .../src/main/protobuf/Admin.proto | 51 +-
 .../src/main/protobuf/Master.proto | 37 -
 .../src/main/protobuf/MasterProcedure.proto | 117 +-
 .../src/main/protobuf/RegionServerStatus.proto | 26 +
 .../hbase/rsgroup/RSGroupAdminServer.java | 13 +-
 .../hbase/rsgroup/RSGroupBasedLoadBalancer.java | 9 +-
 .../balancer/TestRSGroupBasedLoadBalancer.java | 2 +-
 .../hadoop/hbase/rsgroup/TestRSGroups.java | 16 +-
 .../hbase/rsgroup/TestRSGroupsOfflineMode.java | 3 +-
 .../master/AssignmentManagerStatusTmpl.jamon | 51 +-
 .../hbase/tmpl/master/MasterStatusTmpl.jamon | 2 +-
 .../hadoop/hbase/RegionStateListener.java | 7 +-
 .../org/apache/hadoop/hbase/SplitLogTask.java | 4 -
 .../hadoop/hbase/backup/HFileArchiver.java | 15 +-
 .../hadoop/hbase/client/VersionInfoUtil.java | 81 +-
 .../hbase/coprocessor/RegionObserver.java | 22 +-
 .../org/apache/hadoop/hbase/ipc/CallRunner.java | 9 +-
 .../apache/hadoop/hbase/ipc/RpcExecutor.java | 5 +-
 .../hadoop/hbase/ipc/SimpleRpcServer.java | 16 +-
 .../hadoop/hbase/master/AssignCallable.java | 49 +
 .../hadoop/hbase/master/AssignmentManager.java | 3053 +++
 .../hadoop/hbase/master/BulkAssigner.java | 122 +
 .../apache/hadoop/hbase/master/BulkReOpen.java | 136 +
 .../hadoop/hbase/master/CatalogJanitor.java | 101 +-
 .../apache/hadoop/hbase/master/DeadServer.java | 6 +-
 .../hbase/master/GeneralBulkAssigner.java | 213 +
 .../org/apache/hadoop/hbase/master/HMaster.java | 209 +-
 .../hadoop/hbase/master/LoadBalancer.java | 2 +-
 .../hbase/master/MasterCoprocessorHost.java | 22 -
 .../hadoop/hbase/master/MasterDumpServlet.java | 8 +-
 .../hbase/master/MasterMetaBootstrap.java | 61 +-
 .../hadoop/hbase/master/MasterRpcServices.java | 109 +-
 .../hadoop/hbase/master/MasterServices.java | 30 -
 .../hadoop/hbase/master/MasterWalManager.java | 17 +-
 .../hbase/master/MetricsAssignmentManager.java | 39 +-
 .../hbase/master/NoSuchProcedureException.java | 33 -
 .../apache/hadoop/hbase/master/RegionPlan.java | 4 +-
 .../hadoop/hbase/master/RegionStateStore.java | 268 +
 .../hadoop/hbase/master/RegionStates.java | 1170 ++
 .../hadoop/hbase/master/ServerManager.java | 85 +-
 .../hadoop/hbase/master/SplitLogManager.java | 2 +-
 .../hbase/master/TableNamespaceManager.java | 5 +-
 .../hadoop/hbase/master/TableStateManager.java | 3 +-
 .../hadoop/hbase/master/UnAssignCallable.java | 47 +
 .../master/assignment/AssignProcedure.java | 338 -
 .../master/assignment/AssignmentManager.java | 1709 --
 .../FailedRemoteDispatchException.java | 33 -
 .../assignment/GCMergedRegionsProcedure.java | 170 -
 .../master/assignment/GCRegionProcedure.java | 155 -
 .../assignment/MergeTableRegionsProcedure.java | 776 -
 .../master/assignment/MoveRegionProcedure.java | 145 -
 .../master/assignment/RegionStateStore.java | 327 -
 .../hbase/master/assignment/RegionStates.java | 969 -
 .../assignment/RegionTransitionProcedure.java | 381 -
 .../assignment/SplitTableRegionProcedure.java | 733 -
 .../master/assignment/UnassignProcedure.java | 247 -
 .../hadoop/hbase/master/assignment/Util.java | 60 -
 .../hbase/master/balancer/BaseLoadBalancer.java | 33 +-
 .../balancer/FavoredStochasticBalancer.java | 11 +-
 .../master/balancer/RegionLocationFinder.java | 14 +-
 .../master/balancer/SimpleLoadBalancer.java | 9 +-
 .../master/balancer/StochasticLoadBalancer.java | 8 +-
 .../hbase/master/locking/LockProcedure.java | 3 +-
 .../AbstractStateMachineNamespaceProcedure.java | 3 +-
 .../AbstractStateMachineRegionProcedure.java | 133 -
 .../AbstractStateMachineTableProcedure.java | 14 +-
 .../procedure/AddColumnFamilyProcedure.java | 31 +-
 .../procedure/CloneSnapshotProcedure.java | 4 +-
 .../master/procedure/CreateTableProcedure.java | 41 +-
 .../procedure/DeleteColumnFamilyProcedure.java | 31 +-
 .../master/procedure/DeleteTableProcedure.java | 12 +-
 .../master/procedure/DisableTableProcedure.java | 156 +-
 .../DispatchMergingRegionsProcedure.java | 584 -
 .../master/procedure/EnableTableProcedure.java | 172 +-
 .../procedure/MasterDDLOperationHelper.java | 93 +-
 .../procedure/MasterProcedureConstants.java | 2 +-
 .../master/procedure/MasterProcedureEnv.java | 30 +-
 .../procedure/MasterProcedureScheduler.java | 25 +-
 .../procedure/MergeTableRegionsProcedure.java | 906 +
 .../procedure/ModifyColumnFamilyProcedure.java | 30 +-
 .../master/procedure/ModifyTableProcedure.java | 30 +-
 .../master/procedure/ProcedureSyncWait.java | 146 +-
 .../master/procedure/RSProcedureDispatcher.java | 541 -
 .../procedure/RestoreSnapshotProcedure.java | 27 +-
 .../master/procedure/ServerCrashException.java | 46 -
 .../master/procedure/ServerCrashProcedure.java | 587 +-
 .../procedure/SplitTableRegionProcedure.java | 785 +
 .../procedure/TableProcedureInterface.java | 3 +-
 .../procedure/TruncateTableProcedure.java | 6 +-
 .../apache/hadoop/hbase/mob/MobFileCache.java | 4 +-
 .../hbase/namespace/NamespaceAuditor.java | 10 +-
 .../hbase/namespace/NamespaceStateManager.java | 5 +-
 .../hadoop/hbase/quotas/MasterQuotaManager.java | 8 +-
 .../hadoop/hbase/regionserver/CompactSplit.java | 723 -
 .../hbase/regionserver/CompactSplitThread.java | 695 +
 .../regionserver/CompactedHFilesDischarger.java | 77 +-
 .../hadoop/hbase/regionserver/HRegion.java | 92 +-
 .../hbase/regionserver/HRegionFileSystem.java | 4 +-
 .../hbase/regionserver/HRegionServer.java | 134 +-
 .../hbase/regionserver/RSRpcServices.java | 120 +-
 .../hadoop/hbase/regionserver/Region.java | 8 -
 .../hbase/regionserver/RegionMergeRequest.java | 108 -
 .../regionserver/RegionServerServices.java | 10 +
 .../hbase/regionserver/RegionUnassigner.java | 5 +-
 .../hadoop/hbase/regionserver/SplitRequest.java | 91 +-
 .../handler/CloseRegionHandler.java | 2 +-
 .../org/apache/hadoop/hbase/util/HBaseFsck.java | 2 +-
 .../hadoop/hbase/util/ModifyRegionUtils.java | 24 +-
 .../apache/hadoop/hbase/wal/WALSplitter.java | 16 +-
 .../hbase/zookeeper/RegionServerTracker.java | 4 +-
 .../hadoop/hbase/HBaseTestingUtility.java | 12 +-
 .../hadoop/hbase/MockRegionServerServices.java | 10 +
 .../hadoop/hbase/TestRegionRebalancing.java | 16 +-
 .../apache/hadoop/hbase/client/TestAdmin1.java | 20 +-
 .../apache/hadoop/hbase/client/TestAdmin2.java | 4 +-
 .../hbase/client/TestAsyncRegionAdminApi.java | 51 +-
 .../client/TestAsyncTableGetMultiThreaded.java | 2 -
 ...ableGetMultiThreadedWithBasicCompaction.java | 6 +-
 ...ableGetMultiThreadedWithEagerCompaction.java | 6 +-
 .../client/TestBlockEvictionFromClient.java | 2 -
 .../hadoop/hbase/client/TestEnableTable.java | 34 +-
 .../hbase/client/TestFromClientSide3.java | 2 -
 .../org/apache/hadoop/hbase/client/TestHCM.java | 142 +-
 .../hbase/client/TestMetaWithReplicas.java | 37 +-
 .../client/TestScannersFromClientSide.java | 30 +-
 .../hbase/client/TestServerBusyException.java | 234 -
 .../client/TestSnapshotCloneIndependence.java | 2 +-
 .../hbase/client/TestSplitOrMergeStatus.java | 119 +-
 .../hbase/client/TestTableFavoredNodes.java | 53 +-
 .../coprocessor/TestIncrementTimeRange.java | 5 +-
 .../hbase/ipc/TestSimpleRpcScheduler.java | 7 +-
 .../mapreduce/TestLoadIncrementalHFiles.java | 2 +-
 .../hbase/master/MockNoopMasterServices.java | 22 +-
 .../hadoop/hbase/master/MockRegionServer.java | 37 +-
 .../hbase/master/TestAssignmentListener.java | 1 -
 .../master/TestAssignmentManagerOnCluster.java | 1403 ++
 .../hadoop/hbase/master/TestCatalogJanitor.java | 591 +-
 .../master/TestDistributedLogSplitting.java | 1 -
 .../apache/hadoop/hbase/master/TestMaster.java | 1 -
 .../master/TestMasterBalanceThrottling.java | 11 +-
 .../hadoop/hbase/master/TestMasterFailover.java | 31 +-
 .../hadoop/hbase/master/TestMasterMetrics.java | 8 +-
 .../TestMasterOperationsForRegionReplicas.java | 10 +-
 .../hbase/master/TestMasterStatusServlet.java | 58 +-
 .../hbase/master/TestMasterWalManager.java | 2 +-
 .../hbase/master/TestMetaShutdownHandler.java | 1 -
 .../hadoop/hbase/master/TestRegionState.java | 17 +-
 .../hadoop/hbase/master/TestRegionStates.java | 144 +
 .../hadoop/hbase/master/TestRestartCluster.java | 8 +-
 .../hadoop/hbase/master/TestWarmupRegion.java | 14 +-
 .../assignment/AssignmentTestingUtil.java | 125 -
 .../master/assignment/MockMasterServices.java | 358 -
 .../assignment/TestAssignmentManager.java | 750 -
 .../assignment/TestAssignmentOnRSCrash.java | 185 -
 .../TestMergeTableRegionsProcedure.java | 260 -
 .../master/assignment/TestRegionStates.java | 224 -
 .../TestSplitTableRegionProcedure.java | 428 -
 .../TestFavoredStochasticBalancerPickers.java | 3 +-
 .../TestFavoredStochasticLoadBalancer.java | 27 +-
 .../TestSimpleRegionNormalizerOnCluster.java | 3 +-
 ...ProcedureSchedulerPerformanceEvaluation.java | 2 +-
 .../MasterProcedureTestingUtility.java | 67 +-
 .../procedure/TestAddColumnFamilyProcedure.java | 34 +-
 .../procedure/TestCloneSnapshotProcedure.java | 8 +-
 .../procedure/TestCreateNamespaceProcedure.java | 4 +-
 .../procedure/TestCreateTableProcedure.java | 46 +-
 .../TestDeleteColumnFamilyProcedure.java | 31 +-
 .../procedure/TestDeleteNamespaceProcedure.java | 4 +-
 .../procedure/TestDeleteTableProcedure.java | 21 +-
 .../procedure/TestDisableTableProcedure.java | 24 +-
 .../procedure/TestEnableTableProcedure.java | 24 +-
 .../TestMasterFailoverWithProcedures.java | 23 +-
 .../procedure/TestMasterProcedureEvents.java | 2 +-
 .../procedure/TestMasterProcedureScheduler.java | 20 +-
 .../TestModifyColumnFamilyProcedure.java | 9 +-
 .../procedure/TestModifyNamespaceProcedure.java | 4 +-
 .../procedure/TestModifyTableProcedure.java | 18 +-
 .../master/procedure/TestProcedureAdmin.java | 12 +-
 .../procedure/TestRestoreSnapshotProcedure.java | 12 +-
 .../procedure/TestServerCrashProcedure.java | 115 +-
 .../TestSplitTableRegionProcedure.java | 420 +
 .../procedure/TestTableDDLProcedureBase.java | 7 +-
 .../procedure/TestTruncateTableProcedure.java | 11 +-
 .../hbase/namespace/TestNamespaceAuditor.java | 184 +-
 .../procedure/SimpleMasterProcedureManager.java | 2 +-
 .../regionserver/TestCompactSplitThread.java | 21 +-
 .../hbase/regionserver/TestCompaction.java | 10 +-
 .../TestCorruptedRegionStoreFile.java | 5 -
 .../regionserver/TestHRegionFileSystem.java | 6 +-
 .../TestRegionMergeTransactionOnCluster.java | 58 +-
 .../regionserver/TestRegionServerMetrics.java | 40 +-
 .../TestSplitTransactionOnCluster.java | 140 +-
 .../hbase/regionserver/wal/TestLogRolling.java | 5 -
 .../wal/TestSecureAsyncWALReplay.java | 5 -
 .../hbase/regionserver/wal/TestWALReplay.java | 8 +-
 .../security/access/TestAccessController3.java | 14 +-
 .../hadoop/hbase/util/BaseTestHBaseFsck.java | 4 +-
 .../hadoop/hbase/util/TestHBaseFsckMOB.java | 2 +-
 .../hadoop/hbase/util/TestHBaseFsckOneRS.java | 118 +-
 .../hbase/util/TestHBaseFsckReplicas.java | 4 +-
 .../hadoop/hbase/util/TestHBaseFsckTwoRS.java | 23 +-
 .../util/hbck/TestOfflineMetaRebuildBase.java | 3 +-
 .../util/hbck/TestOfflineMetaRebuildHole.java | 2 -
 .../hbck/TestOfflineMetaRebuildOverlap.java | 2 -
 242 files changed, 25902 insertions(+), 37835 deletions(-)
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hbase/blob/a3c5a744/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java index 95d77a2..c51a437 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java @@ -22,7 +22,7 @@ package org.apache.hadoop.hbase; import java.util.Arrays; import
java.util.Collection; import java.util.Collections; -import java.util.List; +import java.util.Set; import java.util.Map; import org.apache.hadoop.hbase.classification.InterfaceAudience; @@ -67,7 +67,7 @@ public class ClusterStatus extends VersionedWritable { private Collection<ServerName> deadServers; private ServerName master; private Collection<ServerName> backupMasters; - private List<RegionState> intransition; + private Set<RegionState> intransition; private String clusterId; private String[] masterCoprocessors; private Boolean balancerOn; @@ -77,7 +77,7 @@ public class ClusterStatus extends VersionedWritable { final Collection<ServerName> deadServers, final ServerName master, final Collection<ServerName> backupMasters, - final List<RegionState> rit, + final Set<RegionState> rit, final String[] masterCoprocessors, final Boolean balancerOn) { this.hbaseVersion = hbaseVersion; @@ -248,7 +248,7 @@ public class ClusterStatus extends VersionedWritable { } @InterfaceAudience.Private - public List<RegionState> getRegionsInTransition() { + public Set<RegionState> getRegionsInTransition() { return this.intransition; } http://git-wip-us.apache.org/repos/asf/hbase/blob/a3c5a744/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java ---------------------------------------------------------------------- diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java index d470ffa..bc93cc6 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java @@ -23,7 +23,6 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; import java.util.List; -import java.util.stream.Collectors; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -168,19 +167,6 @@ public class HRegionInfo implements Comparable<HRegionInfo> { return prettyPrint(this.getEncodedName()); } - public static String getShortNameToLog(HRegionInfo...hris) { - return getShortNameToLog(Arrays.asList(hris)); - } - - /** - * @return Return a String of short, printable names for <code>hris</code> - * (usually encoded name) for us logging. - */ - public static String getShortNameToLog(final List<HRegionInfo> hris) { - return hris.stream().map(hri -> hri.getShortNameToLog()). - collect(Collectors.toList()).toString(); - } - /** * Use logging. * @param encodedRegionName The encoded regionname. http://git-wip-us.apache.org/repos/asf/hbase/blob/a3c5a744/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java ---------------------------------------------------------------------- diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java index 9eb5111..15bc132 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java @@ -1663,11 +1663,8 @@ public class MetaTableAccessor { Delete deleteA = makeDeleteFromRegionInfo(regionA, time); Delete deleteB = makeDeleteFromRegionInfo(regionB, time); - // The merged is a new region, openSeqNum = 1 is fine. ServerName may be null - // if crash after merge happened but before we got to here.. means in-memory - // locations of offlined merged, now-closed, regions is lost. Should be ok. We - // assign the merged region later. 
- if (sn != null) addLocation(putOfMerged, sn, 1, -1, mergedRegion.getReplicaId()); + // The merged is a new region, openSeqNum = 1 is fine. + addLocation(putOfMerged, sn, 1, -1, mergedRegion.getReplicaId()); // Add empty locations for region replicas of the merged region so that number of replicas can // be cached whenever the primary region is looked up from meta @@ -1969,8 +1966,8 @@ public class MetaTableAccessor { * @param regionsInfo list of regions to be deleted from META * @throws IOException */ - public static void deleteRegions(Connection connection, List<HRegionInfo> regionsInfo, long ts) - throws IOException { + public static void deleteRegions(Connection connection, + List<HRegionInfo> regionsInfo, long ts) throws IOException { List<Delete> deletes = new ArrayList<>(regionsInfo.size()); for (HRegionInfo hri: regionsInfo) { Delete e = new Delete(hri.getRegionName()); @@ -2005,10 +2002,10 @@ public class MetaTableAccessor { } mutateMetaTable(connection, mutation); if (regionsToRemove != null && regionsToRemove.size() > 0) { - LOG.debug("Deleted " + HRegionInfo.getShortNameToLog(regionsToRemove)); + LOG.debug("Deleted " + regionsToRemove); } if (regionsToAdd != null && regionsToAdd.size() > 0) { - LOG.debug("Added " + HRegionInfo.getShortNameToLog(regionsToAdd)); + LOG.debug("Added " + regionsToAdd); } } http://git-wip-us.apache.org/repos/asf/hbase/blob/a3c5a744/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java ---------------------------------------------------------------------- diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java index 4ed28ec..e5f5694 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java @@ -1339,12 +1339,6 @@ class ConnectionImplementation implements ClusterConnection, Closeable { return stub.mergeTableRegions(controller, request); } - public MasterProtos.DispatchMergingRegionsResponse dispatchMergingRegions( - RpcController controller, MasterProtos.DispatchMergingRegionsRequest request) - throws ServiceException { - return stub.dispatchMergingRegions(controller, request); - } - @Override public MasterProtos.AssignRegionResponse assignRegion(RpcController controller, MasterProtos.AssignRegionRequest request) throws ServiceException { @@ -1364,12 +1358,6 @@ class ConnectionImplementation implements ClusterConnection, Closeable { } @Override - public MasterProtos.SplitTableRegionResponse splitRegion(RpcController controller, - MasterProtos.SplitTableRegionRequest request) throws ServiceException { - return stub.splitRegion(controller, request); - } - - @Override public MasterProtos.DeleteTableResponse deleteTable(RpcController controller, MasterProtos.DeleteTableRequest request) throws ServiceException { return stub.deleteTable(controller, request); http://git-wip-us.apache.org/repos/asf/hbase/blob/a3c5a744/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ShortCircuitMasterConnection.java ---------------------------------------------------------------------- diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ShortCircuitMasterConnection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ShortCircuitMasterConnection.java index 6d75446..bea578c 100644 --- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ShortCircuitMasterConnection.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ShortCircuitMasterConnection.java @@ -499,16 +499,4 @@ public class ShortCircuitMasterConnection implements MasterKeepAliveConnection { GetQuotaStatesRequest request) throws ServiceException { return stub.getQuotaStates(controller, request); } - - @Override - public SplitTableRegionResponse splitRegion(RpcController controller, SplitTableRegionRequest request) - throws ServiceException { - return stub.splitRegion(controller, request); - } - - @Override - public DispatchMergingRegionsResponse dispatchMergingRegions(RpcController controller, - DispatchMergingRegionsRequest request) throws ServiceException { - return stub.dispatchMergingRegions(controller, request); - } } http://git-wip-us.apache.org/repos/asf/hbase/blob/a3c5a744/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcDuplexHandler.java ---------------------------------------------------------------------- diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcDuplexHandler.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcDuplexHandler.java index 08533b4..e69b42d 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcDuplexHandler.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcDuplexHandler.java @@ -226,8 +226,8 @@ class NettyRpcDuplexHandler extends ChannelDuplexHandler { switch (idleEvt.state()) { case WRITER_IDLE: if (id2Call.isEmpty()) { - if (LOG.isTraceEnabled()) { - LOG.trace("shutdown connection to " + conn.remoteId().address + if (LOG.isDebugEnabled()) { + LOG.debug("shutdown connection to " + conn.remoteId().address + " because idle for a long time"); } // It may happen that there are still some pending calls in the event loop queue and http://git-wip-us.apache.org/repos/asf/hbase/blob/a3c5a744/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcConnection.java ---------------------------------------------------------------------- diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcConnection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcConnection.java index 98d2256..b5a7959 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcConnection.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcConnection.java @@ -129,11 +129,7 @@ abstract class RpcConnection { authMethod = AuthMethod.KERBEROS; } - // Log if debug AND non-default auth, else if trace enabled. - // No point logging obvious. - if ((LOG.isDebugEnabled() && !authMethod.equals(AuthMethod.SIMPLE)) || - LOG.isTraceEnabled()) { - // Only log if not default auth. 
+ if (LOG.isDebugEnabled()) { LOG.debug("Use " + authMethod + " authentication for service " + remoteId.serviceName + ", sasl=" + useSasl); } http://git-wip-us.apache.org/repos/asf/hbase/blob/a3c5a744/hbase-client/src/main/java/org/apache/hadoop/hbase/master/RegionState.java ---------------------------------------------------------------------- diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/master/RegionState.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/master/RegionState.java index 7116763..0e12ef6 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/master/RegionState.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/master/RegionState.java @@ -36,8 +36,10 @@ public class RegionState { @InterfaceStability.Evolving public enum State { OFFLINE, // region is in an offline state + PENDING_OPEN, // same as OPENING, to be removed OPENING, // server has begun to open but not yet done OPEN, // server opened region and updated meta + PENDING_CLOSE, // same as CLOSING, to be removed CLOSING, // server has begun to close but not yet done CLOSED, // server closed region and updated meta SPLITTING, // server started split of a region @@ -62,12 +64,18 @@ public class RegionState { case OFFLINE: rs = ClusterStatusProtos.RegionState.State.OFFLINE; break; + case PENDING_OPEN: + rs = ClusterStatusProtos.RegionState.State.PENDING_OPEN; + break; case OPENING: rs = ClusterStatusProtos.RegionState.State.OPENING; break; case OPEN: rs = ClusterStatusProtos.RegionState.State.OPEN; break; + case PENDING_CLOSE: + rs = ClusterStatusProtos.RegionState.State.PENDING_CLOSE; + break; case CLOSING: rs = ClusterStatusProtos.RegionState.State.CLOSING; break; @@ -116,6 +124,8 @@ public class RegionState { state = OFFLINE; break; case PENDING_OPEN: + state = PENDING_OPEN; + break; case OPENING: state = OPENING; break; @@ -123,6 +133,8 @@ public class RegionState { state = OPEN; break; case PENDING_CLOSE: + state = PENDING_CLOSE; + break; case CLOSING: state = CLOSING; break; @@ -219,16 +231,22 @@ public class RegionState { this.ritDuration += (this.stamp - previousStamp); } + /** + * PENDING_CLOSE (to be removed) is the same as CLOSING + */ public boolean isClosing() { - return state == State.CLOSING; + return state == State.PENDING_CLOSE || state == State.CLOSING; } public boolean isClosed() { return state == State.CLOSED; } + /** + * PENDING_OPEN (to be removed) is the same as OPENING + */ public boolean isOpening() { - return state == State.OPENING; + return state == State.PENDING_OPEN || state == State.OPENING; } public boolean isOpened() { http://git-wip-us.apache.org/repos/asf/hbase/blob/a3c5a744/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java ---------------------------------------------------------------------- diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java index eca050f..108646a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java @@ -20,19 +20,19 @@ package org.apache.hadoop.hbase.shaded.protobuf; import java.io.ByteArrayOutputStream; import java.io.IOException; import java.io.InputStream; -import java.io.InterruptedIOException; import java.lang.reflect.Constructor; import java.lang.reflect.Method; import java.nio.ByteBuffer; -import 
java.security.PrivilegedExceptionAction; import java.util.ArrayList; import java.util.Collection; import java.util.HashMap; +import java.util.HashSet; import java.util.List; import java.util.Locale; import java.util.Map; import java.util.Map.Entry; import java.util.NavigableSet; +import java.util.Set; import java.util.concurrent.Callable; import java.util.concurrent.TimeUnit; @@ -89,14 +89,12 @@ import org.apache.hadoop.hbase.io.TimeRange; import org.apache.hadoop.hbase.master.RegionState; import org.apache.hadoop.hbase.procedure2.LockInfo; import org.apache.hadoop.hbase.protobuf.ProtobufMagic; -import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsRequest; import org.apache.hadoop.hbase.quotas.QuotaScope; import org.apache.hadoop.hbase.quotas.QuotaType; import org.apache.hadoop.hbase.quotas.SpaceViolationPolicy; import org.apache.hadoop.hbase.quotas.ThrottleType; import org.apache.hadoop.hbase.replication.ReplicationLoadSink; import org.apache.hadoop.hbase.replication.ReplicationLoadSource; -import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.security.visibility.Authorizations; import org.apache.hadoop.hbase.security.visibility.CellVisibility; import org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString; @@ -110,6 +108,8 @@ import org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException; import org.apache.hadoop.hbase.shaded.com.google.protobuf.TextFormat; import org.apache.hadoop.hbase.shaded.com.google.protobuf.UnsafeByteOperations; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitOrMergeRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitOrMergeResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionRequest; @@ -177,7 +177,6 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos; import org.apache.hadoop.hbase.util.Addressing; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.DynamicClassLoader; -import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.ExceptionUtil; import org.apache.hadoop.hbase.util.ForeignExceptionUtil; import org.apache.hadoop.hbase.util.Methods; @@ -1843,6 +1842,33 @@ public final class ProtobufUtil { } /** + * A helper to close a region for split or merge + * using admin protocol. + * + * @param controller RPC controller + * @param admin Admin service + * @param server the RS that hosts the target region + * @param regionInfo the target region info + * @return true if the region is closed + * @throws IOException + */ + public static boolean closeRegionForSplitOrMerge( + final RpcController controller, + final AdminService.BlockingInterface admin, + final ServerName server, + final HRegionInfo... 
regionInfo) throws IOException { + CloseRegionForSplitOrMergeRequest closeRegionForRequest = + ProtobufUtil.buildCloseRegionForSplitOrMergeRequest(server, regionInfo); + try { + CloseRegionForSplitOrMergeResponse response = + admin.closeRegionForSplitOrMerge(controller, closeRegionForRequest); + return ResponseConverter.isClosed(response); + } catch (ServiceException se) { + throw getRemoteException(se); + } + } + + /** * A helper to warmup a region given a region name * using admin protocol * @@ -1994,46 +2020,6 @@ public final class ProtobufUtil { } } - /** - * A helper to merge regions using admin protocol. Send request to - * regionserver. - * @param admin - * @param region_a - * @param region_b - * @param forcible true if do a compulsory merge, otherwise we will only merge - * two adjacent regions - * @param user effective user - * @throws IOException - */ - public static void mergeRegions(final RpcController controller, - final AdminService.BlockingInterface admin, - final HRegionInfo region_a, final HRegionInfo region_b, - final boolean forcible, final User user) throws IOException { - final MergeRegionsRequest request = ProtobufUtil.buildMergeRegionsRequest( - region_a.getRegionName(), region_b.getRegionName(),forcible); - if (user != null) { - try { - user.runAs(new PrivilegedExceptionAction<Void>() { - @Override - public Void run() throws Exception { - admin.mergeRegions(controller, request); - return null; - } - }); - } catch (InterruptedException ie) { - InterruptedIOException iioe = new InterruptedIOException(); - iioe.initCause(ie); - throw iioe; - } - } else { - try { - admin.mergeRegions(controller, request); - } catch (ServiceException se) { - throw ProtobufUtil.getRemoteException(se); - } - } - } - // End helpers for Admin /* @@ -3117,8 +3103,8 @@ public final class ProtobufUtil { backupMasters.add(ProtobufUtil.toServerName(sn)); } - List<RegionState> rit = - new ArrayList<>(proto.getRegionsInTransitionList().size()); + Set<RegionState> rit = null; + rit = new HashSet<>(proto.getRegionsInTransitionList().size()); for (RegionInTransition region : proto.getRegionsInTransitionList()) { RegionState value = RegionState.convert(region.getRegionState()); rit.add(value); @@ -3277,6 +3263,26 @@ public final class ProtobufUtil { } /** + * Create a CloseRegionForSplitOrMergeRequest for given regions + * + * @param server the RS server that hosts the region + * @param regionsToClose the info of the regions to close + * @return a CloseRegionForSplitRequest + */ + public static CloseRegionForSplitOrMergeRequest buildCloseRegionForSplitOrMergeRequest( + final ServerName server, + final HRegionInfo... 
regionsToClose) { + CloseRegionForSplitOrMergeRequest.Builder builder = + CloseRegionForSplitOrMergeRequest.newBuilder(); + for(int i = 0; i < regionsToClose.length; i++) { + RegionSpecifier regionToClose = RequestConverter.buildRegionSpecifier( + RegionSpecifierType.REGION_NAME, regionsToClose[i].getRegionName()); + builder.addRegion(regionToClose); + } + return builder.build(); + } + + /** * Create a CloseRegionRequest for a given encoded region name * * @param encodedRegionName the name of the region to close @@ -3325,28 +3331,6 @@ public final class ProtobufUtil { return builder.build(); } - /** - * Create a MergeRegionsRequest for the given regions - * @param regionA name of region a - * @param regionB name of region b - * @param forcible true if it is a compulsory merge - * @return a MergeRegionsRequest - */ - public static MergeRegionsRequest buildMergeRegionsRequest( - final byte[] regionA, final byte[] regionB, final boolean forcible) { - MergeRegionsRequest.Builder builder = MergeRegionsRequest.newBuilder(); - RegionSpecifier regionASpecifier = RequestConverter.buildRegionSpecifier( - RegionSpecifierType.REGION_NAME, regionA); - RegionSpecifier regionBSpecifier = RequestConverter.buildRegionSpecifier( - RegionSpecifierType.REGION_NAME, regionB); - builder.setRegionA(regionASpecifier); - builder.setRegionB(regionBSpecifier); - builder.setForcible(forcible); - // send the master's wall clock time as well, so that the RS can refer to it - builder.setMasterSystemTime(EnvironmentEdgeManager.currentTime()); - return builder.build(); - } - /** * Get a ServerName from the passed in data bytes. * @param data Data with a serialize server name in it; can handle the old style http://git-wip-us.apache.org/repos/asf/hbase/blob/a3c5a744/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java ---------------------------------------------------------------------- diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java index 134c319..4d34334 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java @@ -123,6 +123,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetQuotaSta import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaRegionSizesRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaSnapshotsRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerRequest; @@ -1119,6 +1120,19 @@ public final class RequestConverter { return builder.build(); } + public static SplitTableRegionRequest buildSplitTableRegionRequest( + final HRegionInfo regionInfo, + final byte[] splitPoint, + final long nonceGroup, + final long nonce) { + SplitTableRegionRequest.Builder builder = SplitTableRegionRequest.newBuilder(); + builder.setRegionInfo(HRegionInfo.convert(regionInfo)); + 
builder.setSplitRow(UnsafeByteOperations.unsafeWrap(splitPoint)); + builder.setNonceGroup(nonceGroup); + builder.setNonce(nonce); + return builder.build(); + } + /** * Create a protocol buffer AssignRegionRequest * @@ -1501,7 +1515,7 @@ public final class RequestConverter { /** * Create a RegionOpenInfo based on given region info and version of offline node */ - public static RegionOpenInfo buildRegionOpenInfo( + private static RegionOpenInfo buildRegionOpenInfo( final HRegionInfo region, final List<ServerName> favoredNodes, Boolean openForReplay) { RegionOpenInfo.Builder builder = RegionOpenInfo.newBuilder(); http://git-wip-us.apache.org/repos/asf/hbase/blob/a3c5a744/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ResponseConverter.java ---------------------------------------------------------------------- diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ResponseConverter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ResponseConverter.java index c489628..ecadbbc 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ResponseConverter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ResponseConverter.java @@ -34,6 +34,7 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.SingleResponse; import org.apache.hadoop.hbase.ipc.ServerRpcController; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitOrMergeResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetServerInfoResponse; @@ -253,6 +254,18 @@ public final class ResponseConverter { } /** + * Check if the region is closed from a CloseRegionForSplitResponse + * + * @param proto the CloseRegionForSplitResponse + * @return the region close state + */ + public static boolean isClosed + (final CloseRegionForSplitOrMergeResponse proto) { + if (proto == null || !proto.hasClosed()) return false; + return proto.getClosed(); + } + + /** * A utility to build a GetServerInfoResponse. * * @param serverName http://git-wip-us.apache.org/repos/asf/hbase/blob/a3c5a744/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/MetaTableLocator.java ---------------------------------------------------------------------- diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/MetaTableLocator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/MetaTableLocator.java index c11d896..afab54a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/MetaTableLocator.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/MetaTableLocator.java @@ -439,10 +439,6 @@ public class MetaTableLocator { */ public static void setMetaLocation(ZooKeeperWatcher zookeeper, ServerName serverName, int replicaId, RegionState.State state) throws KeeperException { - if (serverName == null) { - LOG.warn("Tried to set null ServerName in hbase:meta; skipping -- ServerName required"); - return; - } LOG.info("Setting hbase:meta region location in ZooKeeper as " + serverName); // Make the MetaRegionServer pb and then get its bytes and save this as // the znode content. 
@@ -452,8 +448,7 @@ public class MetaTableLocator { .setState(state.convert()).build(); byte[] data = ProtobufUtil.prependPBMagic(pbrsr.toByteArray()); try { - ZKUtil.setData(zookeeper, - zookeeper.znodePaths.getZNodeForReplica(replicaId), data); + ZKUtil.setData(zookeeper, zookeeper.znodePaths.getZNodeForReplica(replicaId), data); } catch(KeeperException.NoNodeException nne) { if (replicaId == HRegionInfo.DEFAULT_REPLICA_ID) { LOG.debug("META region location doesn't exist, create it"); http://git-wip-us.apache.org/repos/asf/hbase/blob/a3c5a744/hbase-common/src/main/java/org/apache/hadoop/hbase/ProcedureInfo.java ---------------------------------------------------------------------- diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/ProcedureInfo.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/ProcedureInfo.java index 36dabdd..6104c22 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/ProcedureInfo.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/ProcedureInfo.java @@ -80,11 +80,12 @@ public class ProcedureInfo implements Cloneable { @Override public String toString() { StringBuilder sb = new StringBuilder(); + sb.append("Procedure="); sb.append(procName); - sb.append(" pid="); + sb.append(" (id="); sb.append(procId); if (hasParentId()) { - sb.append(", ppid="); + sb.append(", parent="); sb.append(parentId); } if (hasOwner()) { @@ -106,6 +107,7 @@ public class ProcedureInfo implements Cloneable { sb.append(this.exception.getMessage()); sb.append("\""); } + sb.append(")"); return sb.toString(); } http://git-wip-us.apache.org/repos/asf/hbase/blob/a3c5a744/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsAssignmentManagerSource.java ---------------------------------------------------------------------- diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsAssignmentManagerSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsAssignmentManagerSource.java index 2ebf8c9..fa7bbec 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsAssignmentManagerSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsAssignmentManagerSource.java @@ -47,7 +47,6 @@ public interface MetricsAssignmentManagerSource extends BaseSource { String RIT_OLDEST_AGE_NAME = "ritOldestAge"; String RIT_DURATION_NAME = "ritDuration"; String ASSIGN_TIME_NAME = "assign"; - String UNASSIGN_TIME_NAME = "unassign"; String BULK_ASSIGN_TIME_NAME = "bulkAssign"; String RIT_COUNT_DESC = "Current number of Regions In Transition (Gauge)."; @@ -57,7 +56,9 @@ public interface MetricsAssignmentManagerSource extends BaseSource { String RIT_DURATION_DESC = "Total durations in milliseconds for all Regions in Transition (Histogram)."; - String OPERATION_COUNT_NAME = "operationCount"; + void updateAssignmentTime(long time); + + void updateBulkAssignTime(long time); /** * Set the number of regions in transition. @@ -81,19 +82,4 @@ public interface MetricsAssignmentManagerSource extends BaseSource { void setRITOldestAge(long age); void updateRitDuration(long duration); - - /** - * Increment the count of assignment operation (assign/unassign). 
- */ - void incrementOperationCounter(); - - /** - * Add the time took to perform the last assign operation - */ - void updateAssignTime(long time); - - /** - * Add the time took to perform the last unassign operation - */ - void updateUnassignTime(long time); } http://git-wip-us.apache.org/repos/asf/hbase/blob/a3c5a744/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsAssignmentManagerSourceImpl.java ---------------------------------------------------------------------- diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsAssignmentManagerSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsAssignmentManagerSourceImpl.java index 14b7e71..faae044 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsAssignmentManagerSourceImpl.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsAssignmentManagerSourceImpl.java @@ -21,7 +21,6 @@ package org.apache.hadoop.hbase.master; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.metrics.BaseSourceImpl; import org.apache.hadoop.metrics2.MetricHistogram; -import org.apache.hadoop.metrics2.lib.MutableFastCounter; import org.apache.hadoop.metrics2.lib.MutableGaugeLong; @InterfaceAudience.Private @@ -33,10 +32,8 @@ public class MetricsAssignmentManagerSourceImpl private MutableGaugeLong ritCountOverThresholdGauge; private MutableGaugeLong ritOldestAgeGauge; private MetricHistogram ritDurationHisto; - - private MutableFastCounter operationCounter; private MetricHistogram assignTimeHisto; - private MetricHistogram unassignTimeHisto; + private MetricHistogram bulkAssignTimeHisto; public MetricsAssignmentManagerSourceImpl() { this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT); @@ -54,39 +51,30 @@ public class MetricsAssignmentManagerSourceImpl RIT_COUNT_OVER_THRESHOLD_DESC,0l); ritOldestAgeGauge = metricsRegistry.newGauge(RIT_OLDEST_AGE_NAME, RIT_OLDEST_AGE_DESC, 0l); assignTimeHisto = metricsRegistry.newTimeHistogram(ASSIGN_TIME_NAME); - unassignTimeHisto = metricsRegistry.newTimeHistogram(UNASSIGN_TIME_NAME); + bulkAssignTimeHisto = metricsRegistry.newTimeHistogram(BULK_ASSIGN_TIME_NAME); ritDurationHisto = metricsRegistry.newTimeHistogram(RIT_DURATION_NAME, RIT_DURATION_DESC); - operationCounter = metricsRegistry.getCounter(OPERATION_COUNT_NAME, 0l); - } - - @Override - public void setRIT(final int ritCount) { - ritGauge.set(ritCount); } @Override - public void setRITCountOverThreshold(final int ritCount) { - ritCountOverThresholdGauge.set(ritCount); + public void updateAssignmentTime(long time) { + assignTimeHisto.add(time); } @Override - public void setRITOldestAge(final long ritCount) { - ritOldestAgeGauge.set(ritCount); + public void updateBulkAssignTime(long time) { + bulkAssignTimeHisto.add(time); } - @Override - public void incrementOperationCounter() { - operationCounter.incr(); + public void setRIT(int ritCount) { + ritGauge.set(ritCount); } - @Override - public void updateAssignTime(final long time) { - assignTimeHisto.add(time); + public void setRITCountOverThreshold(int ritCount) { + ritCountOverThresholdGauge.set(ritCount); } - @Override - public void updateUnassignTime(final long time) { - unassignTimeHisto.add(time); + public void setRITOldestAge(long ritCount) { + ritOldestAgeGauge.set(ritCount); } @Override 
http://git-wip-us.apache.org/repos/asf/hbase/blob/a3c5a744/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/AbstractProcedureScheduler.java ---------------------------------------------------------------------- diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/AbstractProcedureScheduler.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/AbstractProcedureScheduler.java index 64c3e53..fbb066c 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/AbstractProcedureScheduler.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/AbstractProcedureScheduler.java @@ -29,8 +29,8 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience; @InterfaceAudience.Private public abstract class AbstractProcedureScheduler implements ProcedureScheduler { private static final Log LOG = LogFactory.getLog(AbstractProcedureScheduler.class); - private final ReentrantLock schedulerLock = new ReentrantLock(); - private final Condition schedWaitCond = schedulerLock.newCondition(); + private final ReentrantLock schedLock = new ReentrantLock(); + private final Condition schedWaitCond = schedLock.newCondition(); private boolean running = false; // TODO: metrics @@ -88,14 +88,14 @@ public abstract class AbstractProcedureScheduler implements ProcedureScheduler { } protected void push(final Procedure procedure, final boolean addFront, final boolean notify) { - schedulerLock.lock(); + schedLock.lock(); try { enqueue(procedure, addFront); if (notify) { schedWaitCond.signal(); } } finally { - schedulerLock.unlock(); + schedLock.unlock(); } } @@ -219,11 +219,11 @@ public abstract class AbstractProcedureScheduler implements ProcedureScheduler { @Override public void suspendEvent(final ProcedureEvent event) { - final boolean traceEnabled = LOG.isTraceEnabled(); + final boolean isTraceEnabled = LOG.isTraceEnabled(); synchronized (event) { event.setReady(false); - if (traceEnabled) { - LOG.trace("Suspend " + event); + if (isTraceEnabled) { + LOG.trace("Suspend event " + event); } } } @@ -235,29 +235,18 @@ public abstract class AbstractProcedureScheduler implements ProcedureScheduler { @Override public void wakeEvents(final int count, final ProcedureEvent... events) { - final boolean traceEnabled = LOG.isTraceEnabled(); + final boolean isTraceEnabled = LOG.isTraceEnabled(); schedLock(); try { int waitingCount = 0; for (int i = 0; i < count; ++i) { final ProcedureEvent event = events[i]; synchronized (event) { - if (!event.isReady()) { - // Only set ready if we were not ready; i.e. suspended. Otherwise, we double-wake - // on this event and down in wakeWaitingProcedures, we double decrement this - // finish which messes up child procedure accounting. - event.setReady(true); - if (traceEnabled) { - LOG.trace("Unsuspend " + event); - } - waitingCount += wakeWaitingProcedures(event.getSuspendedProcedures()); - } else { - ProcedureDeque q = event.getSuspendedProcedures(); - if (q != null && !q.isEmpty()) { - LOG.warn("Q is not empty! 
size=" + q.size() + "; PROCESSING..."); - waitingCount += wakeWaitingProcedures(event.getSuspendedProcedures()); - } + event.setReady(true); + if (isTraceEnabled) { + LOG.trace("Wake event " + event); } + waitingCount += wakeWaitingProcedures(event.getSuspendedProcedures()); } } wakePollIfNeeded(waitingCount); @@ -286,7 +275,6 @@ public abstract class AbstractProcedureScheduler implements ProcedureScheduler { } protected void wakeProcedure(final Procedure procedure) { - if (LOG.isTraceEnabled()) LOG.trace("Wake " + procedure); push(procedure, /* addFront= */ true, /* notify= */false); } @@ -294,11 +282,11 @@ public abstract class AbstractProcedureScheduler implements ProcedureScheduler { // Internal helpers // ========================================================================== protected void schedLock() { - schedulerLock.lock(); + schedLock.lock(); } protected void schedUnlock() { - schedulerLock.unlock(); + schedLock.unlock(); } protected void wakePollIfNeeded(final int waitingCount) { http://git-wip-us.apache.org/repos/asf/hbase/blob/a3c5a744/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java ---------------------------------------------------------------------- diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java index 09f8170..591c0d0 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java @@ -25,8 +25,6 @@ import java.util.Arrays; import java.util.List; import java.util.Map; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.hbase.exceptions.TimeoutIOException; @@ -39,66 +37,37 @@ import org.apache.hadoop.hbase.util.NonceKey; import com.google.common.annotations.VisibleForTesting; /** - * Base Procedure class responsible for Procedure Metadata; - * e.g. state, submittedTime, lastUpdate, stack-indexes, etc. + * Base Procedure class responsible to handle the Procedure Metadata + * e.g. state, submittedTime, lastUpdate, stack-indexes, ... * - * <p>Procedures are run by a {@link ProcedureExecutor} instance. They are submitted and then - * the ProcedureExecutor keeps calling {@link #execute(Object)} until the Procedure is done. - * Execute may be called multiple times in the case of failure or a restart, so code must be - * idempotent. The return from an execute call is either: null to indicate we are done; - * ourself if there is more to do; or, a set of sub-procedures that need to - * be run to completion before the framework resumes our execution. + * execute() is called each time the procedure is executed. + * it may be called multiple times in case of failure and restart, so the + * code must be idempotent. + * the return is a set of sub-procedures or null in case the procedure doesn't + * have sub-procedures. Once the sub-procedures are successfully completed + * the execute() method is called again, you should think at it as a stack: + * -> step 1 + * ---> step 2 + * -> step 1 * - * <p>The ProcedureExecutor keeps its - * notion of Procedure State in the Procedure itself; e.g. it stamps the Procedure as INITIALIZING, - * RUNNABLE, SUCCESS, etc. 
Here are some of the States defined in the ProcedureState enum from - * protos: - *<ul> - * <li>{@link #isFailed()} A procedure has executed at least once and has failed. The procedure - * may or may not have rolled back yet. Any procedure in FAILED state will be eventually moved - * to ROLLEDBACK state.</li> - * - * <li>{@link #isSuccess()} A procedure is completed successfully without exception.</li> - * - * <li>{@link #isFinished()} As a procedure in FAILED state will be tried forever for rollback, only - * condition when scheduler/ executor will drop procedure from further processing is when procedure - * state is ROLLEDBACK or isSuccess() returns true. This is a terminal state of the procedure.</li> - * - * <li>{@link #isWaiting()} - Procedure is in one of the two waiting states - * ({@link ProcedureState#WAITING}, {@link ProcedureState#WAITING_TIMEOUT}).</li> - *</ul> - * NOTE: This states are of the ProcedureExecutor. Procedure implementations in turn can keep - * their own state. This can lead to confusion. Try to keep the two distinct. - * - * <p>rollback() is called when the procedure or one of the sub-procedures - * has failed. The rollback step is supposed to cleanup the resources created - * during the execute() step. In case of failure and restart, rollback() may be - * called multiple times, so again the code must be idempotent. - * - * <p>Procedure can be made respect a locking regime. It has acqure/release methods as - * well as an {@link #hasLock(Object)}. The lock implementation is up to the implementor. - * If an entity needs to be locked for the life of a procedure -- not just the calls to - * execute -- then implementations should say so with the {@link #holdLock(Object)} - * method. - * - * <p>There are hooks for collecting metrics on submit of the procedure and on finish. - * See {@link #updateMetricsOnSubmit(Object)} and - * {@link #updateMetricsOnFinish(Object, long, boolean)}. + * rollback() is called when the procedure or one of the sub-procedures is failed. + * the rollback step is supposed to cleanup the resources created during the + * execute() step. in case of failure and restart rollback() may be called + * multiple times, so the code must be idempotent. 
*/ @InterfaceAudience.Private @InterfaceStability.Evolving -public abstract class Procedure<TEnvironment> implements Comparable<Procedure<TEnvironment>> { - private static final Log LOG = LogFactory.getLog(Procedure.class); +public abstract class Procedure<TEnvironment> implements Comparable<Procedure> { public static final long NO_PROC_ID = -1; protected static final int NO_TIMEOUT = -1; public enum LockState { - LOCK_ACQUIRED, // Lock acquired and ready to execute - LOCK_YIELD_WAIT, // Lock not acquired, framework needs to yield - LOCK_EVENT_WAIT, // Lock not acquired, an event will yield the procedure + LOCK_ACQUIRED, // lock acquired and ready to execute + LOCK_YIELD_WAIT, // lock not acquired, framework needs to yield + LOCK_EVENT_WAIT, // lock not acquired, an event will yield the procedure } - // Unchanged after initialization + // unchanged after initialization private NonceKey nonceKey = null; private String owner = null; private long parentProcId = NO_PROC_ID; @@ -106,7 +75,7 @@ public abstract class Procedure<TEnvironment> implements Comparable<Procedure<TE private long procId = NO_PROC_ID; private long submittedTime; - // Runtime state, updated every operation + // runtime state, updated every operation private ProcedureState state = ProcedureState.INITIALIZING; private RemoteProcedureException exception = null; private int[] stackIndexes = null; @@ -119,22 +88,19 @@ public abstract class Procedure<TEnvironment> implements Comparable<Procedure<TE /** * The main code of the procedure. It must be idempotent since execute() - * may be called multiple times in case of machine failure in the middle + * may be called multiple time in case of machine failure in the middle * of the execution. * @param env the environment passed to the ProcedureExecutor - * @return a set of sub-procedures to run or ourselves if there is more work to do or null if the - * procedure is done. - * @throws ProcedureYieldException the procedure will be added back to the queue and retried later. - * @throws InterruptedException the procedure will be added back to the queue and retried later. - * @throws ProcedureSuspendedException Signal to the executor that Procedure has suspended itself and - * has set itself up waiting for an external event to wake it back up again. - */ - protected abstract Procedure<TEnvironment>[] execute(TEnvironment env) + * @return a set of sub-procedures or null if there is nothing else to execute. + * @throws ProcedureYieldException the procedure will be added back to the queue and retried later + * @throws InterruptedException the procedure will be added back to the queue and retried later + */ + protected abstract Procedure[] execute(TEnvironment env) throws ProcedureYieldException, ProcedureSuspendedException, InterruptedException; /** - * The code to undo what was done by the execute() code. - * It is called when the procedure or one of the sub-procedures failed or an + * The code to undo what done by the execute() code. + * It is called when the procedure or one of the sub-procedure failed or an * abort was requested. It should cleanup all the resources created by * the execute() call. The implementation must be idempotent since rollback() * may be called multiple time in case of machine failure in the middle @@ -148,21 +114,21 @@ public abstract class Procedure<TEnvironment> implements Comparable<Procedure<TE /** * The abort() call is asynchronous and each procedure must decide how to deal - * with it, if they want to be abortable. 
The simplest implementation + * with that, if they want to be abortable. The simplest implementation * is to have an AtomicBoolean set in the abort() method and then the execute() * will check if the abort flag is set or not. * abort() may be called multiple times from the client, so the implementation * must be idempotent. * - * <p>NOTE: abort() is not like Thread.interrupt(). It is just a notification - * that allows the procedure implementor abort. + * NOTE: abort() is not like Thread.interrupt() it is just a notification + * that allows the procedure implementor where to abort to avoid leak and + * have a better control on what was executed and what not. */ protected abstract boolean abort(TEnvironment env); /** * The user-level code of the procedure may have some state to - * persist (e.g. input arguments or current position in the processing state) to - * be able to resume on failure. + * persist (e.g. input arguments) to be able to resume on failure. * @param stream the stream that will contain the user serialized data */ protected abstract void serializeStateData(final OutputStream stream) @@ -177,17 +143,11 @@ public abstract class Procedure<TEnvironment> implements Comparable<Procedure<TE throws IOException; /** - * The user should override this method if they need a lock on an Entity. - * A lock can be anything, and it is up to the implementor. The Procedure - * Framework will call this method just before it invokes {@link #execute(Object)}. - * It calls {@link #releaseLock(Object)} after the call to execute. - * - * <p>If you need to hold the lock for the life of the Procdure -- i.e. you do not - * want any other Procedure interfering while this Procedure is running, see - * {@link #holdLock(Object)}. + * The user should override this method, and try to take a lock if necessary. + * A lock can be anything, and it is up to the implementor. * * <p>Example: in our Master we can execute request in parallel for different tables. - * We can create t1 and create t2 and these creates can be executed at the same time. + * We can create t1 and create t2 and this can be executed at the same time. * Anything else on t1/t2 is queued waiting that specific table create to happen. * * <p>There are 3 LockState: @@ -213,9 +173,6 @@ public abstract class Procedure<TEnvironment> implements Comparable<Procedure<TE /** * Used to keep the procedure lock even when the procedure is yielding or suspended. - * Must implement {@link #hasLock(Object)} if you want to hold the lock for life - * of the Procedure. - * @see #hasLock(Object) * @return true if the procedure should hold on the lock until completionCleanup() */ protected boolean holdLock(final TEnvironment env) { @@ -223,11 +180,8 @@ public abstract class Procedure<TEnvironment> implements Comparable<Procedure<TE } /** - * This is used in conjunction with {@link #holdLock(Object)}. If {@link #holdLock(Object)} - * returns true, the procedure executor will call acquireLock() once and thereafter - * not call {@link #releaseLock(Object)} until the Procedure is done (Normally, it calls - * release/acquire around each invocation of {@link #execute(Object)}. - * @see #holdLock(Object) + * This is used in conjuction with holdLock(). If holdLock() is true + * the procedure executor will not call acquireLock() if hasLock() is true. * @return true if the procedure has the lock, false otherwise. 
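The abort() contract just above spells out the usual pattern, an AtomicBoolean flipped by abort() and polled by execute(), and the surrounding lock methods (acquireLock, releaseLock, holdLock, hasLock) are all visible in this hunk. The following is a small sketch of both ideas under stated assumptions: AbortableTableProcedure is a made-up name, the in-memory boolean stands in for whatever lock an implementor really uses, and execute()/rollback() are left to concrete subclasses so only the pieces discussed here are shown.

import java.util.concurrent.atomic.AtomicBoolean;

import org.apache.hadoop.hbase.procedure2.Procedure;

/** Illustrative only: AtomicBoolean abort pattern plus a lock held for the procedure's life. */
public abstract class AbortableTableProcedure<TEnv> extends Procedure<TEnv> {

  // abort() may arrive more than once from the client; a flag keeps the handling idempotent.
  private final AtomicBoolean aborted = new AtomicBoolean(false);
  private boolean lockHeld = false;

  @Override
  protected boolean abort(TEnv env) {
    // Just a notification, not Thread.interrupt(): execute() decides when to act on it.
    aborted.set(true);
    return true;
  }

  /** execute() implementations poll this between steps and bail out cleanly when set. */
  protected boolean isAborted() {
    return aborted.get();
  }

  @Override
  protected LockState acquireLock(TEnv env) {
    // A "lock" can be anything the implementor chooses; here it is a simple flag.
    lockHeld = true;
    return LockState.LOCK_ACQUIRED;
  }

  @Override
  protected void releaseLock(TEnv env) {
    lockHeld = false;
  }

  @Override
  protected boolean holdLock(TEnv env) {
    return true;   // keep the lock across yields and suspensions until completionCleanup()
  }

  @Override
  protected boolean hasLock(TEnv env) {
    return lockHeld;   // with holdLock() true, tells the executor not to call acquireLock() again
  }
}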
*/ protected boolean hasLock(final TEnvironment env) { @@ -255,15 +209,14 @@ public abstract class Procedure<TEnvironment> implements Comparable<Procedure<TE /** * Called when the procedure is marked as completed (success or rollback). * The procedure implementor may use this method to cleanup in-memory states. - * This operation will not be retried on failure. If a procedure took a lock, - * it will have been released when this method runs. + * This operation will not be retried on failure. */ protected void completionCleanup(final TEnvironment env) { // no-op } /** - * By default, the procedure framework/executor will try to run procedures start to finish. + * By default, the executor will try to run procedures start to finish. * Return true to make the executor yield between each execution step to * give other procedures a chance to run. * @param env the environment passed to the ProcedureExecutor @@ -322,30 +275,27 @@ public abstract class Procedure<TEnvironment> implements Comparable<Procedure<TE protected StringBuilder toStringSimpleSB() { final StringBuilder sb = new StringBuilder(); - sb.append("pid="); + sb.append("procId="); sb.append(getProcId()); if (hasParent()) { - sb.append(", ppid="); + sb.append(", parentProcId="); sb.append(getParentProcId()); } - /** - * Enable later when this is being used. - * Currently owner not used. if (hasOwner()) { sb.append(", owner="); sb.append(getOwner()); - }*/ + } - sb.append(", state="); // pState for Procedure State as opposed to any other kind. + sb.append(", state="); toStringState(sb); if (hasException()) { sb.append(", exception=" + getException()); } - sb.append("; "); + sb.append(", "); toStringClassDetails(sb); return sb; @@ -361,7 +311,7 @@ public abstract class Procedure<TEnvironment> implements Comparable<Procedure<TE sb.append(" submittedTime="); sb.append(getSubmittedTime()); - sb.append(", lastUpdate="); + sb.append(" lastUpdate="); sb.append(getLastUpdate()); final int[] stackIndices = getStackIndexes(); @@ -381,8 +331,7 @@ public abstract class Procedure<TEnvironment> implements Comparable<Procedure<TE } /** - * Called from {@link #toString()} when interpolating {@link Procedure} State. - * Allows decorating generic Procedure State with Procedure particulars. + * Called from {@link #toString()} when interpolating {@link Procedure} state * @param builder Append current {@link ProcedureState} */ protected void toStringState(StringBuilder builder) { @@ -578,6 +527,25 @@ public abstract class Procedure<TEnvironment> implements Comparable<Procedure<TE // ============================================================================================== /** + * Procedure has states which are defined in proto file. At some places in the code, we + * need to determine more about those states. Following Methods help determine: + * + * {@link #isFailed()} - A procedure has executed at least once and has failed. The procedure + * may or may not have rolled back yet. Any procedure in FAILED state + * will be eventually moved to ROLLEDBACK state. + * + * {@link #isSuccess()} - A procedure is completed successfully without any exception. + * + * {@link #isFinished()} - As a procedure in FAILED state will be tried forever for rollback, only + * condition when scheduler/ executor will drop procedure from further + * processing is when procedure state is ROLLEDBACK or isSuccess() + * returns true. This is a terminal state of the procedure. 
+ * + * {@link #isWaiting()} - Procedure is in one of the two waiting states ({@link + * ProcedureState#WAITING}, {@link ProcedureState#WAITING_TIMEOUT}). + */ + + /** * @return true if the procedure is in a RUNNABLE state. */ protected synchronized boolean isRunnable() { @@ -680,10 +648,6 @@ public abstract class Procedure<TEnvironment> implements Comparable<Procedure<TE @InterfaceAudience.Private protected synchronized void setChildrenLatch(final int numChildren) { this.childrenLatch = numChildren; - if (LOG.isTraceEnabled()) { - LOG.trace("CHILD LATCH INCREMENT SET " + - this.childrenLatch, new Throwable(this.toString())); - } } /** @@ -693,34 +657,15 @@ public abstract class Procedure<TEnvironment> implements Comparable<Procedure<TE protected synchronized void incChildrenLatch() { // TODO: can this be inferred from the stack? I think so... this.childrenLatch++; - if (LOG.isTraceEnabled()) { - LOG.trace("CHILD LATCH INCREMENT " + this.childrenLatch, new Throwable(this.toString())); - } } /** * Called by the ProcedureExecutor to notify that one of the sub-procedures has completed. */ @InterfaceAudience.Private - private synchronized boolean childrenCountDown() { + protected synchronized boolean childrenCountDown() { assert childrenLatch > 0: this; - boolean b = --childrenLatch == 0; - if (LOG.isTraceEnabled()) { - LOG.trace("CHILD LATCH DECREMENT " + childrenLatch, new Throwable(this.toString())); - } - return b; - } - - /** - * Try to set this procedure into RUNNABLE state. - * Succeeds if all subprocedures/children are done. - * @return True if we were able to move procedure to RUNNABLE state. - */ - synchronized boolean tryRunnable() { - // Don't use isWaiting in the below; it returns true for WAITING and WAITING_TIMEOUT - boolean b = getState() == ProcedureState.WAITING && childrenCountDown(); - if (b) setState(ProcedureState.RUNNABLE); - return b; + return --childrenLatch == 0; } @InterfaceAudience.Private @@ -787,11 +732,9 @@ public abstract class Procedure<TEnvironment> implements Comparable<Procedure<TE /** * Internal method called by the ProcedureExecutor that starts the user-level code execute(). - * @throws ProcedureSuspendedException This is used when procedure wants to halt processing and - * skip out without changing states or releasing any locks held. */ @InterfaceAudience.Private - protected Procedure<TEnvironment>[] doExecute(final TEnvironment env) + protected Procedure[] doExecute(final TEnvironment env) throws ProcedureYieldException, ProcedureSuspendedException, InterruptedException { try { updateTimestamp(); @@ -832,7 +775,7 @@ public abstract class Procedure<TEnvironment> implements Comparable<Procedure<TE } @Override - public int compareTo(final Procedure<TEnvironment> other) { + public int compareTo(final Procedure other) { return Long.compare(getProcId(), other.getProcId()); } @@ -858,8 +801,7 @@ public abstract class Procedure<TEnvironment> implements Comparable<Procedure<TE * Helper to lookup the root Procedure ID given a specified procedure. */ @InterfaceAudience.Private - protected static Long getRootProcedureId(final Map<Long, Procedure> procedures, - Procedure<?> proc) { + protected static Long getRootProcedureId(final Map<Long, Procedure> procedures, Procedure proc) { while (proc.hasParent()) { proc = procedures.get(proc.getParentProcId()); if (proc == null) return null; @@ -872,10 +814,10 @@ public abstract class Procedure<TEnvironment> implements Comparable<Procedure<TE * @param b the second procedure to be compared. 
* @return true if the two procedures have the same parent */ - public static boolean haveSameParent(final Procedure<?> a, final Procedure<?> b) { + public static boolean haveSameParent(final Procedure a, final Procedure b) { if (a.hasParent() && b.hasParent()) { return a.getParentProcId() == b.getParentProcId(); } return false; } -} \ No newline at end of file +} http://git-wip-us.apache.org/repos/asf/hbase/blob/a3c5a744/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureEvent.java ---------------------------------------------------------------------- diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureEvent.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureEvent.java index adb27a8..43cce3a 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureEvent.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureEvent.java @@ -50,6 +50,6 @@ public class ProcedureEvent<T> { @Override public String toString() { return getClass().getSimpleName() + " for " + object + ", ready=" + isReady() + - ", " + getSuspendedProcedures(); + ", suspended procedures count=" + getSuspendedProcedures().size(); } } \ No newline at end of file
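The tail of Procedure.java touched here carries two small parent-chain helpers: getRootProcedureId(), which walks parent ids up to the root, and haveSameParent(), which is only true when both procedures actually have a parent. Below is a brief sketch of that walk using the post-revert raw Procedure type; ParentChainExample and findRootProcedureId are illustrative names, and the loop simply mirrors the logic visible in the hunk above.

import java.util.Map;

import org.apache.hadoop.hbase.procedure2.Procedure;

public class ParentChainExample {

  /**
   * Mirrors the getRootProcedureId() walk: follow parentProcId through the map
   * until a procedure without a parent (the root) is reached. Returns null when
   * the chain is broken, i.e. some ancestor is no longer in the map.
   */
  static Long findRootProcedureId(Map<Long, Procedure> procedures, Procedure proc) {
    while (proc.hasParent()) {
      proc = procedures.get(proc.getParentProcId());
      if (proc == null) {
        return null;
      }
    }
    return proc.getProcId();
  }

  static void describe(Procedure a, Procedure b) {
    // False unless both a and b have a parent and it is the same one.
    System.out.println("same parent: " + Procedure.haveSameParent(a, b));
  }
}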
