http://git-wip-us.apache.org/repos/asf/hbase/blob/a3c5a744/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/SnapshotProtos.java ---------------------------------------------------------------------- diff --git a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/SnapshotProtos.java b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/SnapshotProtos.java index 4d5953c..454e3bc 100644 --- a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/SnapshotProtos.java +++ b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/SnapshotProtos.java @@ -357,7 +357,7 @@ public final class SnapshotProtos { if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { - org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString bs = + org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString bs = (org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { @@ -373,7 +373,7 @@ public final class SnapshotProtos { getNameBytes() { java.lang.Object ref = name_; if (ref instanceof java.lang.String) { - org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString b = + org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString b = org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); name_ = b; @@ -407,7 +407,7 @@ public final class SnapshotProtos { if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { - org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString bs = + org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString bs = (org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { @@ -427,7 +427,7 @@ public final class SnapshotProtos { getTableBytes() { java.lang.Object ref = table_; if (ref instanceof java.lang.String) { - org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString b = + org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString b = org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); table_ = b; @@ -499,7 +499,7 @@ public final class SnapshotProtos { if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { - org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString bs = + org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString bs = (org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { @@ -515,7 +515,7 @@ public final class SnapshotProtos { getOwnerBytes() { java.lang.Object ref = owner_; if (ref instanceof java.lang.String) { - org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString b = + org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString b = org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); owner_ = b; @@ -1047,7 +1047,7 @@ public final class SnapshotProtos { getNameBytes() { java.lang.Object ref = name_; if (ref instanceof String) { - org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString b = + org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString b = org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); name_ = b; @@ -1135,7 +1135,7 @@ public final class SnapshotProtos { 
getTableBytes() { java.lang.Object ref = table_; if (ref instanceof String) { - org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString b = + org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString b = org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); table_ = b; @@ -1323,7 +1323,7 @@ public final class SnapshotProtos { getOwnerBytes() { java.lang.Object ref = owner_; if (ref instanceof String) { - org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString b = + org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString b = org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); owner_ = b; @@ -1473,7 +1473,7 @@ public final class SnapshotProtos { * <code>optional .hbase.pb.UsersAndPermissions users_and_permissions = 7;</code> */ private org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3< - org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.UsersAndPermissions, org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.UsersAndPermissions.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.UsersAndPermissionsOrBuilder> + org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.UsersAndPermissions, org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.UsersAndPermissions.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.UsersAndPermissionsOrBuilder> getUsersAndPermissionsFieldBuilder() { if (usersAndPermissionsBuilder_ == null) { usersAndPermissionsBuilder_ = new org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3< @@ -6366,7 +6366,7 @@ public final class SnapshotProtos { private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor internal_static_hbase_pb_SnapshotDescription_descriptor; - private static final + private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hbase_pb_SnapshotDescription_fieldAccessorTable; private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
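The SnapshotProtos.java hunks above are whitespace-only: the regenerated file drops trailing spaces and the accessors themselves are unchanged. Those accessors all follow the protobuf runtime's lazy String/ByteString caching idiom. A minimal standalone sketch of that idiom follows; the class name is illustrative and the import is the unshaded protobuf ByteString (HBase shades it under org.apache.hadoop.hbase.shaded.com.google.protobuf), so nothing below is taken verbatim from the generated file.

import com.google.protobuf.ByteString;

// Sketch of the generated accessor pattern: the field holds either a String or a
// ByteString, and each getter caches the converted form back into the field.
public class LazyStringFieldSketch {
  private Object name_ = "";

  public String getName() {
    Object ref = name_;
    if (ref instanceof String) {
      return (String) ref;
    }
    ByteString bs = (ByteString) ref;
    String s = bs.toStringUtf8();
    if (bs.isValidUtf8()) {
      name_ = s;  // cache the decoded String so later calls skip the UTF-8 decode
    }
    return s;
  }

  public ByteString getNameBytes() {
    Object ref = name_;
    if (ref instanceof String) {
      ByteString b = ByteString.copyFromUtf8((String) ref);
      name_ = b;  // cache the encoded ByteString for later serialization
      return b;
    }
    return (ByteString) ref;
  }
}

Repeated calls then return the cached form without re-converting, which is why the generated getters for name_, table_ and owner_ mutate the field in place.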
http://git-wip-us.apache.org/repos/asf/hbase/blob/a3c5a744/hbase-protocol-shaded/src/main/protobuf/Admin.proto ---------------------------------------------------------------------- diff --git a/hbase-protocol-shaded/src/main/protobuf/Admin.proto b/hbase-protocol-shaded/src/main/protobuf/Admin.proto index fe95fd5..39e73b6 100644 --- a/hbase-protocol-shaded/src/main/protobuf/Admin.proto +++ b/hbase-protocol-shaded/src/main/protobuf/Admin.proto @@ -39,10 +39,6 @@ message GetRegionInfoResponse { required RegionInfo region_info = 1; optional CompactionState compaction_state = 2; optional bool isRecovering = 3; - // True if region is splittable, false otherwise. - optional bool splittable = 4; - // True if region is mergeable, false otherwise. - optional bool mergeable = 5; enum CompactionState { NONE = 0; @@ -124,6 +120,18 @@ message CloseRegionResponse { } /** + * Closes the specified region(s) for + * split or merge + */ +message CloseRegionForSplitOrMergeRequest { + repeated RegionSpecifier region = 1; +} + +message CloseRegionForSplitOrMergeResponse { + required bool closed = 1; +} + +/** * Flushes the MemStore of the specified region. * <p> * This method is synchronous. @@ -260,32 +268,6 @@ message ClearCompactionQueuesRequest { message ClearCompactionQueuesResponse { } -message ExecuteProceduresRequest { - repeated OpenRegionRequest open_region = 1; - repeated CloseRegionRequest close_region = 2; -} - -message ExecuteProceduresResponse { - repeated OpenRegionResponse open_region = 1; - repeated CloseRegionResponse close_region = 2; -} - -/** - * Merges the specified regions. - * <p> - * This method currently closes the regions and then merges them - */ -message MergeRegionsRequest { - required RegionSpecifier region_a = 1; - required RegionSpecifier region_b = 2; - optional bool forcible = 3 [default = false]; - // wall clock time from master - optional uint64 master_system_time = 4; -} - -message MergeRegionsResponse { -} - service AdminService { rpc GetRegionInfo(GetRegionInfoRequest) returns(GetRegionInfoResponse); @@ -305,6 +287,9 @@ service AdminService { rpc CloseRegion(CloseRegionRequest) returns(CloseRegionResponse); + rpc CloseRegionForSplitOrMerge(CloseRegionForSplitOrMergeRequest) + returns(CloseRegionForSplitOrMergeResponse); + rpc FlushRegion(FlushRegionRequest) returns(FlushRegionResponse); @@ -344,10 +329,4 @@ service AdminService { /** Fetches the RegionServer's view of space quotas */ rpc GetSpaceQuotaSnapshots(GetSpaceQuotaSnapshotsRequest) returns(GetSpaceQuotaSnapshotsResponse); - - rpc ExecuteProcedures(ExecuteProceduresRequest) - returns(ExecuteProceduresResponse); - - rpc MergeRegions(MergeRegionsRequest) - returns(MergeRegionsResponse); } http://git-wip-us.apache.org/repos/asf/hbase/blob/a3c5a744/hbase-protocol-shaded/src/main/protobuf/Master.proto ---------------------------------------------------------------------- diff --git a/hbase-protocol-shaded/src/main/protobuf/Master.proto b/hbase-protocol-shaded/src/main/protobuf/Master.proto index 7015fcb..3d6ae1b 100644 --- a/hbase-protocol-shaded/src/main/protobuf/Master.proto +++ b/hbase-protocol-shaded/src/main/protobuf/Master.proto @@ -81,21 +81,6 @@ message MoveRegionRequest { message MoveRegionResponse { } - /** - * Dispatch merging the specified regions. 
- */ -message DispatchMergingRegionsRequest { - required RegionSpecifier region_a = 1; - required RegionSpecifier region_b = 2; - optional bool forcible = 3 [default = false]; - optional uint64 nonce_group = 4 [default = 0]; - optional uint64 nonce = 5 [default = 0]; -} - -message DispatchMergingRegionsResponse { - optional uint64 proc_id = 1; -} - /** * Merging the specified regions in a table. */ @@ -134,17 +119,6 @@ message OfflineRegionResponse { /* Table-level protobufs */ -message SplitTableRegionRequest { - required RegionInfo region_info = 1; - required bytes split_row = 2; - optional uint64 nonce_group = 3 [default = 0]; - optional uint64 nonce = 4 [default = 0]; -} - -message SplitTableRegionResponse { - optional uint64 proc_id = 1; -} - message CreateTableRequest { required TableSchema table_schema = 1; repeated bytes split_keys = 2; @@ -366,7 +340,6 @@ message RunCatalogScanRequest { } message RunCatalogScanResponse { - // This is how many archiving tasks we started as a result of this scan. optional int32 scan_result = 1; } @@ -667,10 +640,6 @@ service MasterService { rpc ModifyColumn(ModifyColumnRequest) returns(ModifyColumnResponse); - /** Master dispatch merging the regions */ - rpc DispatchMergingRegions(DispatchMergingRegionsRequest) - returns(DispatchMergingRegionsResponse); - /** Move the region region to the destination server. */ rpc MoveRegion(MoveRegionRequest) returns(MoveRegionResponse); @@ -701,12 +670,6 @@ service MasterService { rpc OfflineRegion(OfflineRegionRequest) returns(OfflineRegionResponse); - /** - * Split region - */ - rpc SplitRegion(SplitTableRegionRequest) - returns(SplitTableRegionResponse); - /** Deletes a table */ rpc DeleteTable(DeleteTableRequest) returns(DeleteTableResponse); http://git-wip-us.apache.org/repos/asf/hbase/blob/a3c5a744/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto ---------------------------------------------------------------------- diff --git a/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto b/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto index 14d6d56..2c70882 100644 --- a/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto +++ b/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto @@ -265,31 +265,38 @@ message RestoreSnapshotStateData { repeated RestoreParentToChildRegionsPair parent_to_child_regions_pair_list = 7; } -enum DispatchMergingRegionsState { - DISPATCH_MERGING_REGIONS_PREPARE = 1; - DISPATCH_MERGING_REGIONS_PRE_OPERATION = 2; - DISPATCH_MERGING_REGIONS_MOVE_REGION_TO_SAME_RS = 3; - DISPATCH_MERGING_REGIONS_DO_MERGE_IN_RS = 4; - DISPATCH_MERGING_REGIONS_POST_OPERATION = 5; +enum MergeTableRegionsState { + MERGE_TABLE_REGIONS_PREPARE = 1; + MERGE_TABLE_REGIONS_MOVE_REGION_TO_SAME_RS = 2; + MERGE_TABLE_REGIONS_PRE_MERGE_OPERATION = 3; + MERGE_TABLE_REGIONS_SET_MERGING_TABLE_STATE = 4; + MERGE_TABLE_REGIONS_CLOSE_REGIONS = 5; + MERGE_TABLE_REGIONS_CREATE_MERGED_REGION = 6; + MERGE_TABLE_REGIONS_PRE_MERGE_COMMIT_OPERATION = 7; + MERGE_TABLE_REGIONS_UPDATE_META = 8; + MERGE_TABLE_REGIONS_POST_MERGE_COMMIT_OPERATION = 9; + MERGE_TABLE_REGIONS_OPEN_MERGED_REGION = 10; + MERGE_TABLE_REGIONS_POST_OPERATION = 11; } -message DispatchMergingRegionsStateData { +message MergeTableRegionsStateData { required UserInformation user_info = 1; - required TableName table_name = 2; - repeated RegionInfo region_info = 3; - optional bool forcible = 4; + repeated RegionInfo region_info = 2; + required RegionInfo merged_region_info = 3; + optional bool forcible = 4 [default = 
false]; } enum SplitTableRegionState { SPLIT_TABLE_REGION_PREPARE = 1; SPLIT_TABLE_REGION_PRE_OPERATION = 2; - SPLIT_TABLE_REGION_CLOSE_PARENT_REGION = 3; - SPLIT_TABLE_REGION_CREATE_DAUGHTER_REGIONS = 4; - SPLIT_TABLE_REGION_PRE_OPERATION_BEFORE_PONR = 5; - SPLIT_TABLE_REGION_UPDATE_META = 6; - SPLIT_TABLE_REGION_PRE_OPERATION_AFTER_PONR = 7; - SPLIT_TABLE_REGION_OPEN_CHILD_REGIONS = 8; - SPLIT_TABLE_REGION_POST_OPERATION = 9; + SPLIT_TABLE_REGION_SET_SPLITTING_TABLE_STATE = 3; + SPLIT_TABLE_REGION_CLOSE_PARENT_REGION = 4; + SPLIT_TABLE_REGION_CREATE_DAUGHTER_REGIONS = 5; + SPLIT_TABLE_REGION_PRE_OPERATION_BEFORE_PONR = 6; + SPLIT_TABLE_REGION_UPDATE_META = 7; + SPLIT_TABLE_REGION_PRE_OPERATION_AFTER_PONR = 8; + SPLIT_TABLE_REGION_OPEN_CHILD_REGIONS = 9; + SPLIT_TABLE_REGION_POST_OPERATION = 10; } message SplitTableRegionStateData { @@ -298,29 +305,6 @@ message SplitTableRegionStateData { repeated RegionInfo child_region_info = 3; } -enum MergeTableRegionsState { - MERGE_TABLE_REGIONS_PREPARE = 1; - MERGE_TABLE_REGIONS_PRE_OPERATION = 2; - MERGE_TABLE_REGIONS_MOVE_REGION_TO_SAME_RS = 3; - MERGE_TABLE_REGIONS_PRE_MERGE_OPERATION = 4; - MERGE_TABLE_REGIONS_SET_MERGING_TABLE_STATE = 5; - MERGE_TABLE_REGIONS_CLOSE_REGIONS = 6; - MERGE_TABLE_REGIONS_CREATE_MERGED_REGION = 7; - MERGE_TABLE_REGIONS_PRE_MERGE_COMMIT_OPERATION = 8; - MERGE_TABLE_REGIONS_UPDATE_META = 9; - MERGE_TABLE_REGIONS_POST_MERGE_COMMIT_OPERATION = 10; - MERGE_TABLE_REGIONS_OPEN_MERGED_REGION = 11; - MERGE_TABLE_REGIONS_POST_OPERATION = 12; -} - -message MergeTableRegionsStateData { - required UserInformation user_info = 1; - repeated RegionInfo region_info = 2; - optional RegionInfo merged_region_info = 3; - optional bool forcible = 4 [default = false]; -} - - message ServerCrashStateData { required ServerName server_name = 1; optional bool distributed_log_replay = 2; @@ -342,56 +326,3 @@ enum ServerCrashState { SERVER_CRASH_WAIT_ON_ASSIGN = 9; SERVER_CRASH_FINISH = 100; } - -enum RegionTransitionState { - REGION_TRANSITION_QUEUE = 1; - REGION_TRANSITION_DISPATCH = 2; - REGION_TRANSITION_FINISH = 3; -} - -message AssignRegionStateData { - required RegionTransitionState transition_state = 1; - required RegionInfo region_info = 2; - optional bool force_new_plan = 3 [default = false]; - optional ServerName target_server = 4; -} - -message UnassignRegionStateData { - required RegionTransitionState transition_state = 1; - required RegionInfo region_info = 2; - optional ServerName destination_server = 3; - optional bool force = 4 [default = false]; -} - -enum MoveRegionState { - MOVE_REGION_UNASSIGN = 1; - MOVE_REGION_ASSIGN = 2; -} - -message MoveRegionStateData { - optional RegionInfo region_info = 1; - required ServerName source_server = 2; - required ServerName destination_server = 3; -} - -enum GCRegionState { - GC_REGION_PREPARE = 1; - GC_REGION_ARCHIVE = 2; - GC_REGION_PURGE_METADATA = 3; -} - -message GCRegionStateData { - required RegionInfo region_info = 1; -} - -enum GCMergedRegionsState { - GC_MERGED_REGIONS_PREPARE = 1; - GC_MERGED_REGIONS_PURGE = 2; - GC_REGION_EDIT_METADATA = 3; -} - -message GCMergedRegionsStateData { - required RegionInfo parent_a = 1; - required RegionInfo parent_b = 2; - required RegionInfo merged_child = 3; -} http://git-wip-us.apache.org/repos/asf/hbase/blob/a3c5a744/hbase-protocol-shaded/src/main/protobuf/RegionServerStatus.proto ---------------------------------------------------------------------- diff --git a/hbase-protocol-shaded/src/main/protobuf/RegionServerStatus.proto 
b/hbase-protocol-shaded/src/main/protobuf/RegionServerStatus.proto index 7d35df0..0b765d6 100644 --- a/hbase-protocol-shaded/src/main/protobuf/RegionServerStatus.proto +++ b/hbase-protocol-shaded/src/main/protobuf/RegionServerStatus.proto @@ -26,6 +26,7 @@ option java_generate_equals_and_hash = true; option optimize_for = SPEED; import "HBase.proto"; +import "Master.proto"; import "ClusterStatus.proto"; message RegionServerStartupRequest { @@ -126,6 +127,19 @@ message ReportRegionStateTransitionResponse { optional string error_message = 1; } +/** + * Splits the specified region. + */ +message SplitTableRegionRequest { + required RegionInfo region_info = 1; + required bytes split_row = 2; + optional uint64 nonce_group = 3 [default = 0]; + optional uint64 nonce = 4 [default = 0]; +} + +message SplitTableRegionResponse { + optional uint64 proc_id = 1; +} message RegionSpaceUse { optional RegionInfo region_info = 1; // A region identifier @@ -174,6 +188,18 @@ service RegionServerStatusService { returns(ReportRegionStateTransitionResponse); /** + * Split region + */ + rpc SplitRegion(SplitTableRegionRequest) + returns(SplitTableRegionResponse); + + /** + * Get procedure result + */ + rpc getProcedureResult(GetProcedureResultRequest) + returns(GetProcedureResultResponse); + + /** * Reports Region filesystem space use */ rpc ReportRegionSpaceUse(RegionSpaceUseReportRequest) http://git-wip-us.apache.org/repos/asf/hbase/blob/a3c5a744/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java ---------------------------------------------------------------------- diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java index 430c8fc..718c7c1 100644 --- a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java +++ b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java @@ -37,9 +37,8 @@ import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.constraint.ConstraintException; +import org.apache.hadoop.hbase.master.AssignmentManager; import org.apache.hadoop.hbase.master.HMaster; -import org.apache.hadoop.hbase.master.assignment.AssignmentManager; -import org.apache.hadoop.hbase.master.assignment.RegionStates.RegionStateNode; import org.apache.hadoop.hbase.master.LoadBalancer; import org.apache.hadoop.hbase.master.MasterServices; import org.apache.hadoop.hbase.master.RegionPlan; @@ -119,14 +118,14 @@ public class RSGroupAdminServer implements RSGroupAdmin { LinkedList<HRegionInfo> regions = new LinkedList<>(); for (Map.Entry<HRegionInfo, ServerName> el : master.getAssignmentManager().getRegionStates().getRegionAssignments().entrySet()) { - if (el.getValue() == null) continue; if (el.getValue().getAddress().equals(server)) { addRegion(regions, el.getKey()); } } - for (RegionStateNode state : master.getAssignmentManager().getRegionsInTransition()) { - if (state.getRegionLocation().getAddress().equals(server)) { - addRegion(regions, state.getRegionInfo()); + for (RegionState state: + this.master.getAssignmentManager().getRegionStates().getRegionsInTransition()) { + if (state.getServerName().getAddress().equals(server)) { + addRegion(regions, state.getRegion()); } } return regions; @@ -535,7 +534,7 @@ public class RSGroupAdminServer implements RSGroupAdmin { LOG.info("RSGroup 
balance " + groupName + " starting with plan count: " + plans.size()); for (RegionPlan plan: plans) { LOG.info("balance " + plan); - assignmentManager.moveAsync(plan); + assignmentManager.balance(plan); } LOG.info("RSGroup balance " + groupName + " completed after " + (System.currentTimeMillis()-startTime) + " seconds"); http://git-wip-us.apache.org/repos/asf/hbase/blob/a3c5a744/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java ---------------------------------------------------------------------- diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java index e2dd91c..5cdfad2 100644 --- a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java +++ b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java @@ -318,8 +318,7 @@ public class RSGroupBasedLoadBalancer implements RSGroupableBalancer { } private Map<ServerName, List<HRegionInfo>> correctAssignments( - Map<ServerName, List<HRegionInfo>> existingAssignments) - throws HBaseIOException{ + Map<ServerName, List<HRegionInfo>> existingAssignments){ Map<ServerName, List<HRegionInfo>> correctAssignments = new TreeMap<>(); List<HRegionInfo> misplacedRegions = new LinkedList<>(); correctAssignments.put(LoadBalancer.BOGUS_SERVER_NAME, new LinkedList<>()); @@ -347,11 +346,7 @@ public class RSGroupBasedLoadBalancer implements RSGroupableBalancer { //TODO bulk unassign? //unassign misplaced regions, so that they are assigned to correct groups. for(HRegionInfo info: misplacedRegions) { - try { - this.masterServices.getAssignmentManager().unassign(info); - } catch (IOException e) { - throw new HBaseIOException(e); - } + this.masterServices.getAssignmentManager().unassign(info); } return correctAssignments; } http://git-wip-us.apache.org/repos/asf/hbase/blob/a3c5a744/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRSGroupBasedLoadBalancer.java ---------------------------------------------------------------------- diff --git a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRSGroupBasedLoadBalancer.java b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRSGroupBasedLoadBalancer.java index 0f1e849..83fe122 100644 --- a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRSGroupBasedLoadBalancer.java +++ b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRSGroupBasedLoadBalancer.java @@ -32,7 +32,7 @@ import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.rsgroup.RSGroupBasedLoadBalancer; import org.apache.hadoop.hbase.rsgroup.RSGroupInfo; import org.apache.hadoop.hbase.rsgroup.RSGroupInfoManager; -import org.apache.hadoop.hbase.master.assignment.AssignmentManager; +import org.apache.hadoop.hbase.master.AssignmentManager; import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.master.MasterServices; import org.apache.hadoop.hbase.master.RegionPlan; http://git-wip-us.apache.org/repos/asf/hbase/blob/a3c5a744/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroups.java ---------------------------------------------------------------------- diff --git a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroups.java b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroups.java index 264ea39..ae1485c 100644 --- 
a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroups.java +++ b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroups.java @@ -51,13 +51,11 @@ import org.junit.AfterClass; import org.junit.Assert; import org.junit.Before; import org.junit.BeforeClass; -import org.junit.Ignore; import org.junit.Test; import org.junit.experimental.categories.Category; import com.google.common.collect.Sets; -@Ignore // TODO: Fix after HBASE-14614 goes in. @Category({MediumTests.class}) public class TestRSGroups extends TestRSGroupsBase { protected static final Log LOG = LogFactory.getLog(TestRSGroups.class); @@ -149,7 +147,7 @@ public class TestRSGroups extends TestRSGroupsBase { }); } - @Ignore @Test + @Test public void testBasicStartUp() throws IOException { RSGroupInfo defaultInfo = rsGroupAdmin.getRSGroupInfo(RSGroupInfo.DEFAULT_GROUP); assertEquals(4, defaultInfo.getServers().size()); @@ -159,7 +157,7 @@ public class TestRSGroups extends TestRSGroupsBase { assertEquals(3, count); } - @Ignore @Test + @Test public void testNamespaceCreateAndAssign() throws Exception { LOG.info("testNamespaceCreateAndAssign"); String nsName = tablePrefix+"_foo"; @@ -185,7 +183,7 @@ public class TestRSGroups extends TestRSGroupsBase { Assert.assertEquals(1, ProtobufUtil.getOnlineRegions(rs).size()); } - @Ignore @Test + @Test public void testDefaultNamespaceCreateAndAssign() throws Exception { LOG.info("testDefaultNamespaceCreateAndAssign"); final byte[] tableName = Bytes.toBytes(tablePrefix + "_testCreateAndAssign"); @@ -203,7 +201,7 @@ public class TestRSGroups extends TestRSGroupsBase { }); } - @Ignore @Test + @Test public void testNamespaceConstraint() throws Exception { String nsName = tablePrefix+"_foo"; String groupName = tablePrefix+"_foo"; @@ -238,7 +236,7 @@ public class TestRSGroups extends TestRSGroupsBase { } } - @Ignore @Test + @Test public void testGroupInfoMultiAccessing() throws Exception { RSGroupInfoManager manager = rsGroupAdminEndpoint.getGroupInfoManager(); RSGroupInfo defaultGroup = manager.getRSGroup("default"); @@ -249,7 +247,7 @@ public class TestRSGroups extends TestRSGroupsBase { it.next(); } - @Ignore @Test + @Test public void testMisplacedRegions() throws Exception { final TableName tableName = TableName.valueOf(tablePrefix+"_testMisplacedRegions"); LOG.info("testMisplacedRegions"); @@ -277,7 +275,7 @@ public class TestRSGroups extends TestRSGroupsBase { }); } - @Ignore @Test + @Test public void testCloneSnapshot() throws Exception { byte[] FAMILY = Bytes.toBytes("test"); String snapshotName = tableName.getNameAsString() + "_snap"; http://git-wip-us.apache.org/repos/asf/hbase/blob/a3c5a744/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsOfflineMode.java ---------------------------------------------------------------------- diff --git a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsOfflineMode.java b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsOfflineMode.java index 8b200ab..4802ca4 100644 --- a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsOfflineMode.java +++ b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsOfflineMode.java @@ -37,7 +37,6 @@ import org.apache.hadoop.hbase.util.Bytes; import org.junit.AfterClass; import org.junit.Assert; import org.junit.BeforeClass; -import org.junit.Ignore; import org.junit.Rule; import org.junit.Test; import org.junit.experimental.categories.Category; @@ -99,7 +98,7 @@ public class 
TestRSGroupsOfflineMode { TEST_UTIL.shutdownMiniCluster(); } - @Ignore @Test + @Test public void testOffline() throws Exception, InterruptedException { // Table should be after group table name so it gets assigned later. final TableName failoverTable = TableName.valueOf(name.getMethodName()); http://git-wip-us.apache.org/repos/asf/hbase/blob/a3c5a744/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/AssignmentManagerStatusTmpl.jamon ---------------------------------------------------------------------- diff --git a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/AssignmentManagerStatusTmpl.jamon b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/AssignmentManagerStatusTmpl.jamon index b5e6dd0..76a85a9 100644 --- a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/AssignmentManagerStatusTmpl.jamon +++ b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/AssignmentManagerStatusTmpl.jamon @@ -18,9 +18,7 @@ limitations under the License. </%doc> <%import> org.apache.hadoop.hbase.HRegionInfo; -org.apache.hadoop.hbase.master.assignment.AssignmentManager; -org.apache.hadoop.hbase.master.assignment.AssignmentManager.RegionInTransitionStat; -org.apache.hadoop.hbase.master.assignment.RegionStates.RegionFailedOpen; +org.apache.hadoop.hbase.master.AssignmentManager; org.apache.hadoop.hbase.master.RegionState; org.apache.hadoop.conf.Configuration; org.apache.hadoop.hbase.HBaseConfiguration; @@ -37,12 +35,28 @@ int limit = 100; <%java SortedSet<RegionState> rit = assignmentManager .getRegionStates().getRegionsInTransitionOrderedByTimestamp(); -%> + Map<String, AtomicInteger> failedRegionTracker = assignmentManager.getFailedOpenTracker(); + %> <%if !rit.isEmpty() %> <%java> +HashSet<String> ritsOverThreshold = new HashSet<String>(); +HashSet<String> ritsTwiceThreshold = new HashSet<String>(); +// process the map to find region in transition details +Configuration conf = HBaseConfiguration.create(); +int ritThreshold = conf.getInt(HConstants.METRICS_RIT_STUCK_WARNING_THRESHOLD, 60000); +int numOfRITOverThreshold = 0; long currentTime = System.currentTimeMillis(); -RegionInTransitionStat ritStat = assignmentManager.computeRegionInTransitionStat(); +for (RegionState rs : rit) { + long ritTime = currentTime - rs.getStamp(); + if(ritTime > (ritThreshold * 2)) { + numOfRITOverThreshold++; + ritsTwiceThreshold.add(rs.getRegion().getEncodedName()); + } else if (ritTime > ritThreshold) { + numOfRITOverThreshold++; + ritsOverThreshold.add(rs.getRegion().getEncodedName()); + } +} int numOfRITs = rit.size(); int ritsPerPage = Math.min(5, numOfRITs); @@ -51,15 +65,15 @@ int numOfPages = (int) Math.ceil(numOfRITs * 1.0 / ritsPerPage); <section> <h2>Regions in Transition</h2> <p><% numOfRITs %> region(s) in transition. - <%if ritStat.hasRegionsTwiceOverThreshold() %> + <%if !ritsTwiceThreshold.isEmpty() %> <span class="label label-danger" style="font-size:100%;font-weight:normal"> - <%elseif ritStat.hasRegionsOverThreshold() %> + <%elseif !ritsOverThreshold.isEmpty() %> <span class="label label-warning" style="font-size:100%;font-weight:normal"> <%else> <span> </%if> - <% ritStat.getTotalRITsOverThreshold() %> region(s) in transition for - more than <% ritStat.getRITThreshold() %> milliseconds. + <% numOfRITOverThreshold %> region(s) in transition for + more than <% ritThreshold %> milliseconds. 
</span> </p> <div class="tabbable"> @@ -76,26 +90,25 @@ int numOfPages = (int) Math.ceil(numOfRITs * 1.0 / ritsPerPage); <th>State</th><th>RIT time (ms)</th> <th>Retries </th></tr> </%if> - <%if ritStat.isRegionTwiceOverThreshold(rs.getRegion()) %> - <tr class="alert alert-danger" role="alert"> - <%elseif ritStat.isRegionOverThreshold(rs.getRegion()) %> + <%if ritsOverThreshold.contains(rs.getRegion().getEncodedName()) %> <tr class="alert alert-warning" role="alert"> + <%elseif ritsTwiceThreshold.contains(rs.getRegion().getEncodedName()) %> + <tr class="alert alert-danger" role="alert"> <%else> <tr> </%if> <%java> String retryStatus = "0"; - RegionFailedOpen regionFailedOpen = assignmentManager - .getRegionStates().getFailedOpen(rs.getRegion()); - if (regionFailedOpen != null) { - retryStatus = Integer.toString(regionFailedOpen.getRetries()); + AtomicInteger numOpenRetries = failedRegionTracker.get( + rs.getRegion().getEncodedName()); + if (numOpenRetries != null ) { + retryStatus = Integer.toString(numOpenRetries.get()); } else if (rs.getState() == RegionState.State.FAILED_OPEN) { - retryStatus = "Failed"; + retryStatus = "Failed"; } </%java> <td><% rs.getRegion().getEncodedName() %></td><td> - <% HRegionInfo.getDescriptiveNameFromRegionStateForDisplay(rs, - assignmentManager.getConfiguration()) %></td> + <% HRegionInfo.getDescriptiveNameFromRegionStateForDisplay(rs, conf) %></td> <td><% (currentTime - rs.getStamp()) %> </td> <td> <% retryStatus %> </td> </tr> http://git-wip-us.apache.org/repos/asf/hbase/blob/a3c5a744/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon ---------------------------------------------------------------------- diff --git a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon index 14dfe0a..e1a47c5 100644 --- a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon +++ b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon @@ -41,7 +41,7 @@ org.apache.hadoop.hbase.TableName; org.apache.hadoop.hbase.client.Admin; org.apache.hadoop.hbase.client.MasterSwitchType; org.apache.hadoop.hbase.client.SnapshotDescription; -org.apache.hadoop.hbase.master.assignment.AssignmentManager; +org.apache.hadoop.hbase.master.AssignmentManager; org.apache.hadoop.hbase.master.DeadServer; org.apache.hadoop.hbase.master.HMaster; org.apache.hadoop.hbase.master.RegionState; http://git-wip-us.apache.org/repos/asf/hbase/blob/a3c5a744/hbase-server/src/main/java/org/apache/hadoop/hbase/RegionStateListener.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/RegionStateListener.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/RegionStateListener.java index 011ed1c..22725ec 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/RegionStateListener.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/RegionStateListener.java @@ -26,8 +26,7 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience; */ @InterfaceAudience.Private public interface RegionStateListener { -// TODO: Get rid of this!!!! Ain't there a better way to watch region -// state than introduce a whole new listening mechanism? St.Ack + /** * Process region split event. * @@ -46,7 +45,9 @@ public interface RegionStateListener { /** * Process region merge event. 
+ * + * @param hri An instance of HRegionInfo * @throws IOException */ - void onRegionMerged(HRegionInfo mergedRegion) throws IOException; + void onRegionMerged(HRegionInfo hri) throws IOException; } http://git-wip-us.apache.org/repos/asf/hbase/blob/a3c5a744/hbase-server/src/main/java/org/apache/hadoop/hbase/SplitLogTask.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/SplitLogTask.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/SplitLogTask.java index 3fef686..3ecaa86 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/SplitLogTask.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/SplitLogTask.java @@ -46,10 +46,6 @@ public class SplitLogTask { } public static class Owned extends SplitLogTask { - public Owned(final ServerName originServer) { - this(originServer, ZooKeeperProtos.SplitLogTask.RecoveryMode.LOG_SPLITTING); - } - public Owned(final ServerName originServer, final RecoveryMode mode) { super(originServer, ZooKeeperProtos.SplitLogTask.State.OWNED, mode); } http://git-wip-us.apache.org/repos/asf/hbase/blob/a3c5a744/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java index b9f52b8..ecd4401 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java @@ -32,6 +32,7 @@ import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathFilter; +import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.regionserver.HRegion; @@ -73,16 +74,6 @@ public class HFileArchiver { } /** - * @return True if the Region exits in the filesystem. 
- */ - public static boolean exists(Configuration conf, FileSystem fs, HRegionInfo info) - throws IOException { - Path rootDir = FSUtils.getRootDir(conf); - Path regionDir = HRegion.getRegionDir(rootDir, info); - return fs.exists(regionDir); - } - - /** * Cleans up all the files for a HRegion by archiving the HFiles to the * archive directory * @param conf the configuration to use @@ -146,7 +137,7 @@ public class HFileArchiver { FileStatus[] storeDirs = FSUtils.listStatus(fs, regionDir, nonHidden); // if there no files, we can just delete the directory and return; if (storeDirs == null) { - LOG.debug("Region directory " + regionDir + " empty."); + LOG.debug("Region directory (" + regionDir + ") was empty, just deleting and returning!"); return deleteRegionWithoutArchiving(fs, regionDir); } @@ -463,7 +454,7 @@ public class HFileArchiver { private static boolean deleteRegionWithoutArchiving(FileSystem fs, Path regionDir) throws IOException { if (fs.delete(regionDir, true)) { - LOG.debug("Deleted " + regionDir); + LOG.debug("Deleted all region files in: " + regionDir); return true; } LOG.debug("Failed to delete region directory:" + regionDir); http://git-wip-us.apache.org/repos/asf/hbase/blob/a3c5a744/hbase-server/src/main/java/org/apache/hadoop/hbase/client/VersionInfoUtil.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/VersionInfoUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/VersionInfoUtil.java index 4f134c0..ed1ae31 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/VersionInfoUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/VersionInfoUtil.java @@ -35,7 +35,9 @@ public final class VersionInfoUtil { } public static boolean currentClientHasMinimumVersion(int major, int minor) { - return hasMinimumVersion(getCurrentClientVersionInfo(), major, minor); + RpcCallContext call = RpcServer.getCurrentCall(); + HBaseProtos.VersionInfo versionInfo = call != null ? call.getClientVersionInfo() : null; + return hasMinimumVersion(versionInfo, major, minor); } public static boolean hasMinimumVersion(HBaseProtos.VersionInfo versionInfo, @@ -51,7 +53,7 @@ public final class VersionInfoUtil { return clientMinor >= minor; } try { - final String[] components = getVersionComponents(versionInfo); + String[] components = versionInfo.getVersion().split("\\."); int clientMajor = components.length > 0 ? Integer.parseInt(components[0]) : 0; if (clientMajor != major) { @@ -66,79 +68,4 @@ public final class VersionInfoUtil { } return false; } - - /** - * @return the versionInfo extracted from the current RpcCallContext - */ - private static HBaseProtos.VersionInfo getCurrentClientVersionInfo() { - RpcCallContext call = RpcServer.getCurrentCall(); - return call != null ? call.getClientVersionInfo() : null; - } - - /** - * @return the version number extracted from the current RpcCallContext as int. - * (e.g. 0x0103004 is 1.3.4) - */ - public static int getCurrentClientVersionNumber() { - return getVersionNumber(getCurrentClientVersionInfo()); - } - - - /** - * @param version - * @return the passed-in <code>version</code> int as a version String - * (e.g. 0x0103004 is 1.3.4) - */ - public static String versionNumberToString(final int version) { - return String.format("%d.%d.%d", - ((version >> 20) & 0xff), - ((version >> 12) & 0xff), - (version & 0xfff)); - } - - /** - * Pack the full number version in a int. 
by shifting each component by 8bit, - * except the dot release which has 12bit. - * Examples: (1.3.4 is 0x0103004, 2.1.0 is 0x0201000) - * @param versionInfo the VersionInfo object to pack - * @return the version number as int. (e.g. 0x0103004 is 1.3.4) - */ - private static int getVersionNumber(final HBaseProtos.VersionInfo versionInfo) { - if (versionInfo != null) { - try { - final String[] components = getVersionComponents(versionInfo); - int clientMajor = components.length > 0 ? Integer.parseInt(components[0]) : 0; - int clientMinor = components.length > 1 ? Integer.parseInt(components[1]) : 0; - int clientPatch = components.length > 2 ? Integer.parseInt(components[2]) : 0; - return buildVersionNumber(clientMajor, clientMinor, clientPatch); - } catch (NumberFormatException e) { - int clientMajor = versionInfo.hasVersionMajor() ? versionInfo.getVersionMajor() : 0; - int clientMinor = versionInfo.hasVersionMinor() ? versionInfo.getVersionMinor() : 0; - return buildVersionNumber(clientMajor, clientMinor, 0); - } - } - return(0); // no version - } - - /** - * Pack the full number version in a int. by shifting each component by 8bit, - * except the dot release which has 12bit. - * Examples: (1.3.4 is 0x0103004, 2.1.0 is 0x0201000) - * @param major version major number - * @param minor version minor number - * @param patch version patch number - * @return the version number as int. (e.g. 0x0103004 is 1.3.4) - */ - private static int buildVersionNumber(int major, int minor, int patch) { - return (major << 20) | (minor << 12) | patch; - } - - /** - * Returns the version components - * Examples: "1.2.3" returns [1, 2, 3], "4.5.6-SNAPSHOT" returns [4, 5, 6, "SNAPSHOT"] - * @returns the components of the version string - */ - private static String[] getVersionComponents(final HBaseProtos.VersionInfo versionInfo) { - return versionInfo.getVersion().split("[\\.-]"); - } } http://git-wip-us.apache.org/repos/asf/hbase/blob/a3c5a744/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java index ca68de2..e36feea 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java @@ -448,8 +448,8 @@ public interface RegionObserver extends Coprocessor { * Called before the region is split. 
* @param c the environment provided by the region server * (e.getRegion() returns the parent region) - * @deprecated No longer called in hbase2/AMv2 given the master runs splits now; - * @see MasterObserver + * @deprecated Use preSplit( + * final ObserverContext<RegionCoprocessorEnvironment> c, byte[] splitRow) */ @Deprecated default void preSplit(final ObserverContext<RegionCoprocessorEnvironment> c) throws IOException {} @@ -460,8 +460,6 @@ public interface RegionObserver extends Coprocessor { * (e.getRegion() returns the parent region) * * Note: the logic moves to Master; it is unused in RS - * @deprecated No longer called in hbase2/AMv2 given the master runs splits now; - * @see MasterObserver */ @Deprecated default void preSplit(final ObserverContext<RegionCoprocessorEnvironment> c, byte[] splitRow) @@ -473,8 +471,7 @@ public interface RegionObserver extends Coprocessor { * (e.getRegion() returns the parent region) * @param l the left daughter region * @param r the right daughter region - * @deprecated No longer called in hbase2/AMv2 given the master runs splits now; - * @see MasterObserver + * @deprecated Use postCompleteSplit() instead */ @Deprecated default void postSplit(final ObserverContext<RegionCoprocessorEnvironment> c, final Region l, @@ -488,8 +485,6 @@ public interface RegionObserver extends Coprocessor { * @param metaEntries * * Note: the logic moves to Master; it is unused in RS - * @deprecated No longer called in hbase2/AMv2 given the master runs splits now; - * @see MasterObserver */ @Deprecated default void preSplitBeforePONR(final ObserverContext<RegionCoprocessorEnvironment> ctx, @@ -500,9 +495,8 @@ public interface RegionObserver extends Coprocessor { * Calling {@link org.apache.hadoop.hbase.coprocessor.ObserverContext#bypass()} has no * effect in this hook. * @param ctx + * * Note: the logic moves to Master; it is unused in RS - * @deprecated No longer called in hbase2/AMv2 given the master runs splits now; - * @see MasterObserver */ @Deprecated default void preSplitAfterPONR(final ObserverContext<RegionCoprocessorEnvironment> ctx) @@ -513,8 +507,6 @@ public interface RegionObserver extends Coprocessor { * @param ctx * * Note: the logic moves to Master; it is unused in RS - * @deprecated No longer called in hbase2/AMv2 given the master runs splits now; - * @see MasterObserver */ @Deprecated default void preRollBackSplit(final ObserverContext<RegionCoprocessorEnvironment> ctx) @@ -525,8 +517,6 @@ public interface RegionObserver extends Coprocessor { * @param ctx * * Note: the logic moves to Master; it is unused in RS - * @deprecated No longer called in hbase2/AMv2 given the master runs splits now; - * @see MasterObserver */ @Deprecated default void postRollBackSplit(final ObserverContext<RegionCoprocessorEnvironment> ctx) @@ -536,11 +526,7 @@ public interface RegionObserver extends Coprocessor { * Called after any split request is processed. This will be called irrespective of success or * failure of the split. * @param ctx - * @deprecated No longer called in hbase2/AMv2 given the master runs splits now; - * implement {@link MasterObserver#postCompletedSplitRegionAction(ObserverContext, HRegionInfo, HRegionInfo)} - * instead. 
*/ - @Deprecated default void postCompleteSplit(final ObserverContext<RegionCoprocessorEnvironment> ctx) throws IOException {} /** http://git-wip-us.apache.org/repos/asf/hbase/blob/a3c5a744/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/CallRunner.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/CallRunner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/CallRunner.java index 578fb0f..f476b11 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/CallRunner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/CallRunner.java @@ -135,14 +135,7 @@ public class CallRunner { RpcServer.LOG.warn("Can not complete this request in time, drop it: " + call); return; } catch (Throwable e) { - if (e instanceof ServerNotRunningYetException) { - // If ServerNotRunningYetException, don't spew stack trace. - if (RpcServer.LOG.isTraceEnabled()) { - RpcServer.LOG.trace(call.toShortString(), e); - } - } else { - RpcServer.LOG.debug(call.toShortString(), e); - } + RpcServer.LOG.debug(Thread.currentThread().getName() + ": " + call.toShortString(), e); errorThrowable = e; error = StringUtils.stringifyException(e); if (e instanceof Error) { http://git-wip-us.apache.org/repos/asf/hbase/blob/a3c5a744/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcExecutor.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcExecutor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcExecutor.java index 313535d..3cb6011 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcExecutor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcExecutor.java @@ -142,7 +142,7 @@ public abstract class RpcExecutor { queueClass = LinkedBlockingQueue.class; } - LOG.info("RpcExecutor " + name + " using " + callQueueType + LOG.info("RpcExecutor " + " name " + " using " + callQueueType + " as call queue; numCallQueues=" + numCallQueues + "; maxQueueLength=" + maxQueueLength + "; handlerCount=" + handlerCount); } @@ -205,8 +205,6 @@ public abstract class RpcExecutor { double handlerFailureThreshhold = conf == null ? 1.0 : conf.getDouble( HConstants.REGION_SERVER_HANDLER_ABORT_ON_ERROR_PERCENT, HConstants.DEFAULT_REGION_SERVER_HANDLER_ABORT_ON_ERROR_PERCENT); - LOG.debug("Started " + handlers.size() + " " + threadPrefix + - " handlers, qsize=" + qsize + " on port=" + port); for (int i = 0; i < numHandlers; i++) { final int index = qindex + (i % qsize); String name = "RpcServer." 
+ threadPrefix + ".handler=" + handlers.size() + ",queue=" + index @@ -214,6 +212,7 @@ public abstract class RpcExecutor { Handler handler = getHandler(name, handlerFailureThreshhold, callQueues.get(index), activeHandlerCount); handler.start(); + LOG.debug("Started " + name); handlers.add(handler); } } http://git-wip-us.apache.org/repos/asf/hbase/blob/a3c5a744/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServer.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServer.java index 040209b..481b701 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServer.java @@ -130,7 +130,7 @@ public class SimpleRpcServer extends RpcServer { // has an advantage in that it is easy to shutdown the pool. readPool = Executors.newFixedThreadPool(readThreads, new ThreadFactoryBuilder().setNameFormat( - "Reader=%d,bindAddress=" + bindAddress.getHostName() + + "RpcServer.reader=%d,bindAddress=" + bindAddress.getHostName() + ",port=" + port).setDaemon(true) .setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build()); for (int i = 0; i < readThreads; ++i) { @@ -142,7 +142,7 @@ public class SimpleRpcServer extends RpcServer { // Register accepts on the server socket with the selector. acceptChannel.register(selector, SelectionKey.OP_ACCEPT); - this.setName("Listener,port=" + port); + this.setName("RpcServer.listener,port=" + port); this.setDaemon(true); } @@ -331,7 +331,7 @@ public class SimpleRpcServer extends RpcServer { throw ieo; } catch (Exception e) { if (LOG.isDebugEnabled()) { - LOG.debug("Caught exception while reading:", e); + LOG.debug(getName() + ": Caught exception while reading:", e); } count = -1; //so that the (count < 0) block is executed } @@ -608,8 +608,8 @@ public class SimpleRpcServer extends RpcServer { SimpleServerRpcConnection register(SocketChannel channel) { SimpleServerRpcConnection connection = getConnection(channel, System.currentTimeMillis()); add(connection); - if (LOG.isTraceEnabled()) { - LOG.trace("Connection from " + connection + + if (LOG.isDebugEnabled()) { + LOG.debug("Server connection from " + connection + "; connections=" + size() + ", queued calls size (bytes)=" + callQueueSizeInBytes.sum() + ", general queued calls=" + scheduler.getGeneralQueueLength() + @@ -621,8 +621,8 @@ public class SimpleRpcServer extends RpcServer { boolean close(SimpleServerRpcConnection connection) { boolean exists = remove(connection); if (exists) { - if (LOG.isTraceEnabled()) { - LOG.trace(Thread.currentThread().getName() + + if (LOG.isDebugEnabled()) { + LOG.debug(Thread.currentThread().getName() + ": disconnecting client " + connection + ". 
Number of active connections: "+ size()); } } @@ -698,4 +698,4 @@ public class SimpleRpcServer extends RpcServer { } } -} +} \ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hbase/blob/a3c5a744/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignCallable.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignCallable.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignCallable.java
new file mode 100644
index 0000000..4513a5d
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignCallable.java
@@ -0,0 +1,49 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.master;
+
+import java.util.concurrent.Callable;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.HRegionInfo;
+
+/**
+ * A callable object that invokes the corresponding action that needs to be
+ * taken for assignment of a region in transition.
+ * Implementing as future callable we are able to act on the timeout
+ * asynchronously.
+ */
+@InterfaceAudience.Private
+public class AssignCallable implements Callable<Object> {
+  private AssignmentManager assignmentManager;
+
+  private HRegionInfo hri;
+
+  public AssignCallable(
+      AssignmentManager assignmentManager, HRegionInfo hri) {
+    this.assignmentManager = assignmentManager;
+    this.hri = hri;
+  }
+
+  @Override
+  public Object call() throws Exception {
+    assignmentManager.assign(hri);
+    return null;
+  }
+}
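The restored AssignCallable above simply delegates to AssignmentManager.assign(HRegionInfo), so a caller can hand an assignment to an executor and bound it with a timeout. A usage sketch follows; the executor, timeout value and cancel/retry handling are illustrative assumptions, not part of this patch.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.master.AssignCallable;
import org.apache.hadoop.hbase.master.AssignmentManager;

// Hypothetical caller-side helper: submit the callable so a stuck assign can be
// timed out and, if the caller chooses, retried.
public class AssignWithTimeoutSketch {
  public static void assign(AssignmentManager am, HRegionInfo hri,
      ExecutorService pool, long timeoutMs) throws Exception {
    Future<Object> future = pool.submit(new AssignCallable(am, hri));
    try {
      // Wait at most timeoutMs for the assign to run; AssignCallable.call() returns null.
      future.get(timeoutMs, TimeUnit.MILLISECONDS);
    } catch (TimeoutException e) {
      future.cancel(true);  // abandon this attempt; the caller decides whether to retry
      throw e;
    }
  }
}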
