This is an automated email from the ASF dual-hosted git repository.
openinx pushed a commit to branch branch-2.1
in repository https://gitbox.apache.org/repos/asf/hbase.git
The following commit(s) were added to refs/heads/branch-2.1 by this push:
new e61b356 HBASE-22903 : Table to RegionStatesCount metrics - Use for
broken alter_status command (#611)
e61b356 is described below
commit e61b356ebc747a4b3f8efd2e42b39ac7d5be583a
Author: Viraj Jasani <[email protected]>
AuthorDate: Mon Sep 30 15:05:22 2019 +0530
HBASE-22903 : Table to RegionStatesCount metrics - Use for broken
alter_status command (#611)
Signed-off-by: huzheng <[email protected]>
---
.../org/apache/hadoop/hbase/ClusterMetrics.java | 15 +-
.../apache/hadoop/hbase/ClusterMetricsBuilder.java | 42 +++++-
.../org/apache/hadoop/hbase/ClusterStatus.java | 7 +
.../hadoop/hbase/client/RegionStatesCount.java | 167 +++++++++++++++++++++
.../hadoop/hbase/shaded/protobuf/ProtobufUtil.java | 48 ++++++
.../src/main/protobuf/ClusterStatus.proto | 15 ++
.../org/apache/hadoop/hbase/master/HMaster.java | 19 +++
.../hbase/master/assignment/AssignmentManager.java | 38 +++++
.../hadoop/hbase/TestClientClusterMetrics.java | 44 ++++++
hbase-shell/src/main/ruby/hbase/admin.rb | 15 +-
hbase-shell/src/test/ruby/hbase/admin_test.rb | 7 +
11 files changed, 407 insertions(+), 10 deletions(-)
diff --git
a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterMetrics.java
b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterMetrics.java
index 103c107..e3052d8 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterMetrics.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterMetrics.java
@@ -23,6 +23,7 @@ import edu.umd.cs.findbugs.annotations.Nullable;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.client.RegionStatesCount;
import org.apache.hadoop.hbase.master.RegionState;
import org.apache.yetus.audience.InterfaceAudience;
@@ -151,6 +152,14 @@ public interface ClusterMetrics {
}
/**
+ * Provide region states count for given table.
+ * e.g. how many regions of a given table are open/closed/in transition, etc.
+ *
+ * @return map of table to region states count
+ */
+ Map<TableName, RegionStatesCount> getTableRegionStatesCount();
+
+ /**
* Kinds of ClusterMetrics
*/
enum Option {
@@ -193,6 +202,10 @@ public interface ClusterMetrics {
/**
* metrics info port
*/
- MASTER_INFO_PORT
+ MASTER_INFO_PORT,
+ /**
+ * metrics about the number of regions per table, grouped by region state
+ */
+ TABLE_TO_REGIONS_COUNT
}
}
diff --git
a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterMetricsBuilder.java
b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterMetricsBuilder.java
index ab6d353..a651feb 100644
---
a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterMetricsBuilder.java
+++
b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterMetricsBuilder.java
@@ -26,6 +26,8 @@ import java.util.List;
import java.util.Map;
import java.util.TreeMap;
import java.util.stream.Collectors;
+
+import org.apache.hadoop.hbase.client.RegionStatesCount;
import org.apache.hadoop.hbase.master.RegionState;
import org.apache.yetus.audience.InterfaceAudience;
@@ -67,7 +69,14 @@ public final class ClusterMetricsBuilder {
.setRegionState(r.convert())
.build())
.collect(Collectors.toList()))
- .setMasterInfoPort(metrics.getMasterInfoPort());
+ .setMasterInfoPort(metrics.getMasterInfoPort())
+
.addAllTableRegionStatesCount(metrics.getTableRegionStatesCount().entrySet().stream()
+ .map(status ->
+ ClusterStatusProtos.TableRegionStatesCount.newBuilder()
+ .setTableName(ProtobufUtil.toProtoTableName((status.getKey())))
+
.setRegionStatesCount(ProtobufUtil.toTableRegionStatesCount(status.getValue()))
+ .build())
+ .collect(Collectors.toList()));
if (metrics.getMasterName() != null) {
builder.setMaster(ProtobufUtil.toServerName((metrics.getMasterName())));
}
@@ -103,7 +112,12 @@ public final class ClusterMetricsBuilder {
.collect(Collectors.toList()))
.setMasterCoprocessorNames(proto.getMasterCoprocessorsList().stream()
.map(HBaseProtos.Coprocessor::getName)
- .collect(Collectors.toList()));
+ .collect(Collectors.toList()))
+ .setTableRegionStatesCount(
+ proto.getTableRegionStatesCountList().stream()
+ .collect(Collectors.toMap(
+ e -> ProtobufUtil.toTableName(e.getTableName()),
+ e ->
ProtobufUtil.toTableRegionStatesCount(e.getRegionStatesCount()))));
if (proto.hasClusterId()) {
builder.setClusterId(ClusterId.convert(proto.getClusterId()).toString());
}
@@ -143,6 +157,7 @@ public final class ClusterMetricsBuilder {
case BACKUP_MASTERS: return ClusterMetrics.Option.BACKUP_MASTERS;
case BALANCER_ON: return ClusterMetrics.Option.BALANCER_ON;
case MASTER_INFO_PORT: return ClusterMetrics.Option.MASTER_INFO_PORT;
+ case TABLE_TO_REGIONS_COUNT: return
ClusterMetrics.Option.TABLE_TO_REGIONS_COUNT;
// should not reach here
default: throw new IllegalArgumentException("Invalid option: " + option);
}
@@ -165,6 +180,7 @@ public final class ClusterMetricsBuilder {
case BACKUP_MASTERS: return ClusterStatusProtos.Option.BACKUP_MASTERS;
case BALANCER_ON: return ClusterStatusProtos.Option.BALANCER_ON;
case MASTER_INFO_PORT: return
ClusterStatusProtos.Option.MASTER_INFO_PORT;
+ case TABLE_TO_REGIONS_COUNT: return
ClusterStatusProtos.Option.TABLE_TO_REGIONS_COUNT;
// should not reach here
default: throw new IllegalArgumentException("Invalid option: " + option);
}
@@ -206,6 +222,7 @@ public final class ClusterMetricsBuilder {
@Nullable
private Boolean balancerOn;
private int masterInfoPort;
+ private Map<TableName, RegionStatesCount> tableRegionStatesCount =
Collections.emptyMap();
private ClusterMetricsBuilder() {
}
@@ -251,6 +268,13 @@ public final class ClusterMetricsBuilder {
this.masterInfoPort = value;
return this;
}
+
+ public ClusterMetricsBuilder setTableRegionStatesCount(
+ Map<TableName, RegionStatesCount> tableRegionStatesCount) {
+ this.tableRegionStatesCount = tableRegionStatesCount;
+ return this;
+ }
+
public ClusterMetrics build() {
return new ClusterMetricsImpl(
hbaseVersion,
@@ -262,7 +286,9 @@ public final class ClusterMetricsBuilder {
clusterId,
masterCoprocessorNames,
balancerOn,
- masterInfoPort);
+ masterInfoPort,
+ tableRegionStatesCount
+ );
}
private static class ClusterMetricsImpl implements ClusterMetrics {
@Nullable
@@ -279,6 +305,7 @@ public final class ClusterMetricsBuilder {
@Nullable
private final Boolean balancerOn;
private final int masterInfoPort;
+ private final Map<TableName, RegionStatesCount> tableRegionStatesCount;
ClusterMetricsImpl(String hbaseVersion, List<ServerName> deadServerNames,
Map<ServerName, ServerMetrics> liveServerMetrics,
@@ -288,7 +315,8 @@ public final class ClusterMetricsBuilder {
String clusterId,
List<String> masterCoprocessorNames,
Boolean balancerOn,
- int masterInfoPort) {
+ int masterInfoPort,
+ Map<TableName, RegionStatesCount> tableRegionStatesCount) {
this.hbaseVersion = hbaseVersion;
this.deadServerNames = Preconditions.checkNotNull(deadServerNames);
this.liveServerMetrics = Preconditions.checkNotNull(liveServerMetrics);
@@ -299,6 +327,7 @@ public final class ClusterMetricsBuilder {
this.masterCoprocessorNames =
Preconditions.checkNotNull(masterCoprocessorNames);
this.balancerOn = balancerOn;
this.masterInfoPort = masterInfoPort;
+ this.tableRegionStatesCount =
Preconditions.checkNotNull(tableRegionStatesCount);
}
@Override
@@ -352,6 +381,11 @@ public final class ClusterMetricsBuilder {
}
@Override
+ public Map<TableName, RegionStatesCount> getTableRegionStatesCount() {
+ return Collections.unmodifiableMap(tableRegionStatesCount);
+ }
+
+ @Override
public String toString() {
StringBuilder sb = new StringBuilder(1024);
sb.append("Master: " + getMasterName());
diff --git
a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java
b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java
index f13de1e..ae6ab89 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java
@@ -26,6 +26,8 @@ import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
+
+import org.apache.hadoop.hbase.client.RegionStatesCount;
import org.apache.hadoop.hbase.master.RegionState;
import org.apache.yetus.audience.InterfaceAudience;
@@ -345,6 +347,11 @@ public class ClusterStatus implements ClusterMetrics {
}
@Override
+ public Map<TableName, RegionStatesCount> getTableRegionStatesCount() {
+ return metrics.getTableRegionStatesCount();
+ }
+
+ @Override
public String toString() {
StringBuilder sb = new StringBuilder(1024);
sb.append("Master: " + metrics.getMasterName());
diff --git
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionStatesCount.java
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionStatesCount.java
new file mode 100644
index 0000000..1e1ce95
--- /dev/null
+++
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionStatesCount.java
@@ -0,0 +1,167 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.client;
+
+import org.apache.yetus.audience.InterfaceAudience;
+
[email protected]
+public final class RegionStatesCount {
+
+ private int openRegions;
+ private int splitRegions;
+ private int closedRegions;
+ private int regionsInTransition;
+ private int totalRegions;
+
+ private RegionStatesCount() {
+ }
+
+ public int getClosedRegions() {
+ return closedRegions;
+ }
+
+ public int getOpenRegions() {
+ return openRegions;
+ }
+
+ public int getSplitRegions() {
+ return splitRegions;
+ }
+
+ public int getRegionsInTransition() {
+ return regionsInTransition;
+ }
+
+ public int getTotalRegions() {
+ return totalRegions;
+ }
+
+ private void setClosedRegions(int closedRegions) {
+ this.closedRegions = closedRegions;
+ }
+
+ private void setOpenRegions(int openRegions) {
+ this.openRegions = openRegions;
+ }
+
+ private void setSplitRegions(int splitRegions) {
+ this.splitRegions = splitRegions;
+ }
+
+ private void setRegionsInTransition(int regionsInTransition) {
+ this.regionsInTransition = regionsInTransition;
+ }
+
+ private void setTotalRegions(int totalRegions) {
+ this.totalRegions = totalRegions;
+ }
+
+ public static class RegionStatesCountBuilder {
+ private int openRegions;
+ private int splitRegions;
+ private int closedRegions;
+ private int regionsInTransition;
+ private int totalRegions;
+
+ public RegionStatesCountBuilder setOpenRegions(int openRegions) {
+ this.openRegions = openRegions;
+ return this;
+ }
+
+ public RegionStatesCountBuilder setSplitRegions(int splitRegions) {
+ this.splitRegions = splitRegions;
+ return this;
+ }
+
+ public RegionStatesCountBuilder setClosedRegions(int closedRegions) {
+ this.closedRegions = closedRegions;
+ return this;
+ }
+
+ public RegionStatesCountBuilder setRegionsInTransition(int
regionsInTransition) {
+ this.regionsInTransition = regionsInTransition;
+ return this;
+ }
+
+ public RegionStatesCountBuilder setTotalRegions(int totalRegions) {
+ this.totalRegions = totalRegions;
+ return this;
+ }
+
+ public RegionStatesCount build() {
+ RegionStatesCount regionStatesCount=new RegionStatesCount();
+ regionStatesCount.setOpenRegions(openRegions);
+ regionStatesCount.setClosedRegions(closedRegions);
+ regionStatesCount.setRegionsInTransition(regionsInTransition);
+ regionStatesCount.setSplitRegions(splitRegions);
+ regionStatesCount.setTotalRegions(totalRegions);
+ return regionStatesCount;
+ }
+ }
+
+ @Override
+ public String toString() {
+ final StringBuilder sb = new StringBuilder("RegionStatesCount{");
+ sb.append("openRegions=").append(openRegions);
+ sb.append(", splitRegions=").append(splitRegions);
+ sb.append(", closedRegions=").append(closedRegions);
+ sb.append(", regionsInTransition=").append(regionsInTransition);
+ sb.append(", totalRegions=").append(totalRegions);
+ sb.append('}');
+ return sb.toString();
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) {
+ return true;
+ }
+ if (o == null || getClass() != o.getClass()) {
+ return false;
+ }
+
+ RegionStatesCount that = (RegionStatesCount) o;
+
+ if (openRegions != that.openRegions) {
+ return false;
+ }
+ if (splitRegions != that.splitRegions) {
+ return false;
+ }
+ if (closedRegions != that.closedRegions) {
+ return false;
+ }
+ if (regionsInTransition != that.regionsInTransition) {
+ return false;
+ }
+ return totalRegions == that.totalRegions;
+ }
+
+ @Override
+ public int hashCode() {
+ int result = openRegions;
+ result = 31 * result + splitRegions;
+ result = 31 * result + closedRegions;
+ result = 31 * result + regionsInTransition;
+ result = 31 * result + totalRegions;
+ return result;
+ }
+
+}
diff --git
a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
index 7179272..3821fc1 100644
---
a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
+++
b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
@@ -78,6 +78,7 @@ import
org.apache.hadoop.hbase.client.PackagePrivateFieldAccessor;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.client.RegionLoadStats;
+import org.apache.hadoop.hbase.client.RegionStatesCount;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.SnapshotDescription;
@@ -3229,4 +3230,51 @@ public final class ProtobufUtil {
.setTo(timeRange.getMax())
.build();
}
+
+ public static ClusterStatusProtos.RegionStatesCount toTableRegionStatesCount(
+ RegionStatesCount regionStatesCount) {
+ int openRegions = 0;
+ int splitRegions = 0;
+ int closedRegions = 0;
+ int regionsInTransition = 0;
+ int totalRegions = 0;
+ if (regionStatesCount != null) {
+ openRegions = regionStatesCount.getOpenRegions();
+ splitRegions = regionStatesCount.getSplitRegions();
+ closedRegions = regionStatesCount.getClosedRegions();
+ regionsInTransition = regionStatesCount.getRegionsInTransition();
+ totalRegions = regionStatesCount.getTotalRegions();
+ }
+ return ClusterStatusProtos.RegionStatesCount.newBuilder()
+ .setOpenRegions(openRegions)
+ .setSplitRegions(splitRegions)
+ .setClosedRegions(closedRegions)
+ .setRegionsInTransition(regionsInTransition)
+ .setTotalRegions(totalRegions)
+ .build();
+ }
+
+ public static RegionStatesCount toTableRegionStatesCount(
+ ClusterStatusProtos.RegionStatesCount regionStatesCount) {
+ int openRegions = 0;
+ int splitRegions = 0;
+ int closedRegions = 0;
+ int regionsInTransition = 0;
+ int totalRegions = 0;
+ if (regionStatesCount != null) {
+ closedRegions = regionStatesCount.getClosedRegions();
+ regionsInTransition = regionStatesCount.getRegionsInTransition();
+ openRegions = regionStatesCount.getOpenRegions();
+ splitRegions = regionStatesCount.getSplitRegions();
+ totalRegions = regionStatesCount.getTotalRegions();
+ }
+ return new RegionStatesCount.RegionStatesCountBuilder()
+ .setOpenRegions(openRegions)
+ .setSplitRegions(splitRegions)
+ .setClosedRegions(closedRegions)
+ .setRegionsInTransition(regionsInTransition)
+ .setTotalRegions(totalRegions)
+ .build();
+ }
+
}
diff --git a/hbase-protocol-shaded/src/main/protobuf/ClusterStatus.proto
b/hbase-protocol-shaded/src/main/protobuf/ClusterStatus.proto
index 9fd5f04..b1c7f87 100644
--- a/hbase-protocol-shaded/src/main/protobuf/ClusterStatus.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/ClusterStatus.proto
@@ -214,6 +214,19 @@ message LiveServerInfo {
required ServerLoad server_load = 2;
}
+message RegionStatesCount {
+ required uint32 open_regions = 1;
+ required uint32 split_regions = 2;
+ required uint32 closed_regions = 3;
+ required uint32 regions_in_transition = 4;
+ required uint32 total_regions = 5;
+}
+
+message TableRegionStatesCount {
+ required TableName table_name = 1;
+ required RegionStatesCount region_states_count = 2;
+}
+
message ClusterStatus {
optional HBaseVersionFileContent hbase_version = 1;
repeated LiveServerInfo live_servers = 2;
@@ -225,6 +238,7 @@ message ClusterStatus {
repeated ServerName backup_masters = 8;
optional bool balancer_on = 9;
optional int32 master_info_port = 10 [default = -1];
+ repeated TableRegionStatesCount table_region_states_count = 11;
}
enum Option {
@@ -238,4 +252,5 @@ enum Option {
REGIONS_IN_TRANSITION = 7;
BALANCER_ON = 8;
MASTER_INFO_PORT = 9;
+ TABLE_TO_REGIONS_COUNT = 10;
}
diff --git
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index c0e362d..19824a8 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -85,6 +85,7 @@ import
org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.MasterSwitchType;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
+import org.apache.hadoop.hbase.client.RegionStatesCount;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableState;
@@ -2703,6 +2704,24 @@ public class HMaster extends HRegionServer implements
MasterServices {
}
break;
}
+ case TABLE_TO_REGIONS_COUNT: {
+ if (isActiveMaster() && isInitialized() && assignmentManager !=
null) {
+ try {
+ Map<TableName, RegionStatesCount> tableRegionStatesCountMap =
new HashMap<>();
+ Map<String, TableDescriptor> tableDescriptorMap =
getTableDescriptors().getAll();
+ for (TableDescriptor tableDescriptor :
tableDescriptorMap.values()) {
+ TableName tableName = tableDescriptor.getTableName();
+ RegionStatesCount regionStatesCount = assignmentManager
+ .getRegionStatesCount(tableName);
+ tableRegionStatesCountMap.put(tableName, regionStatesCount);
+ }
+ builder.setTableRegionStatesCount(tableRegionStatesCountMap);
+ } catch (IOException e) {
+ LOG.error("Error while populating TABLE_TO_REGIONS_COUNT for
Cluster Metrics..", e);
+ }
+ }
+ break;
+ }
}
}
return builder.build();
diff --git
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
index 99fd04d..1842044 100644
---
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
+++
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
@@ -43,6 +43,7 @@ import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.YouAreDeadException;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
+import org.apache.hadoop.hbase.client.RegionStatesCount;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.TableState;
import org.apache.hadoop.hbase.exceptions.UnexpectedStateException;
@@ -1911,4 +1912,41 @@ public class AssignmentManager implements ServerListener
{
}
return rsReportsSnapshot;
}
+
+ /**
+ * Provide regions state count for given table.
+ * e.g. how many regions of a given table are open/closed/in transition, etc.
+ *
+ * @param tableName TableName
+ * @return region states count
+ */
+ public RegionStatesCount getRegionStatesCount(TableName tableName) {
+ int openRegionsCount = 0;
+ int closedRegionCount = 0;
+ int ritCount = 0;
+ int splitRegionCount = 0;
+ int totalRegionCount = 0;
+ if (!isTableDisabled(tableName)) {
+ final List<RegionState> states =
regionStates.getTableRegionStates(tableName);
+ for (RegionState regionState : states) {
+ if (regionState.isOpened()) {
+ openRegionsCount++;
+ } else if (regionState.isClosed()) {
+ closedRegionCount++;
+ } else if (regionState.isSplit()) {
+ splitRegionCount++;
+ }
+ }
+ totalRegionCount = states.size();
+ ritCount = totalRegionCount - openRegionsCount - splitRegionCount;
+ }
+ return new RegionStatesCount.RegionStatesCountBuilder()
+ .setOpenRegions(openRegionsCount)
+ .setClosedRegions(closedRegionCount)
+ .setSplitRegions(splitRegionCount)
+ .setRegionsInTransition(ritCount)
+ .setTotalRegions(totalRegionCount)
+ .build();
+ }
+
}
diff --git
a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestClientClusterMetrics.java
b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestClientClusterMetrics.java
index a2605f2..753bdf7 100644
---
a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestClientClusterMetrics.java
+++
b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestClientClusterMetrics.java
@@ -30,6 +30,9 @@ import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.AsyncAdmin;
import org.apache.hadoop.hbase.client.AsyncConnection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.RegionStatesCount;
+import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessor;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
@@ -38,6 +41,7 @@ import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.JVMClusterUtil.MasterThread;
import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread;
import org.junit.AfterClass;
@@ -60,6 +64,9 @@ public class TestClientClusterMetrics {
private final static int MASTERS = 3;
private static MiniHBaseCluster CLUSTER;
private static HRegionServer DEAD;
+ private static final TableName TABLE_NAME = TableName.valueOf("test");
+ private static final byte[] CF = Bytes.toBytes("cf");
+
@BeforeClass
public static void setUpBeforeClass() throws Exception {
@@ -119,6 +126,11 @@ public class TestClientClusterMetrics {
Assert.assertEquals(origin.getLiveServerMetrics().size(),
defaults.getLiveServerMetrics().size());
Assert.assertEquals(origin.getMasterInfoPort(),
defaults.getMasterInfoPort());
+ origin.getTableRegionStatesCount().forEach(((tableName,
regionStatesCount) -> {
+ RegionStatesCount defaultRegionStatesCount =
defaults.getTableRegionStatesCount()
+ .get(tableName);
+ Assert.assertEquals(defaultRegionStatesCount, regionStatesCount);
+ }));
}
}
@@ -161,6 +173,38 @@ public class TestClientClusterMetrics {
}
@Test
+ public void testRegionStatesCount() throws Exception {
+ Table table = UTIL.createTable(TABLE_NAME, CF);
+ table.put(new Put(Bytes.toBytes("k1"))
+ .addColumn(CF, Bytes.toBytes("q1"), Bytes.toBytes("v1")));
+ table.put(new Put(Bytes.toBytes("k2"))
+ .addColumn(CF, Bytes.toBytes("q2"), Bytes.toBytes("v2")));
+ table.put(new Put(Bytes.toBytes("k3"))
+ .addColumn(CF, Bytes.toBytes("q3"), Bytes.toBytes("v3")));
+
+ ClusterMetrics metrics = ADMIN.getClusterMetrics();
+ Assert.assertEquals(metrics.getTableRegionStatesCount().size(), 3);
+
Assert.assertEquals(metrics.getTableRegionStatesCount().get(TableName.META_TABLE_NAME)
+ .getRegionsInTransition(), 0);
+
Assert.assertEquals(metrics.getTableRegionStatesCount().get(TableName.META_TABLE_NAME)
+ .getOpenRegions(), 1);
+
Assert.assertEquals(metrics.getTableRegionStatesCount().get(TableName.META_TABLE_NAME)
+ .getTotalRegions(), 1);
+
Assert.assertEquals(metrics.getTableRegionStatesCount().get(TableName.META_TABLE_NAME)
+ .getClosedRegions(), 0);
+
Assert.assertEquals(metrics.getTableRegionStatesCount().get(TableName.META_TABLE_NAME)
+ .getSplitRegions(), 0);
+ Assert.assertEquals(metrics.getTableRegionStatesCount().get(TABLE_NAME)
+ .getRegionsInTransition(), 0);
+ Assert.assertEquals(metrics.getTableRegionStatesCount().get(TABLE_NAME)
+ .getOpenRegions(), 1);
+ Assert.assertEquals(metrics.getTableRegionStatesCount().get(TABLE_NAME)
+ .getTotalRegions(), 1);
+
+ UTIL.deleteTable(TABLE_NAME);
+ }
+
+ @Test
public void testMasterAndBackupMastersStatus() throws Exception {
// get all the master threads
List<MasterThread> masterThreads = CLUSTER.getMasterThreads();
diff --git a/hbase-shell/src/main/ruby/hbase/admin.rb
b/hbase-shell/src/main/ruby/hbase/admin.rb
index 386e1cb..140c724 100644
--- a/hbase-shell/src/main/ruby/hbase/admin.rb
+++ b/hbase-shell/src/main/ruby/hbase/admin.rb
@@ -592,16 +592,21 @@ module Hbase
# Table should exist
raise(ArgumentError, "Can't find a table: #{table_name}") unless
exists?(table_name)
- status = Pair.new
begin
- status =
@admin.getAlterStatus(org.apache.hadoop.hbase.TableName.valueOf(table_name))
- if status.getSecond != 0
- puts "#{status.getSecond - status.getFirst}/#{status.getSecond}
regions updated."
+ cluster_metrics = @admin.getClusterMetrics
+ table_region_status = cluster_metrics
+ .getTableRegionStatesCount
+
.get(org.apache.hadoop.hbase.TableName.valueOf(table_name))
+ if table_region_status.getTotalRegions != 0
+ updated_regions = table_region_status.getTotalRegions -
+ table_region_status.getRegionsInTransition -
+ table_region_status.getClosedRegions
+ puts "#{updated_regions}/#{table_region_status.getTotalRegions}
regions updated."
else
puts 'All regions updated.'
end
sleep 1
- end while !status.nil? && status.getFirst != 0
+ end while !table_region_status.nil? &&
table_region_status.getRegionsInTransition != 0
puts 'Done.'
end
diff --git a/hbase-shell/src/test/ruby/hbase/admin_test.rb
b/hbase-shell/src/test/ruby/hbase/admin_test.rb
index a55bb28..c2a350a 100644
--- a/hbase-shell/src/test/ruby/hbase/admin_test.rb
+++ b/hbase-shell/src/test/ruby/hbase/admin_test.rb
@@ -113,6 +113,13 @@ module Hbase
#-------------------------------------------------------------------------------
+ define_test 'alter_status should work' do
+ output = capture_stdout { command(:alter_status, @test_name) }
+ assert(output.include?('1/1 regions updated'))
+ end
+
+
#-------------------------------------------------------------------------------
+
define_test "compact should work" do
command(:compact, 'hbase:meta')
end