This is an automated email from the ASF dual-hosted git repository.

east pushed a commit to branch cluster
in repository https://gitbox.apache.org/repos/asf/incubator-iotdb.git


The following commit(s) were added to refs/heads/cluster by this push:
     new fb6847a  update StorageGroup to query all storage groups
fb6847a is described below

commit fb6847acb6265f668a492ad1fd23d411d7b2d5bf
Author: mdf369 <[email protected]>
AuthorDate: Mon Jun 3 20:32:36 2019 +0800

    update StorageGroup to query all storage groups
---
 .../iotdb/cluster/service/ClusterMonitor.java      |  6 +++++
 .../iotdb/cluster/service/ClusterMonitorMBean.java |  8 ++++++
 .../iotdb/cluster/service/nodetool/Ring.java       |  1 +
 .../cluster/service/nodetool/StorageGroup.java     | 20 +++++++++++++--
 .../org/apache/iotdb/cluster/utils/RaftUtils.java  |  5 ++++
 .../UserGuideV0.7.0/7-Tools-NodeTool.md            | 30 +++++++++++++++++++---
 6 files changed, 64 insertions(+), 6 deletions(-)

diff --git 
a/cluster/src/main/java/org/apache/iotdb/cluster/service/ClusterMonitor.java 
b/cluster/src/main/java/org/apache/iotdb/cluster/service/ClusterMonitor.java
index c997971..01fe095 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/service/ClusterMonitor.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/service/ClusterMonitor.java
@@ -21,6 +21,7 @@ package org.apache.iotdb.cluster.service;
 import com.alipay.sofa.jraft.entity.PeerId;
 import java.util.HashMap;
 import java.util.Map;
+import java.util.Set;
 import org.apache.iotdb.cluster.utils.RaftUtils;
 import org.apache.iotdb.db.exception.StartupException;
 import org.apache.iotdb.db.service.IService;
@@ -92,6 +93,11 @@ public class ClusterMonitor implements ClusterMonitorMBean, 
IService {
   }
 
   @Override
+  public Set<String> getAllStorageGroupsLocally() {
+    return RaftUtils.getAllStorageGroupsLocally();
+  }
+
+  @Override
   public Map<String[], String[]> getDataPartitonOfNode(String ip) {
     return RaftUtils.getDataPartitionOfNode(ip);
   }
diff --git 
a/cluster/src/main/java/org/apache/iotdb/cluster/service/ClusterMonitorMBean.java
 
b/cluster/src/main/java/org/apache/iotdb/cluster/service/ClusterMonitorMBean.java
index 7144e1c..cca0820 100644
--- 
a/cluster/src/main/java/org/apache/iotdb/cluster/service/ClusterMonitorMBean.java
+++ 
b/cluster/src/main/java/org/apache/iotdb/cluster/service/ClusterMonitorMBean.java
@@ -19,6 +19,7 @@
 package org.apache.iotdb.cluster.service;
 
 import java.util.Map;
+import java.util.Set;
 
 public interface ClusterMonitorMBean {
 
@@ -53,6 +54,13 @@ public interface ClusterMonitorMBean {
   String getDataPartitionOfSG(String sg);
 
   /**
+   * Get all storage groups
+   *
+   * @return Set of all storage groups
+   */
+  Set<String> getAllStorageGroupsLocally();
+
+  /**
    * Get data partitions that input node belongs to.
    *
    * @param ip node ip
diff --git 
a/cluster/src/main/java/org/apache/iotdb/cluster/service/nodetool/Ring.java 
b/cluster/src/main/java/org/apache/iotdb/cluster/service/nodetool/Ring.java
index 9de9274..8dd96a0 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/service/nodetool/Ring.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/service/nodetool/Ring.java
@@ -26,6 +26,7 @@ import 
org.apache.iotdb.cluster.service.nodetool.NodeTool.NodeToolCmd;
 
 @Command(name = "ring", description = "Print information about the hash ring")
 public class Ring extends NodeToolCmd {
+
   @Option(title = "physical_ring", name = {"-p", "--physical"}, description = 
"Show physical nodes instead of virtual ones")
   private boolean physical = false;
 
diff --git 
a/cluster/src/main/java/org/apache/iotdb/cluster/service/nodetool/StorageGroup.java
 
b/cluster/src/main/java/org/apache/iotdb/cluster/service/nodetool/StorageGroup.java
index 727e9eb..e44aa64 100644
--- 
a/cluster/src/main/java/org/apache/iotdb/cluster/service/nodetool/StorageGroup.java
+++ 
b/cluster/src/main/java/org/apache/iotdb/cluster/service/nodetool/StorageGroup.java
@@ -20,19 +20,35 @@ package org.apache.iotdb.cluster.service.nodetool;
 
 import io.airlift.airline.Command;
 import io.airlift.airline.Option;
+import java.util.HashSet;
+import java.util.Set;
 import org.apache.iotdb.cluster.service.ClusterMonitorMBean;
 import org.apache.iotdb.cluster.service.nodetool.NodeTool.NodeToolCmd;
 
 @Command(name = "storagegroup", description = "Print all hosts information of 
specific storage group")
 public class StorageGroup extends NodeToolCmd {
 
+  @Option(title = "all storagegroup", name = {"-a", "--all"}, description = 
"Show hosts info of all storage groups")
+  private boolean showAll = false;
+
   @Option(title = "storage group", name = {"-sg",
       "--storagegroup"}, description = "Specify a storage group for accurate 
hosts information")
   private String sg = null;
 
   @Override
   public void execute(ClusterMonitorMBean proxy) {
-    String nodes = proxy.getDataPartitionOfSG(sg);
-    System.out.println(nodes);
+    Set<String> sgSet;
+    if (showAll) {
+      sgSet = proxy.getAllStorageGroupsLocally();
+    } else {
+      sgSet = new HashSet<>();
+      sgSet.add(sg);
+    }
+
+    if (!showAll && sg == null) {
+      System.out.println("Metadata\t->\t" + proxy.getDataPartitionOfSG(sg));
+    } else {
+      sgSet.forEach(sg -> System.out.println(sg + "\t->\t" + 
proxy.getDataPartitionOfSG(sg)));
+    }
   }
 }
diff --git 
a/cluster/src/main/java/org/apache/iotdb/cluster/utils/RaftUtils.java 
b/cluster/src/main/java/org/apache/iotdb/cluster/utils/RaftUtils.java
index 44b8747..1e1908d 100644
--- a/cluster/src/main/java/org/apache/iotdb/cluster/utils/RaftUtils.java
+++ b/cluster/src/main/java/org/apache/iotdb/cluster/utils/RaftUtils.java
@@ -787,4 +787,9 @@ public class RaftUtils {
       return false;
     }
   }
+
+  public static Set<String> getAllStorageGroupsLocally() {
+    MetadataRaftHolder metadataRaftHolder = (MetadataRaftHolder) 
server.getMetadataHolder();
+    return metadataRaftHolder.getFsm().getAllStorageGroups();
+  }
 }
diff --git a/docs/Documentation/UserGuideV0.7.0/7-Tools-NodeTool.md 
b/docs/Documentation/UserGuideV0.7.0/7-Tools-NodeTool.md
index 011271d..48be8f7 100644
--- a/docs/Documentation/UserGuideV0.7.0/7-Tools-NodeTool.md
+++ b/docs/Documentation/UserGuideV0.7.0/7-Tools-NodeTool.md
@@ -108,15 +108,16 @@ The command to query data group information for storage 
group is `storagegroup`,
 
 |Parameter name| Description| Example |
 |:---|:---|:---|
+| -a \| --all | Whether to query all storage groups, default `false` | `-a` |
| -sg \| --storagegroup <storage group path> | The path of the storage group 
that needs to be queried; the metadata group is queried by default | `-sg root.t1.d1` |
 
 #### Output
 
-The output is one string line that represents multiple node IPs, where each IP 
is separated by commas and the first one acts as the leader.
+The output consists of multiple lines; each line represents a key-value 
pair, where the key is the storage group and the value is a list of node IPs, 
separated by commas, with the first one acting as the leader. The format 
of each line is `key -> value`.
 
 #### Example
 
-Assume that the IoTDB Cluster is running on 3 nodes: 192.168.130.14, 
192.168.130.16 and 192.168.130.18, and number of replicas is 2.
+Assume that the IoTDB Cluster is running on 3 nodes: 192.168.130.14, 
192.168.130.16 and 192.168.130.18, and number of replicas is 2, and contains 
three storage groups: {root.t1.d1, root.t1.d2, root.t1.d3}.
 
 * No storage group
 
@@ -131,7 +132,7 @@ Assume that the IoTDB Cluster is running on 3 nodes: 
192.168.130.14, 192.168.130
        After using the command, the successful output will be as follows: 
        
        ```
-       192.168.130.14 (leader), 192.168.130.16, 192.168.130.18
+       Metadata  ->  192.168.130.14 (leader), 192.168.130.16, 192.168.130.18
        ```
        The above output indicates that the current metadata group contains 3 
nodes, among which 192.168.130.14 acts as leader.
        
@@ -150,10 +151,31 @@ Assume that the IoTDB Cluster is running on 3 nodes: 
192.168.130.14, 192.168.130
        After using the command, the successful output will be as follows: 
        
        ```
-       192.168.130.14 (leader), 192.168.130.18
+       root.t1.d1  ->  192.168.130.14 (leader), 192.168.130.18
        ```
        The above output indicates that the data partition which `root.t1.d1` 
belongs to contains 2 nodes, among which 192.168.130.14 acts as leader.
        
+* All storage groups
+
+       The Linux and MacOS system startup commands are as follows:
+       ```
+         Shell > ./bin/nodetool.sh -h 192.168.130.14 storagegroup -a
+       ```
+         
+       The Windows system startup commands are as follows:
+       ```
+         Shell > \bin\nodetool.bat -h 192.168.130.14 storagegroup -a
+       ```
+         
+       After using the command, the successful output will be as follows: 
+       
+       ```
+       root.t1.d1  ->  192.168.130.14 (leader), 192.168.130.18
+       root.t1.d2  ->  192.168.130.16 (leader), 192.168.130.14
+       root.t1.d3  ->  192.168.130.18 (leader), 192.168.130.16
+       ```
+       The above output indicates that the current cluster contains three storage 
groups: the data partition that `root.t1.d1` belongs to contains 2 nodes, with 
192.168.130.14 as leader; the data partition that `root.t1.d2` belongs to 
contains 2 nodes, with 192.168.130.16 as leader; and the data partition that 
`root.t1.d3` belongs to contains 2 nodes, with 192.168.130.18 as leader.
+
 ### Query Data Partition Info and Storage Group for Host (host)
 
 The data partition and storage group of the IoTDB Cluster has a one-to-many 
relationship, that is, the same data partition contains multiple storage 
groups. A data partition consists of multiple nodes, the number of nodes is the 
number of replicas, and one of the nodes acts as leader. With this command, 
users are able to know all the data partitions to which the connected node 
belongs, and all the storage groups contained in each data partition.

Reply via email to