This is an automated email from the ASF dual-hosted git repository.

vgalaxies pushed a commit to branch trans-pd
in repository https://gitbox.apache.org/repos/asf/incubator-hugegraph.git

commit 508923dd19feb0aee491c293dec042d2b08354dd
Author: VGalaxies <[email protected]>
AuthorDate: Tue Aug 6 23:57:43 2024 +0800

    fixup
---
 .../org/apache/hugegraph/pd/client/PDClient.java   | 16 ++++++++--------
 .../org/apache/hugegraph/pd/PartitionService.java  | 22 +++++++++++-----------
 .../org/apache/hugegraph/pd/StoreNodeService.java  |  2 +-
 3 files changed, 20 insertions(+), 20 deletions(-)

diff --git a/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java
index 17318595f..200a35ee8 100644
--- a/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java
+++ b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java
@@ -1019,7 +1019,7 @@ public class PDClient {
 
     /**
      * Working mode
-     * Auto:If the number of partitions on each store reaches the maximum value, you need to
+     * Auto: If the number of partitions on each store reaches the maximum value, you need to
      * specify the store group id. The store group id is 0, which is the default partition
      * splitData(ClusterOp.OperationMode mode, int storeGroupId, List<ClusterOp.SplitDataParam>
      * params)
@@ -1038,9 +1038,9 @@ public class PDClient {
 
     /**
      * Working mode
-     * Auto:If the number of partitions on each store reaches the maximum value, you need to
+     * Auto: If the number of partitions on each store reaches the maximum value, you need to
      * specify the store group id. The store group id is 0, which is the default partition
-     * Expert:Expert Mode,Specifier is required splitParams, limit SplitDataParam in the same
+     * Expert: Expert Mode, Specifier is required splitParams, limit SplitDataParam in the same
      * store group
      *
      * @param mode
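
A hypothetical caller sketch for the two modes above, built only from the
splitData signature quoted in this javadoc; the pdClient receiver, the enum
constant names, and the splitParams list are assumptions:

    // Hedged sketch, not verified against the PD client API.
    // Auto: store group id 0 addresses the default partition group.
    pdClient.splitData(ClusterOp.OperationMode.Auto, 0, splitParams);
    // Expert: splitParams is mandatory, and every SplitDataParam in it
    // must belong to the same store group.
    pdClient.splitData(ClusterOp.OperationMode.Expert, storeGroupId, splitParams);
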
@@ -1085,11 +1085,11 @@ public class PDClient {
 
     /**
      * Migrate partitions in manual mode
-     * //Working mode
-     * //  Auto:Automatic transfer to the same number of partitions per Store
-     * //  Expert:Expert Mode,Specifier is required transferParams
+     * // Working mode
+     * //  Auto: Automatic transfer to the same number of partitions per Store
+     * //  Expert: Expert Mode, Specifier is required transferParams
      *
-     * @param params Designation transferParams, expert mode,request source store / target store
+     * @param params Designation transferParams, expert mode, request source store / target store
      *               in the same store group
      * @throws PDException
      */
@@ -1228,7 +1228,7 @@ public class PDClient {
      * Used for the store's shard list rebuild
      *
      * @param groupId shard group id
-     * @param shards  shard list,delete when shards size is 0
+     * @param shards  shard list, delete when shards size is 0
      */
     public void updateShardGroupOp(int groupId, List<Metapb.Shard> shards) throws PDException {
         Pdpb.ChangeShardRequest request = Pdpb.ChangeShardRequest.newBuilder()
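
A hedged usage sketch of the contract above, using the signature shown in
this hunk; the pdClient receiver and the shard values are placeholders:

    // Non-empty list: rebuild the shard list of this shard group.
    pdClient.updateShardGroupOp(groupId, List.of(leaderShard, followerShard));
    // Empty list (size 0): the shard group is deleted, per the comment above.
    pdClient.updateShardGroupOp(groupId, List.of());
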
diff --git a/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java
index 6ec779d27..9f4dda31f 100644
--- a/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java
+++ b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java
@@ -145,7 +145,7 @@ public class PartitionService implements RaftStateListener {
                                                                        partition, 0))
                                                                .build();
         log.debug(
-                "{} Partition get code = {}, partition id  = {}, start = {}, 
end = {}, leader = {}",
+                "{} Partition get code = {}, partition id  = {}, start = {}, 
end = {}, leader = {}",
                 graphName, (code), partition.getId(), partition.getStartKey(),
                 partition.getEndKey(), partShard.getLeader());
 
@@ -264,7 +264,7 @@ public class PartitionService implements RaftStateListener {
     }
 
     /**
-     * compute graph partition id。partition gap * store group id + offset
+     * compute graph partition id, partition gap * store group id + offset
      *
      * @param graph  graph
      * @param offset offset
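
The id scheme above is plain arithmetic; a minimal runnable sketch, with an
illustrative gap of 1024 ids per store group (the real gap comes from PD
configuration):

    // Minimal sketch of "partition gap * store group id + offset".
    public final class PartitionIdDemo {
        private static final int PARTITION_GAP = 1024; // illustrative value

        static int partitionId(int storeGroupId, int offset) {
            return PARTITION_GAP * storeGroupId + offset;
        }

        public static void main(String[] args) {
            // store group 2, offset 5 -> 1024 * 2 + 5 = 2053
            System.out.println(partitionId(2, 5));
        }
    }
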
@@ -764,7 +764,7 @@ public class PartitionService implements RaftStateListener {
     }
 
     /**
-     * transfer leader to other shard 。
+     * transfer leader to other shard
      * Just transfer a partition
      */
     public void transferLeader(Integer partId, Metapb.Shard shard) {
@@ -862,7 +862,7 @@ public class PartitionService implements RaftStateListener {
 
         var groupSize = partitions.size() / toCount; // merge group size
         // 0~12 to 4 partitions
-        // scheme:0,1,2 => 0, 3,4,5 -> 1, 6,7,8 ->2, 9,10,11 -> 3
+        // scheme: 0,1,2 => 0, 3,4,5 => 1, 6,7,8 => 2, 9,10,11 => 3
         // Ensure the continuity of partitions
         for (int i = 0; i < toCount; i++) {
             var startKey = partitions.get(i * groupSize).getStartKey();
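
The grouping above can be checked by hand; a small runnable sketch of the
12-to-4 example, independent of the PD types:

    // Worked example of the merge scheme: contiguous groups of size
    // partitions / toCount, so 0,1,2 => 0, 3,4,5 => 1, and so on.
    public final class MergeSchemeDemo {
        public static void main(String[] args) {
            int partitionCount = 12;
            int toCount = 4;
            int groupSize = partitionCount / toCount; // merge group size
            for (int src = 0; src < partitionCount; src++) {
                System.out.println("partition " + src + " -> merged " + (src / groupSize));
            }
        }
    }
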
@@ -1010,7 +1010,7 @@ public class PartitionService implements RaftStateListener {
      */
     protected void fireChangeShard(Metapb.Partition partition, List<Metapb.Shard> shards,
                                    ConfChangeType changeType) {
-        log.info("fireChangeShard partition: {}-{}, changeType:{} {}", partition.getGraphName(),
+        log.info("fireChangeShard partition: {}-{}, changeType: {} {}", partition.getGraphName(),
                  partition.getId(), changeType, shards);
         instructionListeners.forEach(cmd -> {
             try {
@@ -1037,7 +1037,7 @@ public class PartitionService implements RaftStateListener {
      * @param partition
      */
     protected void fireSplitPartition(Metapb.Partition partition, SplitPartition splitPartition) {
-        log.info("fireSplitPartition partition: {}-{}, split :{}",
+        log.info("fireSplitPartition partition: {}-{}, split: {}",
                  partition.getGraphName(), partition.getId(), splitPartition);
         instructionListeners.forEach(cmd -> {
             try {
@@ -1052,7 +1052,7 @@ public class PartitionService implements RaftStateListener {
      * Send a Leader Switchover message
      */
     protected void fireTransferLeader(Metapb.Partition partition, TransferLeader transferLeader) {
-        log.info("fireTransferLeader partition: {}-{}, leader :{}",
+        log.info("fireTransferLeader partition: {}-{}, leader: {}",
                  partition.getGraphName(), partition.getId(), transferLeader);
         instructionListeners.forEach(cmd -> {
             try {
@@ -1067,7 +1067,7 @@ public class PartitionService implements RaftStateListener {
      * Send a message to the partition to move data
      *
      * @param partition     Original partition
-     * @param movePartition Target partition,contains key range
+     * @param movePartition Target partition, contains key range
      */
     protected void fireMovePartition(Metapb.Partition partition, MovePartition movePartition) {
         log.info("fireMovePartition partition: {} -> {}",
@@ -1159,8 +1159,8 @@ public class PartitionService implements RaftStateListener {
     /**
      * When all migration subtasks succeed:
      * 1. Send cleanup source partition directives
-     * 2. Set up target online,renewal key range, renewal graph partition count
-     * 3. delete move task,mission ended
+     * 2. Set up target online, renewal key range, renewal graph partition count
+     * 3. delete move task, mission ended
      *
      * @param subTasks     all move sub tasks
      * @param graphName    graph name
@@ -1238,7 +1238,7 @@ public class PartitionService implements RaftStateListener {
             fireCleanPartition(source, cleanPartition);
         }
 
-        // renewal key range, Local updates,client renewal
+        // renewal key range, Local updates, client renewal
         // updatePartition(targetPartitions);
 
         // renewal target Partition status, source may be deleted, so do not process
diff --git a/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java
index 50d810a51..9ca248022 100644
--- a/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java
+++ b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java
@@ -575,7 +575,7 @@ public class StoreNodeService {
     }
 
     /**
-     * According to the number of partitions,distribute group shard
+     * According to the number of partitions, distribute group shard
      *
      * @param groups list of (partition id, count)
      * @return total groups
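
Purely as an illustration of placement driven by a (partition id, count)
list, a round-robin sketch; the actual distribution policy lives in
StoreNodeService and may differ:

    // Illustrative only: place each group's shards across stores round-robin.
    import java.util.ArrayList;
    import java.util.List;

    public final class ShardDistributionDemo {
        public static void main(String[] args) {
            int[][] groups = { {0, 3}, {1, 3}, {2, 3} }; // (partition id, shard count)
            List<String> stores = List.of("store-1", "store-2", "store-3", "store-4");
            int cursor = 0;
            for (int[] group : groups) {
                List<String> placement = new ArrayList<>();
                for (int i = 0; i < group[1]; i++) {
                    placement.add(stores.get(cursor++ % stores.size()));
                }
                System.out.println("partition " + group[0] + " -> " + placement);
            }
        }
    }
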
