This is an automated email from the ASF dual-hosted git repository.

vgalaxies pushed a commit to branch trans-pd
in repository https://gitbox.apache.org/repos/asf/incubator-hugegraph.git

commit 82b5e0b9bf5c9ed0e9f99142ececea47695041dc
Author: VGalaxies <[email protected]>
AuthorDate: Sun May 12 12:40:41 2024 +0800

    translate pd grpc
---
 .../hg-pd-grpc/src/main/proto/metaTask.proto       |  10 +-
 .../hg-pd-grpc/src/main/proto/metapb.proto         | 124 ++++++++++-----------
 .../hg-pd-grpc/src/main/proto/pd_common.proto      |   2 -
 .../hg-pd-grpc/src/main/proto/pd_pulse.proto       |  30 ++---
 .../hg-pd-grpc/src/main/proto/pd_watch.proto       |   1 -
 hugegraph-pd/hg-pd-grpc/src/main/proto/pdpb.proto  | 123 ++++++++++----------
 6 files changed, 140 insertions(+), 150 deletions(-)

diff --git a/hugegraph-pd/hg-pd-grpc/src/main/proto/metaTask.proto 
b/hugegraph-pd/hg-pd-grpc/src/main/proto/metaTask.proto
index c4bb8bde1..65ab26a68 100644
--- a/hugegraph-pd/hg-pd-grpc/src/main/proto/metaTask.proto
+++ b/hugegraph-pd/hg-pd-grpc/src/main/proto/metaTask.proto
@@ -30,7 +30,6 @@ enum TaskType {
   Change_KeyRange = 5;
 }
 
-// 一条任务信息
 message Task {
   uint64 id = 1;
   TaskType type = 2;
@@ -38,7 +37,6 @@ message Task {
   int64 start_timestamp = 4;
   metapb.Partition partition = 5;
   string message = 6;
-  //每个shard执行的任务状态
   repeated ShardTaskState shardState = 7;
   ChangeShard changeShard = 9;
   SplitPartition splitPartition = 10;
@@ -49,10 +47,10 @@ message Task {
 
 enum TaskState{
   Task_Unknown = 0;
-  Task_Ready = 1;   //任务就绪
-  Task_Doing = 2;   //执行中
-  Task_Done = 3;    //完成
-  Task_Exit = 4;    //退出
+  Task_Ready = 1;
+  Task_Doing = 2;
+  Task_Done = 3;
+  Task_Exit = 4;
   Task_Stop = 10;
   Task_Success = 11;
   Task_Failure = 12;
diff --git a/hugegraph-pd/hg-pd-grpc/src/main/proto/metapb.proto 
b/hugegraph-pd/hg-pd-grpc/src/main/proto/metapb.proto
index a8a695be0..2d361de66 100644
--- a/hugegraph-pd/hg-pd-grpc/src/main/proto/metapb.proto
+++ b/hugegraph-pd/hg-pd-grpc/src/main/proto/metapb.proto
@@ -21,17 +21,17 @@ option java_package = "org.apache.hugegraph.pd.grpc";
 import "google/protobuf/any.proto";
 
 enum ClusterState{
-  // 集群健康
+  // Cluster health
   Cluster_OK = 0;
-  // 分区警告,存在部分故障节点,短时间不影响读写
+  // Partition warning: There are some faulty nodes, which do not affect 
read/write for a short time
   Cluster_Warn = 2;
-  // 分区下线,可以读,无法写
+  // The partition is offline, which can be read but cannot be written
   Cluster_Offline = 10;
-  // 分区故障,无法读写,需要尽快修复故障节点。
+  // If the partition is faulty and cannot be read or written, you need to 
repair the faulty node as soon as possible.
   Cluster_Fault = 11;
   Cluster_Not_Ready = -1;
 }
-// 集群状态
+// Cluster status
 message ClusterStats{
   ClusterState state = 1;
   string message = 2;
@@ -40,15 +40,13 @@ message ClusterStats{
 
 enum StoreState {
   Unknown = 0;
-  // 未激活
+  // Not activated
   Pending = 4;
-  // 在线
+  // Online
   Up = 1;
-  // 离线
+  // Offline
   Offline = 2;
-  // 下线中
   Exiting = 5;
-  // 已下线
   Tombstone = 3;
 }
 
@@ -64,7 +62,7 @@ message Store {
   string address = 2;
   string raft_address = 3;
   repeated StoreLabel labels = 4;
-  // Store软件版本号
+  // Store software version number
   string version = 5;
   StoreState state = 6;
   // The start timestamp of the current store
@@ -73,7 +71,7 @@ message Store {
   // The last heartbeat timestamp of the store.
   int64 last_heartbeat = 9;
   StoreStats stats = 10;
-  // 数据格式版本号
+  // The version number of the data format
   int32 data_version = 11;
   int32 cores = 12;
   string data_path = 13;
@@ -103,38 +101,37 @@ message ShardGroup{
 
 message Graph {
   string graph_name = 2;
-  // 分区数量,0表示无效,不能大于raft分组总数
+  // The number of partitions, 0 indicates invalid and cannot be greater than 
the total number of raft packets
   int32 partition_count = 3;
-  // 当前工作状态
   PartitionState state = 10;
   string message = 11;
   GraphState graph_state = 12;
 }
-// 分区工作状态
+// Partition working status
 enum PartitionState{
   PState_None = 0;
   //
   PState_Normal = 1;
-  // 分区警告,存在部分故障节点,短时间不影响读写
+  // Partition warning: There are some faulty nodes, which do not affect 
read/write for a short time
   PState_Warn = 2;
-  // 分区下线,可以读,无法写
+  // The partition is offline, which can be read but cannot be written
   PState_Offline = 10;
-  // 分区故障,无法读写,需要尽快修复故障节点。
+  // If the partition is faulty and cannot be read or written, you need to 
repair the faulty node as soon as possible.
   PState_Fault = 11;
 }
 
 message PartitionV36 {
   uint32 id = 1;
   string graph_name = 3;
-  // 分区范围 [start_key, end_key).
+  // Partition range [start_key, end_key).
   uint64 start_key = 4;
   uint64 end_key = 5;
   repeated Shard shards = 6;
-  // Leader任期,leader切换后递增
+  // Leader term, incremented after each leader switch
   uint64 version = 7;
-  // shards版本号,每次改变后递增
+  // Version number of the shards, incremented after each change
   uint64 conf_ver = 8;
-  // 当前工作状态
+  // Current working status
   PartitionState state = 10;
   string message = 11;
 }
@@ -142,16 +139,16 @@ message PartitionV36 {
 message Partition {
   uint32 id = 1;
   string graph_name = 3;
-  // 分区范围 [start_key, end_key).
+  // Partition range [start_key, end_key).
   uint64 start_key = 4;
   uint64 end_key = 5;
-  // Partition 对象不在保存 shard list(根据对应的shard group 去查询), version 和 conf 
version不再有实际的意义
+  // The partition object no longer stores the shard list (which is queried 
according to the corresponding shard group), and the version and conf version 
are no longer meaningful
   // repeated Shard shards = 6;
-  // key range 每次改变后递增
+  // key range, incremented after each change
   uint64 version = 7;
-  // shards版本号,每次改变后递增
+  // Version number of the shards, incremented after each change
   // uint64 conf_ver = 8;
-  // 当前工作状态
+  // Current working status
   PartitionState state = 10;
   string message = 11;
 }
@@ -159,21 +156,21 @@ message Partition {
 message PartitionShard {
   metapb.Partition partition = 1;
   metapb.Shard leader = 2;
-  // 离线的Shard
+  // Offline Shard
   repeated metapb.Shard offline_shards = 3;
 }
-// 记录分区所在的存储位置
+// The storage location where the record partition is located
 message PartitionStore {
   uint32 partition_id = 1;
   string graph_name = 3;
-  // 存储位置
+  // Storage location
   string store_location = 4;
 }
 
 message PartitionRaft {
   uint32 partition_id = 1;
   string graph_name = 3;
-  // 存储位置
+  // Storage location
   string raft_location = 4;
 }
 
@@ -181,42 +178,43 @@ message ShardStats{
   uint64 store_id = 2;
   ShardRole role = 3;
   ShardState state = 4;
-  // 安装快照的进度
+  // The progress of the installation of the snapshot
   uint32 progress = 5;
 }
+
 message PartitionStats{
   uint32 id = 1;
-  // raft分组的任期.
+  // The term of the raft group.
   uint64 leader_term = 2;
   repeated string graph_name = 3;
   metapb.Shard leader = 4;
-  // 离线 shards
+  // Offline shards
   repeated metapb.Shard shard = 5;
   repeated metapb.Shard learner = 6;
   uint64 conf_ver = 7;
-  // 分区状态
+  // partition status
   PartitionState state = 8;
   repeated ShardStats shardStats = 9;
-  // 分区近似大小
+  // The approximate size of the partition
   uint64 approximate_size = 10;
-  // 分区key的近似数量
+  // The approximate number of partition keys
   uint64 approximate_keys = 13;
   // heartbeat timestamp
   int64 timestamp = 16;
 }
 
 message GraphStats{
-  // 图名
+  // Graph name
   string graph_name = 1;
-  // 分区近似大小
+  // The approximate size of the partition
   uint64 approximate_size = 2;
-  // 分区key的近似数量
+  // The approximate number of partition keys
   uint64 approximate_keys = 3;
   //  // committed index
   //  uint64 committed_index = 4;
   uint32 partition_id = 5;
   ShardRole role = 6;
-  // 当前工作状态
+  // Current working status
   PartitionState work_state = 8;
 }
 
@@ -252,11 +250,11 @@ message QueryStats {
 
 enum ShardState{
   SState_None = 0;
-  // 正常
+  // Normal
   SState_Normal = 1;
-  // 安装快照
+  // Install snapshots
   SState_Snapshot = 2;
-  // 离线
+  // Offline
   SState_Offline = 10;
 }
 
@@ -310,14 +308,14 @@ message StoreStats {
   repeated RecordPair system_metrics = 25;
 }
 
-// 分区查询条件
+// Partition query criteria
 message PartitionQuery{
-  optional uint64 store_id = 1;      // 0 表示查询条件不包含store_id
+  optional uint64 store_id = 1;      // 0 Indicates that the query criteria do 
not contain store_id
   optional string graph_name = 2;
   optional uint32 partition_id = 4;
 }
 
-//PD 节点信息
+// PD Node information
 message Member {
   uint64 cluster_id = 1;
   string raft_url = 3;
@@ -329,37 +327,38 @@ message Member {
   string replicator_state = 9;
 }
 
-// 图空间配置
+// Graph space configuration
 message GraphSpace{
   string name = 1;
-  // 最大占用存储
+  // Maximum occupied storage
   uint64 storage_limit = 2;
-  // 已使用空间
+  // Space used
   uint64 used_size = 3;
-  // 修改时间
+  // Modify time
   uint64 timestamp = 10;
 }
 
-// PD 配置
+// PD Config
 message PDConfig{
   uint64 version = 1;
-  // 分区数量, 初始化根据Store数量动态计算,分裂后进行修改
+  // The number of partitions is dynamically calculated based on the number of 
stores at the initialization and modified after splitting
+  // The exact count needs to be determined based on the store group
   int32 partition_count = 2;
-  // 每分区副本数量
+  // Number of replicas per partition
   int32 shard_count = 3;
-  // pd集群列表
+  // PD cluster peer list
   string peers_list = 4;
-  // 集群中最少store数量
+  // The minimum number of stores in the cluster
   int32 min_store_count = 6;
-  // 每个store最大副本数
+  // Maximum number of replicas per store
   int32 max_Shards_Per_Store = 7;
-  // 修改时间
+  // Modify time
   uint64 timestamp = 10;
 }
 
 
 
-//消息持久化
+// Message persistence
 message QueueItem{
   string item_id = 1;
   string item_class = 2;
@@ -387,8 +386,7 @@ enum GraphMode{
 }
 
 enum GraphModeReason{
-  Empty = 0; // 空
-  Initiative = 1; // 主动的状态设置
-  Quota = 2; // 达到限额条件
-
+  Empty = 0;
+  Initiative = 1; // Active status settings
+  Quota = 2; // The limit condition is reached
 }
diff --git a/hugegraph-pd/hg-pd-grpc/src/main/proto/pd_common.proto 
b/hugegraph-pd/hg-pd-grpc/src/main/proto/pd_common.proto
index c9eec8149..c2b55c278 100644
--- a/hugegraph-pd/hg-pd-grpc/src/main/proto/pd_common.proto
+++ b/hugegraph-pd/hg-pd-grpc/src/main/proto/pd_common.proto
@@ -22,9 +22,7 @@ option java_package = "org.apache.hugegraph.pd.grpc.common";
 option java_outer_classname = "HgPdCommonProto";
 
 message RequestHeader {
-  // 集群 ID.
   uint64 cluster_id = 1;
-  // 发送者 ID.
   uint64 sender_id = 2;
 }
 
diff --git a/hugegraph-pd/hg-pd-grpc/src/main/proto/pd_pulse.proto 
b/hugegraph-pd/hg-pd-grpc/src/main/proto/pd_pulse.proto
index fb8940df6..afb6d6287 100644
--- a/hugegraph-pd/hg-pd-grpc/src/main/proto/pd_pulse.proto
+++ b/hugegraph-pd/hg-pd-grpc/src/main/proto/pd_pulse.proto
@@ -56,8 +56,8 @@ message PulseAckRequest {
   int64 notice_id = 2;
 }
 
-// 分区心跳,分区的peer增减、leader改变等事件发生时,由leader发送心跳。
-// 同时pd对分区进行shard增减通过Response发送给leader
+// When an event such as a partition heartbeat occurs such as the increase or 
decrease of peers in a partition or the change of leader, the leader sends a 
heartbeat.
+// At the same time, the pd adds or decreases shards to the partition and 
sends the response to the leader
 message PartitionHeartbeatRequest {
   RequestHeader header = 1;
   // Leader Peer sending the heartbeat
@@ -83,15 +83,15 @@ message PartitionHeartbeatResponse {
   ChangeShard change_shard = 4;
 
   TransferLeader transfer_leader = 5;
-  // 拆分成多个分区,第一个SplitPartition是原分区,从第二开始是新分区
+  // Split into multiple partitions, with the first SplitPartition being the 
original partition and the second starting being the new partition
   SplitPartition split_partition = 6;
-  // rocksdb compaction 指定的表,null是针对所有
+  // rocksdb compaction specifies the table, null is for all
   DbCompaction db_compaction = 7;
-  // 将partition的数据,迁移到 target
+  // Migrate data from the partition to the target
   MovePartition move_partition = 8;
-  // 清理partition的graph的数据
+  // Clean up the data for the partition of the graph
   CleanPartition clean_partition = 9;
-  // partition key range 变化
+  // partition key range variation
   PartitionKeyRange key_range = 10;
 }
 
@@ -113,11 +113,11 @@ message DbCompaction {
   string table_name = 3;
 }
 
-message MovePartition{
-  // target partition的key range为,迁移后的新range
+message MovePartition {
+  // The key range of the target partition, i.e. the new range after migration
   metapb.Partition target_partition = 1;
-  // partition 的 key start 和 key end的所有数据,
-  // 会迁移到 target partition 上
+  // partition's key start and key end,
+  // will migrate to target partition
   uint64 key_start = 2;
   uint64 key_end = 3;
 }
@@ -126,7 +126,7 @@ message CleanPartition {
   uint64 key_start = 1;
   uint64 key_end = 2;
   CleanType clean_type = 3;
-  bool delete_partition = 4; //是否删除分区
+  bool delete_partition = 4; // Whether to delete the partition
 }
 
 message PartitionKeyRange{
@@ -159,12 +159,12 @@ enum ConfChangeType {
   CONF_CHANGE_TYPE_ADD_NODE = 1;
   CONF_CHANGE_TYPE_REMOVE_NODE = 2;
   CONF_CHANGE_TYPE_ADD_LEARNER_NODE = 3;
-  CONF_CHANGE_TYPE_ADJUST = 4;    // 调整shard,leader根据新的配置动态增减。
+  CONF_CHANGE_TYPE_ADJUST = 4;    // Adjust the shard, and the leader 
dynamically increases or decreases according to the new configuration.
 }
 
 enum CleanType {
-  CLEAN_TYPE_KEEP_RANGE = 0; // 仅保留这个range
-  CLEAN_TYPE_EXCLUDE_RANGE = 1; // 删除这个range
+  CLEAN_TYPE_KEEP_RANGE = 0; // Only this range remains
+  CLEAN_TYPE_EXCLUDE_RANGE = 1; // Delete this range
 }
 
 enum PdInstructionType {
diff --git a/hugegraph-pd/hg-pd-grpc/src/main/proto/pd_watch.proto 
b/hugegraph-pd/hg-pd-grpc/src/main/proto/pd_watch.proto
index febc41f52..6d0c016c2 100644
--- a/hugegraph-pd/hg-pd-grpc/src/main/proto/pd_watch.proto
+++ b/hugegraph-pd/hg-pd-grpc/src/main/proto/pd_watch.proto
@@ -98,6 +98,5 @@ enum NodeEventType {
   NODE_EVENT_TYPE_NODE_ONLINE = 1;
   NODE_EVENT_TYPE_NODE_OFFLINE = 2;
   NODE_EVENT_TYPE_NODE_RAFT_CHANGE = 3;
-  // pd leader 变更
   NODE_EVENT_TYPE_PD_LEADER_CHANGE = 4;
 }
diff --git a/hugegraph-pd/hg-pd-grpc/src/main/proto/pdpb.proto 
b/hugegraph-pd/hg-pd-grpc/src/main/proto/pdpb.proto
index 4e293ca08..f7754824e 100644
--- a/hugegraph-pd/hg-pd-grpc/src/main/proto/pdpb.proto
+++ b/hugegraph-pd/hg-pd-grpc/src/main/proto/pdpb.proto
@@ -24,83 +24,83 @@ import "metaTask.proto";
 option java_package = "org.apache.hugegraph.pd.grpc";
 
 service PD {
-  // 注册store,首次注册会生成新的store_id, store_id是store唯一标识
+  // Register store, the first registration will generate a new store_id, 
store_id is the unique identifier of the store
   rpc RegisterStore(RegisterStoreRequest) returns (RegisterStoreResponse) {}
   rpc GetStore(GetStoreRequest) returns (GetStoreResponse) {}
-  // 修改Store状态等信息.
+  // Modify Store status and other information.
   rpc SetStore(SetStoreRequest) returns (SetStoreResponse) {}
-  // 根据可以查找所属分区
+  // Delete the specified store
   rpc DelStore(DetStoreRequest) returns (DetStoreResponse) {}
   rpc GetAllStores(GetAllStoresRequest) returns (GetAllStoresResponse) {}
   rpc StoreHeartbeat(StoreHeartbeatRequest) returns (StoreHeartbeatResponse) {}
 
-  // 根据可以查找所属分区
+  // Look up the partition based on the key
   rpc GetPartition(GetPartitionRequest) returns (GetPartitionResponse) {}
 
-  // 根据HashCode查找所属分区
+  // Look up the partition based on HashCode
   rpc GetPartitionByCode(GetPartitionByCodeRequest) returns 
(GetPartitionResponse) {}
-  // 根据PartitionID返回分区
+  // Return partition by PartitionID
   rpc GetPartitionByID(GetPartitionByIDRequest) returns (GetPartitionResponse) 
{}
   rpc ScanPartitions(ScanPartitionsRequest) returns (ScanPartitionsResponse) {}
-  // 更新分区信息,主要用来更新分区key范围,调用此接口需谨慎,否则会造成数据丢失。
+  // Update partition information, mainly used to update partition key range, 
call this interface carefully, otherwise data loss will occur.
   rpc UpdatePartition(UpdatePartitionRequest) returns 
(UpdatePartitionResponse) {}
-  // 根据可以查找所属分区
+  // Delete the specified partition
   rpc DelPartition(DelPartitionRequest) returns (DelPartitionResponse) {}
-  // 根据条件查询分区信息, 包括Store、Graph等条件
+  // Query partition information based on conditions, including Store, Graph 
and other conditions
   rpc QueryPartitions(QueryPartitionsRequest) returns 
(QueryPartitionsResponse){}
-  // 读取图信息
+  // Read graph information
   rpc GetGraph(GetGraphRequest) returns (GetGraphResponse){}
-  // 修改图信息
+  // Modify graph information
   rpc SetGraph(SetGraphRequest) returns (SetGraphResponse){}
   rpc DelGraph(DelGraphRequest) returns (DelGraphResponse){}
-  // 全局唯一自增ID
+  // Global unique incremental ID
   rpc GetId(GetIdRequest) returns (GetIdResponse){}
   rpc ResetId(ResetIdRequest) returns (ResetIdResponse){}
-  // PD的集群列表
+  // PD cluster list
   rpc GetMembers(GetMembersRequest) returns (GetMembersResponse) {}
   rpc GetStoreStatus(GetAllStoresRequest) returns (GetAllStoresResponse) {}
   rpc GetPDConfig(GetPDConfigRequest) returns (GetPDConfigResponse){}
   rpc SetPDConfig(SetPDConfigRequest) returns (SetPDConfigResponse){}
   rpc GetGraphSpace(GetGraphSpaceRequest) returns (GetGraphSpaceResponse){}
   rpc SetGraphSpace(SetGraphSpaceRequest) returns (SetGraphSpaceResponse){}
-  // 获取集群健康状态
+  // Get cluster health status
   rpc GetClusterStats(GetClusterStatsRequest) returns 
(GetClusterStatsResponse){}
-  // 替换PD的集群节点
+  // Replace PD cluster nodes
   rpc ChangePeerList(ChangePeerListRequest) returns 
(getChangePeerListResponse) {}
-  // 数据分裂
+  // Data splitting
   rpc SplitData(SplitDataRequest) returns (SplitDataResponse){}
 
   rpc SplitGraphData(SplitGraphDataRequest) returns (SplitDataResponse) {}
-  // 数据迁移
+  // Data migration
   rpc MovePartition(MovePartitionRequest) returns (MovePartitionResponse){}
-  // 汇报分区分裂等任务执行结果
+  // Report partition splitting and other task execution results
   rpc ReportTask(ReportTaskRequest) returns (ReportTaskResponse){}
 
   rpc GetPartitionStats(GetPartitionStatsRequest) returns 
(GetPartitionStatsResponse){}
-  //平衡store中分区leader的数量
+  // Balance the number of partition leaders in the store
   rpc BalanceLeaders(BalanceLeadersRequest) returns (BalanceLeadersResponse){}
 
-  // 替换license文件
+  // Replace license file
   rpc PutLicense(PutLicenseRequest) returns (PutLicenseResponse){}
 
-  // 通知rocksdb进行compaction
+  // Notify rocksdb to perform compaction
   rpc DbCompaction(DbCompactionRequest) returns (DbCompactionResponse){}
 
-  // 合并分区
+  // Merge partitions
   rpc CombineCluster(CombineClusterRequest) returns (CombineClusterResponse){}
-  // 单个图缩容
+  // Shrink a single graph
   rpc CombineGraph(CombineGraphRequest) returns (CombineGraphResponse) {}
 
   // shard group
   rpc GetShardGroup(GetShardGroupRequest) returns (GetShardGroupResponse){}
   rpc UpdateShardGroup(UpdateShardGroupRequest) returns 
(UpdateShardGroupResponse){}
-  // 删除掉shard group
+  // Delete shard group
   rpc DeleteShardGroup(DeleteShardGroupRequest) returns 
(DeleteShardGroupResponse) {}
-  // shard group 运维相关的处理
+  // Maintenance-related operations for shard groups
   rpc UpdateShardGroupOp(ChangeShardRequest) returns (ChangeShardResponse){}
-  // change shard
+  // Change shard
   rpc ChangeShard(ChangeShardRequest) returns (ChangeShardResponse) {}
-  // 更新pd raft
+  // Update pd raft
   rpc updatePdRaft(UpdatePdRaftRequest) returns (UpdatePdRaftResponse)  {}
 
   rpc getCache(GetGraphRequest) returns (CacheResponse)  {}
@@ -108,9 +108,7 @@ service PD {
 }
 
 message RequestHeader {
-  // 集群 ID.
   uint64 cluster_id = 1;
-  // 发送者 ID.
   uint64 sender_id = 2;
 }
 
@@ -141,21 +139,21 @@ enum ErrorType {
   ROCKSDB_SAVE_SNAPSHOT_ERROR = 1005;
   ROCKSDB_LOAD_SNAPSHOT_ERROR = 1006;
 
-  // 当前集群状态禁止分裂
+  // Current cluster state prohibits splitting
   Cluster_State_Forbid_Splitting = 1007;
-  // 正在分裂中
+  // Currently splitting
   Split_Partition_Doing = 1008;
-  // store上分区数量超过上限
+  // The number of partitions on the store exceeds the limit
   Too_Many_Partitions_Per_Store = 1009;
-  // license 错误
+  // License error
   LICENSE_ERROR = 107;
-  // license 认证错误
+  // License authentication error
   LICENSE_VERIFY_ERROR = 108;
 
-  //分区下线正在进行
+  // Partition offline is in progress
   Store_Tombstone_Doing = 1010;
 
-  // 不合法的分裂个数
+  // Invalid number of splits
   Invalid_Split_Partition_Count = 1011;
 }
 
@@ -193,7 +191,7 @@ message RegisterStoreRequest {
 
 message RegisterStoreResponse {
   ResponseHeader header = 1;
-  // 初次注册,返回新的store_id
+  // Upon initial registration, returns the new store_id
   uint64 store_id = 2;
 }
 
@@ -204,16 +202,15 @@ message SetStoreRequest {
 
 message SetStoreResponse {
   ResponseHeader header = 1;
-  // 返回修改后的Store
+  // Returns the modified Store
   metapb.Store store = 2;
 }
 
-
-// 返回graph_name所在的所有store,如果graph_name为空值,则返回系统所有的store
+// Returns all stores where graph_name is located. If graph_name is empty, 
returns all system stores.
 message GetAllStoresRequest {
   RequestHeader header = 1;
   string graph_name = 2;
-  // 是否返回离线的store
+  // Whether to exclude offline stores
   bool exclude_offline_stores = 3;
 }
 
@@ -254,7 +251,7 @@ message GetPartitionResponse {
   ResponseHeader header = 1;
   metapb.Partition partition = 2;
   metapb.Shard leader = 3;
-  // 离线的Shard
+  // Offline Shards
   repeated metapb.Shard offline_shards = 4;
 }
 
@@ -435,65 +432,65 @@ enum OperationMode {
   Expert = 1;
 }
 
-message SplitDataParam{
-  // 被分裂的源分区ID
+message SplitDataParam {
+  // ID of the source partition being split
   uint32 partition_id = 1;
-  //目标分区数量
+  // Number of target partitions
   uint32 count = 2;
 }
 
-message SplitDataRequest{
+message SplitDataRequest {
   RequestHeader header = 1;
-  //工作模式
-  //  Auto:自动分裂,每个Store上分区数达到最大值
-  //  Expert:专家模式,需要指定splitParams
+  // Operation mode
+  //  Auto: Automatic splitting, when the number of partitions on each Store 
reaches the maximum value
+  //  Expert: Expert mode, requires specifying splitParams
   OperationMode mode = 2;
   repeated SplitDataParam param = 3;
 }
 
-message SplitGraphDataRequest{
+message SplitGraphDataRequest {
   RequestHeader header = 1;
-  //工作模式
+  // Operation mode
   string graph_name = 2;
   uint32 to_count = 3;
 }
 
-message SplitDataResponse{
+message SplitDataResponse {
   ResponseHeader header = 1;
 }
 
-message MovePartitionParam{
+message MovePartitionParam {
   uint32 partition_id = 1;
   uint64 src_store_id = 2;
   uint64 dst_store_id = 3;
 }
 
-message MovePartitionRequest{
+message MovePartitionRequest {
   RequestHeader header = 1;
-  //工作模式
-  //  Auto:自动转移,达到每个Store上分区数量相同
-  //  Expert:专家模式,需要指定transferParams
+  // Operation mode
+  //  Auto: Automatic transfer, aiming for equal number of partitions on each 
Store
+  //  Expert: Expert mode, requires specifying transferParams
   OperationMode mode = 2;
   repeated MovePartitionParam param = 3;
 }
 
-message MovePartitionResponse{
+message MovePartitionResponse {
   ResponseHeader header = 1;
 }
 
-message ReportTaskRequest{
+message ReportTaskRequest {
   RequestHeader header = 1;
   metaTask.Task task = 2;
 }
 
-message ReportTaskResponse{
+message ReportTaskResponse {
   ResponseHeader header = 1;
 }
 
-message GetPartitionStatsRequest{
+message GetPartitionStatsRequest {
   RequestHeader header = 1;
   uint32 partition_id = 2;
-  // 如果未空,返回所有图的同一分区ID
+  // If empty, returns the same partition ID for all graphs
   string graph_name = 4;
 }
 
@@ -596,7 +593,7 @@ message UpdatePdRaftResponse{
 }
 message CacheResponse {
   ResponseHeader header = 1;
-  // 返回修改后的Store
+  // Returns the modified Store
   repeated metapb.Store stores = 2;
   repeated metapb.ShardGroup shards = 3;
   repeated metapb.Graph graphs = 4;

Reply via email to