fantengfeiNana opened a new issue, #2810:
URL: https://github.com/apache/incubator-hugegraph/issues/2810

   ### Problem Type (问题类型)
   
   configs (配置项 / 文档相关)
   
   ### Before submit
   
   - [x] 我已经确认现有的 [Issues](https://github.com/apache/hugegraph/issues) 与 [FAQ](https://hugegraph.apache.org/docs/guides/faq/) 中没有相同 / 重复问题 (I have confirmed and searched that there are no similar problems in the historical issues and documents)
   
   ### Environment (环境信息)
   
   - Server Version: 1.5.0 (Apache Release Version)
   - Backend: RocksDB x 3,  SSD 
   - OS: xx CPUs, xx G RAM, Ubuntu 2x.x / CentOS 7.x 
   
   
   
   ### Your Question (问题描述)
   
   I have deployed the 1.5.0 cluster version. The store service cluster reports a successful deployment, but when I create a graph, the graph files are generated on only one server node and are not synchronized to the other nodes.
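
   For reference, a minimal sketch of how this could be checked from the REST side; the `GET /apis/graphs` endpoint is the standard list-graphs API, and the credentials below are placeholders (StandardAuthenticator is enabled in rest-server.properties):

   ```python
   # Sketch: list the graphs reported by each REST node, to see whether a newly
   # created graph is visible on all three servers or only on one of them.
   # The credentials are placeholders, not the real ones.
   import requests

   NODES = ["172.16.44.26", "172.16.44.27", "172.16.44.28"]
   AUTH = ("admin", "<password>")  # placeholder

   for host in NODES:
       resp = requests.get(f"http://{host}:8080/apis/graphs", auth=AUTH, timeout=5)
       print(host, resp.status_code, resp.json())
   ```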
   
   1. Server A:
   
   **hugegraph.properties**
   gremlin.graph=org.apache.hugegraph.auth.HugeFactoryAuthProxy
   vertex.cache_type=l2
   edge.cache_type=l2
   backend=rocksdb
   serializer=binary
   store=hugegraph
   task.scheduler_type=local
   task.schedule_period=10
   task.retry=0
   task.wait_timeout=10
   search.text_analyzer=jieba
   search.text_analyzer_mode=INDEX
   rocksdb.data_path=../../rocksdb-data/data
   rocksdb.wal_path=../../rocksdb-data/wal
   pd.peers=172.16.44.26:8686,172.16.44.27:8686,172.16.44.28:8686
   
   **rest-server.properties**
   restserver.url=http://0.0.0.0:8080
   gremlinserver.url=http://127.0.0.1:8182
   
   
   graphs=/usr/fifedu/apache-hugegraph-incubating-1.5.0/apache-hugegraph-server-incubating-1.5.0/conf/graphs
   
   
   batch.max_write_ratio=80
   batch.max_write_threads=0
   
   arthas.telnet_port=8562
   arthas.http_port=8561
   arthas.ip=127.0.0.1
   arthas.disabled_commands=jad
   
   auth.authenticator=org.apache.hugegraph.auth.StandardAuthenticator
   auth.graph_store=hugegraph
   
   
   rpc.server_host=172.16.44.26
   rpc.server_port=8092
   
   
   
   rpc.remote_url=172.16.44.26:8091,172.16.44.27:8092,172.16.44.28:8093
   
   raft.group_peers=172.16.44.26:8091,172.16.44.27:8092,172.16.44.28:8093
   
   
   server.id=server-1
   server.role=master
   log.slow_query_threshold=1000
   memory_monitor.threshold=0.85
   memory_monitor.period=2000
   
   **pd application.yml**
   spring:
     application:
       name: hugegraph-pd
   
   management:
     metrics:
       export:
         prometheus:
           enabled: true
     endpoints:
       web:
         exposure:
           include: "*"
   
   logging:
     config: 'file:./conf/log4j2.xml'
   # TODO: handle the license file later (PDConfig)
   license:
     verify-path: ./conf/verify-license.json
     license-path: ./conf/hugegraph.license
   grpc:
     port: 8686
     # The service address of grpc needs to be changed to the actual local IPv4 address when deploying.
     host: 172.16.44.26
   
   server:
     # REST service port number
     port: 8620
   
   pd:
     # Storage path
     data-path: ./pd_data
     # The check cycle of automatic expansion regularly checks the number of partitions in each store and automatically balances the number of partitions
     patrol-interval: 1800
     # The minimum number of surviving store nodes, less than which the entire cluster is unavailable
     initial-store-count: 3
     # The initial store list, grpc IP: grpc port, the store in the list is automatically activated
     initial-store-list: 172.16.44.26:8500,172.16.44.27:8500,172.16.44.28:8500
   
   raft:
     # The address of the local raft service
     address: 172.16.44.26:8610
     # The service address of the PD cluster 172.16.44.26:8610,172.16.44.27:8610,172.16.44.28:8610
     peers-list: 172.16.44.26:8610,172.16.44.27:8610,172.16.44.28:8610
   
   store:
     # The time when the store went offline. After that time, the store is considered permanently unavailable, and the replica is allocated to another machine, in seconds
     max-down-time: 172800
     # Specifies whether to enable store monitoring data storage
     monitor_data_enabled: true
     # The interval between monitoring data, minute, hour, second
     # default: 1 min * 1 day = 1440
     monitor_data_interval: 1 minute
     # Retention time of monitoring data is 1 day; day, month, year
     monitor_data_retention: 1 day
   
   partition:
     # Default number of replicas per partition
     default-shard-count: 3
     # The default maximum number of replicas per machine
     # the initial number of partitions = store-max-shard-count * store-number / default-shard-count
     store-max-shard-count: 12
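
   For reference, plugging the values above into the comment's formula (if I read it correctly): initial number of partitions = store-max-shard-count * store-number / default-shard-count = 12 * 3 / 3 = 12, given the 3 stores in initial-store-list.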
   
   
   **store application.yml**
   
   
   pdserver:
     # PD service address, multiple PD addresses separated by commas
     address: 172.16.44.26:8686,172.16.44.27:8686,172.16.44.28:8686
   
   management:
     metrics:
       export:
         prometheus:
           enabled: true
     endpoints:
       web:
         exposure:
           include: "*"
   
   grpc:
     # grpc service address
     host: 172.16.44.26
     port: 8500
     netty-server:
       max-inbound-message-size: 1000MB
   raft:
     # raft cache queue size
     disruptorBufferSize: 1024
     address: 172.16.44.26:8510
     max-log-file-size: 600000000000
     # Snapshot generation interval, in seconds
     snapshotInterval: 1800
   server:
     # rest service address
     port: 8520
   
   app:
     # Storage path, support multiple paths, separated by commas
     data-path: ./storage-a
     #raft-path: ./storage
   
   spring:
     application:
       name: store-node-grpc-server
     profiles:
       active: default
       include: pd
   
   logging:
     config: 'file:./conf/log4j2.xml'
     level:
       root: info
   
   2. Server B:
   
   **hugegraph.properties**
   gremlin.graph=org.apache.hugegraph.auth.HugeFactoryAuthProxy
   vertex.cache_type=l2
   edge.cache_type=l2
   backend=rocksdb
   serializer=binary
   store=hugegraph
   task.scheduler_type=local
   task.schedule_period=10
   task.retry=0
   task.wait_timeout=10
   search.text_analyzer=jieba
   search.text_analyzer_mode=INDEX
   rocksdb.data_path=../../rocksdb-data/data
   rocksdb.wal_path=../../rocksdb-data/wal
   pd.peers=172.16.44.26:8686,172.16.44.27:8686,172.16.44.28:8686
   
   **rest-server.properties**
   restserver.url=http://0.0.0.0:8080
   gremlinserver.url=http://127.0.0.1:8182
   
   
   graphs=/usr/fifedu/apache-hugegraph-incubating-1.5.0/apache-hugegraph-server-incubating-1.5.0/conf/graphs
   
   
   batch.max_write_ratio=80
   batch.max_write_threads=0
   
   arthas.telnet_port=8562
   arthas.http_port=8561
   arthas.ip=127.0.0.1
   arthas.disabled_commands=jad
   
   auth.authenticator=org.apache.hugegraph.auth.StandardAuthenticator
   auth.graph_store=hugegraph
   
   
   rpc.server_host=172.16.44.27
   rpc.server_port=8092
   
   
   
   rpc.remote_url=172.16.44.26:8091,172.16.44.27:8092,172.16.44.28:8093
   
   raft.group_peers=172.16.44.26:8091,172.16.44.27:8092,172.16.44.28:8093
   
   
   server.id=server-2
   server.role=worker
   log.slow_query_threshold=1000
   memory_monitor.threshold=0.85
   memory_monitor.period=2000
   
   **pd application.yml**
   spring:
     application:
       name: hugegraph-pd
   
   management:
     metrics:
       export:
         prometheus:
           enabled: true
     endpoints:
       web:
         exposure:
           include: "*"
   
   logging:
     config: 'file:./conf/log4j2.xml'
   # TODO: handle the license file later (PDConfig)
   license:
     verify-path: ./conf/verify-license.json
     license-path: ./conf/hugegraph.license
   grpc:
     port: 8686
     # The service address of grpc needs to be changed to the actual local IPv4 address when deploying.
     host: 172.16.44.27
   
   server:
     # REST service port number
     port: 8620
   
   pd:
     # Storage path
     data-path: ./pd_data
     # The check cycle of automatic expansion regularly checks the number of partitions in each store and automatically balances the number of partitions
     patrol-interval: 1800
     # The minimum number of surviving store nodes, less than which the entire cluster is unavailable
     initial-store-count: 3
     # The initial store list, grpc IP: grpc port, the store in the list is automatically activated
     initial-store-list: 172.16.44.26:8500,172.16.44.27:8500,172.16.44.28:8500
   
   raft:
     # The address of the local raft service
     address: 172.16.44.27:8610
     # The service address of the PD cluster 172.16.44.26:8610,172.16.44.27:8610,172.16.44.28:8610
     peers-list: 172.16.44.26:8610,172.16.44.27:8610,172.16.44.28:8610
   
   store:
     # The time when the store went offline. After that time, the store is considered permanently unavailable, and the replica is allocated to another machine, in seconds
     max-down-time: 172800
     # Specifies whether to enable store monitoring data storage
     monitor_data_enabled: true
     # The interval between monitoring data, minute, hour, second
     # default: 1 min * 1 day = 1440
     monitor_data_interval: 1 minute
     # Retention time of monitoring data is 1 day; day, month, year
     monitor_data_retention: 1 day
   
   partition:
     # Default number of replicas per partition
     default-shard-count: 3
     # The default maximum number of replicas per machine
     # the initial number of partitions = store-max-shard-count * store-number / default-shard-count
     store-max-shard-count: 12
   
   
   **store application.yml**
   
   
   pdserver:
     # PD service address, multiple PD addresses separated by commas
     address: 172.16.44.26:8686,172.16.44.27:8686,172.16.44.28:8686
   
   management:
     metrics:
       export:
         prometheus:
           enabled: true
     endpoints:
       web:
         exposure:
           include: "*"
   
   grpc:
     # grpc service address
     host: 172.16.44.27
     port: 8500
     netty-server:
       max-inbound-message-size: 1000MB
   raft:
     # raft cache queue size
     disruptorBufferSize: 1024
     address: 172.16.44.26:8510
     max-log-file-size: 600000000000
     # Snapshot generation interval, in seconds
     snapshotInterval: 1800
   server:
     # rest service address
     port: 8520
   
   app:
     # Storage path, support multiple paths, separated by commas
     data-path: ./storage-b
     #raft-path: ./storage
   
   spring:
     application:
       name: store-node-grpc-server
     profiles:
       active: default
       include: pd
   
   logging:
     config: 'file:./conf/log4j2.xml'
     level:
       root: info
        
        
   3. Server C:
   
   **hugegraph.properties**
   gremlin.graph=org.apache.hugegraph.auth.HugeFactoryAuthProxy
   vertex.cache_type=l2
   edge.cache_type=l2
   backend=rocksdb
   serializer=binary
   store=hugegraph
   task.scheduler_type=local
   task.schedule_period=10
   task.retry=0
   task.wait_timeout=10
   search.text_analyzer=jieba
   search.text_analyzer_mode=INDEX
   rocksdb.data_path=../../rocksdb-data/data
   rocksdb.wal_path=../../rocksdb-data/wal
   pd.peers=172.16.44.26:8686,172.16.44.27:8686,172.16.44.28:8686
   
   **rest-server.properties**
   restserver.url=http://0.0.0.0:8080
   gremlinserver.url=http://127.0.0.1:8182
   
   
   graphs=/usr/fifedu/apache-hugegraph-incubating-1.5.0/apache-hugegraph-server-incubating-1.5.0/conf/graphs
   
   
   batch.max_write_ratio=80
   batch.max_write_threads=0
   
   arthas.telnet_port=8562
   arthas.http_port=8561
   arthas.ip=127.0.0.1
   arthas.disabled_commands=jad
   
   auth.authenticator=org.apache.hugegraph.auth.StandardAuthenticator
   auth.graph_store=hugegraph
   
   
   rpc.server_host=172.16.44.28
   rpc.server_port=8092
   
   
   
   rpc.remote_url=172.16.44.26:8091,172.16.44.27:8092,172.16.44.28:8093
   
   raft.group_peers=172.16.44.26:8091,172.16.44.27:8092,172.16.44.28:8093
   
   
   server.id=server-2
   server.role=worker
   log.slow_query_threshold=1000
   memory_monitor.threshold=0.85
   memory_monitor.period=2000
   
   **pd application.yml**
   spring:
     application:
       name: hugegraph-pd
   
   management:
     metrics:
       export:
         prometheus:
           enabled: true
     endpoints:
       web:
         exposure:
           include: "*"
   
   logging:
     config: 'file:./conf/log4j2.xml'
   # TODO: handle the license file later (PDConfig)
   license:
     verify-path: ./conf/verify-license.json
     license-path: ./conf/hugegraph.license
   grpc:
     port: 8686
     # The service address of grpc needs to be changed to the actual local IPv4 address when deploying.
     host: 172.16.44.28
   
   server:
     # REST service port number
     port: 8620
   
   pd:
     # Storage path
     data-path: ./pd_data
     # The check cycle of automatic expansion regularly checks the number of partitions in each store and automatically balances the number of partitions
     patrol-interval: 1800
     # The minimum number of surviving store nodes, less than which the entire cluster is unavailable
     initial-store-count: 3
     # The initial store list, grpc IP: grpc port, the store in the list is automatically activated
     initial-store-list: 172.16.44.26:8500,172.16.44.27:8500,172.16.44.28:8500
   
   raft:
     # The address of the local raft service
     address: 172.16.44.28:8610
     # The service address of the PD cluster 172.16.44.26:8610,172.16.44.27:8610,172.16.44.28:8610
     peers-list: 172.16.44.26:8610,172.16.44.27:8610,172.16.44.28:8610
   
   store:
     # The time when the store went offline. After that time, the store is considered permanently unavailable, and the replica is allocated to another machine, in seconds
     max-down-time: 172800
     # Specifies whether to enable store monitoring data storage
     monitor_data_enabled: true
     # The interval between monitoring data, minute, hour, second
     # default: 1 min * 1 day = 1440
     monitor_data_interval: 1 minute
     # Retention time of monitoring data is 1 day; day, month, year
     monitor_data_retention: 1 day
   
   partition:
     # Default number of replicas per partition
     default-shard-count: 3
     # The default maximum number of replicas per machine
     # the initial number of partitions = store-max-shard-count * store-number / default-shard-count
     store-max-shard-count: 12
   
   
   **store application.yml**
   
   
   pdserver:
     # PD service address, multiple PD addresses separated by commas
     address: 172.16.44.26:8686,172.16.44.27:8686,172.16.44.28:8686
   
   management:
     metrics:
       export:
         prometheus:
           enabled: true
     endpoints:
       web:
         exposure:
           include: "*"
   
   grpc:
     # grpc service address
     host: 172.16.44.28
     port: 8500
     netty-server:
       max-inbound-message-size: 1000MB
   raft:
     # raft cache queue size
     disruptorBufferSize: 1024
     address: 172.16.44.26:8510
     max-log-file-size: 600000000000
     # Snapshot generation interval, in seconds
     snapshotInterval: 1800
   server:
     # rest service address
     port: 8520
   
   app:
     # Storage path, support multiple paths, separated by commas
     data-path: ./storage-b
     #raft-path: ./storage
   
   spring:
     application:
       name: store-node-grpc-server
     profiles:
       active: default
       include: pd
   
   logging:
     config: 'file:./conf/log4j2.xml'
     level:
       root: info
        
   nginx configuration:
   
   upstream hugegraph route {
       server 172.16.44.26:8080;
       server 172.16.44.27:8080;
       server 172.16.44.28:8080;
   }
   
   
   ### Vertex/Edge example (问题点 / 边数据举例)
   
   ```javascript
   
   ```
   
   ### Schema [VertexLabel, EdgeLabel, IndexLabel] (元数据结构)
   
   ```javascript
   
   ```

