This is an automated email from the ASF dual-hosted git repository.

lidongdai pushed a commit to branch master
in repository 
https://gitbox.apache.org/repos/asf/incubator-dolphinscheduler-website.git


The following commit(s) were added to refs/heads/master by this push:
     new 54e8af9  update english doc : 3.1 HDFS resource configuration
     new 8b336e8  Merge pull request #194 from BoYiZhang/master
54e8af9 is described below

commit 54e8af9b555480ba3bbaf4c72c8ace27e9addc20
Author: BoYiZhang <[email protected]>
AuthorDate: Thu Oct 15 15:01:17 2020 +0800

    update english doc : 3.1 HDFS resource configuration
---
 docs/en-us/1.3.1/user_doc/system-manual.md | 21 +++++++++------------
 1 file changed, 9 insertions(+), 12 deletions(-)

diff --git a/docs/en-us/1.3.1/user_doc/system-manual.md 
b/docs/en-us/1.3.1/user_doc/system-manual.md
index 1e3210d..4b71172 100644
--- a/docs/en-us/1.3.1/user_doc/system-manual.md
+++ b/docs/en-us/1.3.1/user_doc/system-manual.md
@@ -206,13 +206,13 @@ Add a Shell task to the drawing board, as shown in the 
figure below:
   - Upload resource files and udf functions, all uploaded files and resources 
will be stored on hdfs, so the following configuration items are required:
   
 ```  
-conf/common/common.properties  
+conf/common.properties  
     # Users who have permission to create directories under the HDFS root path
     hdfs.root.user=hdfs
-    # data base dir, resource file will store to this hadoop hdfs path, self 
configuration, please make sure the directory exists on hdfs and have read 
write permissions。"/escheduler" is recommended
-    data.store2hdfs.basepath=/dolphinscheduler
-    # resource upload startup type : HDFS,S3,NONE
-    res.upload.startup.type=HDFS
+    # data base dir, resource files will be stored in this hadoop hdfs path, self configuration, please make sure the directory exists on hdfs and has read and write permissions. "/dolphinscheduler" is recommended
+    resource.upload.path=/dolphinscheduler
+    # resource storage type : HDFS,S3,NONE
+    resource.storage.type=HDFS
     # whether kerberos starts
     hadoop.security.authentication.startup.state=false
     # java.security.krb5.conf path
@@ -220,11 +220,10 @@ conf/common/common.properties
     # loginUserFromKeytab user
     [email protected]
     # loginUserFromKeytab path
-    login.user.keytab.path=/opt/hdfs.headless.keytab
-    
-conf/common/hadoop.properties      
-    # ha or single namenode,If namenode ha needs to copy core-site.xml and 
hdfs-site.xml
-    # to the conf directory,support s3,for example : s3a://dolphinscheduler
+    login.user.keytab.path=/opt/hdfs.headless.keytab    
+    # if resource.storage.type is HDFS,and your Hadoop Cluster NameNode has HA 
enabled, you need to put core-site.xml and hdfs-site.xml in the 
installPath/conf directory. In this example, it is placed under 
/opt/soft/dolphinscheduler/conf, and configure the namenode cluster name; if 
the NameNode is not HA, modify it to a specific IP or host name.
+    # if resource.storage.type is S3, write the S3 address (HA supported), for example: s3a://dolphinscheduler
+    # Note: for S3, be sure to create the root directory /dolphinscheduler
     fs.defaultFS=hdfs://mycluster:8020    
     #resourcemanager ha note this need ips , this empty if single
     yarn.resourcemanager.ha.rm.ids=192.168.xx.xx,192.168.xx.xx    
@@ -232,8 +231,6 @@ conf/common/hadoop.properties
     yarn.application.status.address=http://xxxx:8088/ws/v1/cluster/apps/%s
 
 ```
-* Only one address needs to be configured for yarn.resourcemanager.ha.rm.ids 
and yarn.application.status.address, and the other address is empty.
-* You need to copy core-site.xml and hdfs-site.xml from the conf directory of 
the Hadoop cluster to the conf directory of the dolphinscheduler project, and 
restart the api-server service.
 
 #### 3.2 File management
 

Reply via email to