This is an automated email from the ASF dual-hosted git repository.

kerwin pushed a commit to branch 3.0.1-prepare
in repository https://gitbox.apache.org/repos/asf/dolphinscheduler.git


The following commit(s) were added to refs/heads/3.0.1-prepare by this push:
     new 275b68992c fix hdfs defaultFs not working (#11823) (#11936)
275b68992c is described below

commit 275b68992ce55134b37a27c790d402db4ce5367c
Author: Kerwin <[email protected]>
AuthorDate: Thu Sep 15 15:02:41 2022 +0800

    fix hdfs defaultFs not working (#11823) (#11936)
---
 .../src/main/java/org/apache/dolphinscheduler/common/Constants.java  | 5 +++++
 .../java/org/apache/dolphinscheduler/common/utils/HadoopUtils.java   | 4 ++--
 2 files changed, 7 insertions(+), 2 deletions(-)

diff --git 
a/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/Constants.java
 
b/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/Constants.java
index fda49f30f5..3bc103f33d 100644
--- 
a/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/Constants.java
+++ 
b/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/Constants.java
@@ -73,6 +73,11 @@ public final class Constants {
     public static final String FS_DEFAULT_FS = "fs.defaultFS";
 
 
+    /**
+     * hdfs defaultFS property name. Should be consistent with the property 
name in hdfs-site.xml
+     */
+    public static final String HDFS_DEFAULT_FS = "fs.defaultFS";
+
     /**
      * hadoop configuration
      */
diff --git 
a/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/HadoopUtils.java
 
b/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/HadoopUtils.java
index 314d946b60..16fdbe74fa 100644
--- 
a/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/HadoopUtils.java
+++ 
b/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/HadoopUtils.java
@@ -64,7 +64,7 @@ import static 
org.apache.dolphinscheduler.common.Constants.RESOURCE_TYPE_UDF;
 public class HadoopUtils implements Closeable, StorageOperate {
 
     private static final Logger logger = 
LoggerFactory.getLogger(HadoopUtils.class);
-    private String hdfsUser = 
PropertyUtils.getString(Constants.HDFS_ROOT_USER);
+    private String hdfsUser;
     public static final String RM_HA_IDS = 
PropertyUtils.getString(Constants.YARN_RESOURCEMANAGER_HA_RM_IDS);
     public static final String APP_ADDRESS = 
PropertyUtils.getString(Constants.YARN_APPLICATION_STATUS_ADDRESS);
     public static final String JOB_HISTORY_ADDRESS = 
PropertyUtils.getString(Constants.YARN_JOB_HISTORY_STATUS_ADDRESS);
@@ -132,7 +132,7 @@ public class HadoopUtils implements Closeable, 
StorageOperate {
             // the default is the local file system
             if (StringUtils.isNotBlank(defaultFS)) {
                 Map<String, String> fsRelatedProps = 
PropertyUtils.getPrefixedProperties("fs.");
-                configuration.set(Constants.FS_DEFAULT_FS, defaultFS);
+                configuration.set(Constants.HDFS_DEFAULT_FS, defaultFS);
                 fsRelatedProps.forEach((key, value) -> configuration.set(key, 
value));
             } else {
                 logger.error("property:{} can not to be empty, please set!", 
Constants.FS_DEFAULT_FS);

Reply via email to the commits mailing list.