This is an automated email from the ASF dual-hosted git repository.
zihaoxiang pushed a commit to branch dev
in repository https://gitbox.apache.org/repos/asf/dolphinscheduler.git
The following commit(s) were added to refs/heads/dev by this push:
new ebcffb04aa fix hdfs defaultFs not working (#11823)
ebcffb04aa is described below
commit ebcffb04aad9db8ec6df1105e4770b187088e701
Author: xiangzihao <[email protected]>
AuthorDate: Wed Sep 7 16:40:43 2022 +0800
fix hdfs defaultFs not working (#11823)
---
.../src/main/java/org/apache/dolphinscheduler/common/Constants.java | 5 +++++
.../java/org/apache/dolphinscheduler/common/utils/HadoopUtils.java | 4 ++--
2 files changed, 7 insertions(+), 2 deletions(-)
diff --git a/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/Constants.java b/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/Constants.java
index 055eb54a96..804a2ddaef 100644
--- a/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/Constants.java
+++ b/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/Constants.java
@@ -65,6 +65,11 @@ public final class Constants {
*/
public static final String FS_DEFAULT_FS = "resource.hdfs.fs.defaultFS";
+ /**
+ * hdfs defaultFS property name. Should be consistent with the property name in hdfs-site.xml
+ */
+ public static final String HDFS_DEFAULT_FS = "fs.defaultFS";
+
/**
* hadoop configuration
*/
diff --git a/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/HadoopUtils.java b/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/HadoopUtils.java
index 8d7e4c2a6b..9f35607fb3 100644
--- a/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/HadoopUtils.java
+++ b/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/HadoopUtils.java
@@ -70,7 +70,7 @@ import com.google.common.cache.LoadingCache;
public class HadoopUtils implements Closeable, StorageOperate {
private static final Logger logger = LoggerFactory.getLogger(HadoopUtils.class);
- private String hdfsUser = PropertyUtils.getString(Constants.HDFS_ROOT_USER);
+ private String hdfsUser;
public static final String RM_HA_IDS = PropertyUtils.getString(Constants.YARN_RESOURCEMANAGER_HA_RM_IDS);
public static final String APP_ADDRESS = PropertyUtils.getString(Constants.YARN_APPLICATION_STATUS_ADDRESS);
public static final String JOB_HISTORY_ADDRESS = PropertyUtils.getString(Constants.YARN_JOB_HISTORY_STATUS_ADDRESS);
@@ -140,7 +140,7 @@ public class HadoopUtils implements Closeable, StorageOperate {
// the default is the local file system
if (StringUtils.isNotBlank(defaultFS)) {
Map<String, String> fsRelatedProps = PropertyUtils.getPrefixedProperties("fs.");
- configuration.set(Constants.FS_DEFAULT_FS, defaultFS);
+ configuration.set(Constants.HDFS_DEFAULT_FS, defaultFS);
fsRelatedProps.forEach((key, value) -> configuration.set(key, value));
} else {
logger.error("property:{} can not to be empty, please set!", Constants.FS_DEFAULT_FS);