kyoty opened a new issue #5760:
URL: https://github.com/apache/dolphinscheduler/issues/5760


   I'm not sure whether the current configuration file supports initializing 
the HDFSConfiguration by customizing the paths of hdfs-site.xml and core-site.xml.
   
   Currently, Hadoop initialization only supports configuration keys prefixed 
with **fs**. This configuration capability is too narrow. In practice, there 
are many other configurations, such as `dfs.replication`, that start with 
**dfs** — how can these configurations be made to take effect?
   
   ```java
   private void init() {
           try {
               configuration = new HdfsConfiguration();
   
               String resourceStorageType = 
PropertyUtils.getUpperCaseString(Constants.RESOURCE_STORAGE_TYPE);
               ResUploadType resUploadType = 
ResUploadType.valueOf(resourceStorageType);
   
               if (resUploadType == ResUploadType.HDFS) {
                   if (CommonUtils.loadKerberosConf(configuration)) {
                       hdfsUser = "";
                   }
   
                   String defaultFS = configuration.get(Constants.FS_DEFAULTFS);
                   //first get key from core-site.xml hdfs-site.xml ,if null 
,then try to get from properties file
                   // the default is the local file system
                   if (defaultFS.startsWith("file")) {
                       String defaultFSProp = 
PropertyUtils.getString(Constants.FS_DEFAULTFS);
                       if (StringUtils.isNotBlank(defaultFSProp)) {
                           Map<String, String> fsRelatedProps = 
PropertyUtils.getPrefixedProperties("fs.");
                           configuration.set(Constants.FS_DEFAULTFS, 
defaultFSProp);
                           fsRelatedProps.forEach((key, value) -> 
configuration.set(key, value));
                       } else {
                           logger.error("property:{} can not to be empty, 
please set!", Constants.FS_DEFAULTFS);
                           throw new RuntimeException(
                                   String.format("property: %s can not to be 
empty, please set!", Constants.FS_DEFAULTFS)
                           );
                       }
                   } else {
                       logger.info("get property:{} -> {}, from core-site.xml 
hdfs-site.xml ", Constants.FS_DEFAULTFS, defaultFS);
                   }
   
                   if (StringUtils.isNotEmpty(hdfsUser)) {
                       UserGroupInformation ugi = 
UserGroupInformation.createRemoteUser(hdfsUser);
                       ugi.doAs((PrivilegedExceptionAction<Boolean>) () -> {
                           fs = FileSystem.get(configuration);
                           return true;
                       });
                   } else {
                       logger.warn("hdfs.root.user is not set value!");
                       fs = FileSystem.get(configuration);
                   }
               } else if (resUploadType == ResUploadType.S3) {
                   System.setProperty(Constants.AWS_S3_V4, 
Constants.STRING_TRUE);
                   configuration.set(Constants.FS_DEFAULTFS, 
PropertyUtils.getString(Constants.FS_DEFAULTFS));
                   configuration.set(Constants.FS_S3A_ENDPOINT, 
PropertyUtils.getString(Constants.FS_S3A_ENDPOINT));
                   configuration.set(Constants.FS_S3A_ACCESS_KEY, 
PropertyUtils.getString(Constants.FS_S3A_ACCESS_KEY));
                   configuration.set(Constants.FS_S3A_SECRET_KEY, 
PropertyUtils.getString(Constants.FS_S3A_SECRET_KEY));
                   fs = FileSystem.get(configuration);
               }
   
           } catch (Exception e) {
               logger.error(e.getMessage(), e);
           }
       }
   ```


-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]


Reply via email to