This is an automated email from the ASF dual-hosted git repository. volodymyr pushed a commit to branch master in repository https://gitbox.apache.org/repos/asf/drill.git
commit 8ddc9d79e0d298e74f2256328dd9ddee06a20066 Author: Bohdan Kazydub <bohdan.kazy...@gmail.com> AuthorDate: Fri Aug 3 12:31:01 2018 +0300 DRILL-6662: Access AWS access key ID and secret access key using Credential Provider API for S3 storage plugin closes #1419 --- distribution/src/resources/core-site-example.xml | 14 ++++++++++ .../drill/exec/store/dfs/FileSystemPlugin.java | 32 ++++++++++++++++++++++ 2 files changed, 46 insertions(+) diff --git a/distribution/src/resources/core-site-example.xml b/distribution/src/resources/core-site-example.xml index 854e54d..c7225a1 100644 --- a/distribution/src/resources/core-site-example.xml +++ b/distribution/src/resources/core-site-example.xml @@ -30,4 +30,18 @@ <value>ENTER_YOUR_SECRETKEY</value> </property> + <!--Use this property to specify one or more credential provider URIs instead of + configuring above credentials in plain text--> + <!--<property> + <name>hadoop.security.credential.provider.path</name> + <value>ENTER_YOUR_PROVIDER_PATH</value> + </property>--> + + <!--Set this property to true to avoid caching of S3 file system configuration properties, + so when you add/update a property (e.g. 
fs.s3a.secret.key) in S3 storage plugin its new value will be taken--> <!--<property> <name>fs.s3a.impl.disable.cache</name> <value>true</value> </property>--> </configuration> diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/dfs/FileSystemPlugin.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/dfs/FileSystemPlugin.java index b1f41a4..cb66913 100644 --- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/dfs/FileSystemPlugin.java +++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/dfs/FileSystemPlugin.java @@ -20,6 +20,7 @@ package org.apache.drill.exec.store.dfs; import static org.apache.drill.exec.store.dfs.FileSystemSchemaFactory.DEFAULT_WS_NAME; import java.io.IOException; +import java.net.URI; import java.util.ArrayList; import java.util.HashMap; import java.util.List; @@ -77,6 +78,10 @@ public class FileSystemPlugin extends AbstractStoragePlugin { fsConf.set("fs.classpath.impl", ClassPathFileSystem.class.getName()); fsConf.set("fs.drill-local.impl", LocalSyncableFileSystem.class.getName()); + if (isS3Connection(fsConf)) { + handleS3Credentials(fsConf); + } + formatCreator = newFormatCreator(config, context, fsConf); List<FormatMatcher> matchers = new ArrayList<>(); formatPluginsByConfig = new HashMap<>(); @@ -104,6 +109,33 @@ public class FileSystemPlugin extends AbstractStoragePlugin { } } + private boolean isS3Connection(Configuration conf) { + URI uri = FileSystem.getDefaultUri(conf); + return uri.getScheme().equals("s3a"); + } + + /** + * Retrieve secret and access keys from configured (with + * {@link org.apache.hadoop.security.alias.CredentialProviderFactory#CREDENTIAL_PROVIDER_PATH} property) + * credential providers and set them into {@code conf}. If provider path is not configured or credential + * is absent in providers, it will conditionally fall back to the configuration setting. 
The fallback will occur unless + * {@link org.apache.hadoop.security.alias.CredentialProvider#CLEAR_TEXT_FALLBACK} is set to {@code false}. + * + * @param conf {@code Configuration} which will be updated with credentials from provider + * @throws IOException thrown if a credential cannot be retrieved from provider + */ + private void handleS3Credentials(Configuration conf) throws IOException { + String[] credentialKeys = {"fs.s3a.secret.key", "fs.s3a.access.key"}; + for (String key : credentialKeys) { + char[] credentialChars = conf.getPassword(key); + if (credentialChars == null) { + logger.warn(String.format("Property '%s' is absent.", key)); + } else { + conf.set(key, String.valueOf(credentialChars)); + } + } + } + /** * Creates a new FormatCreator instance. *