Repository: hadoop
Updated Branches:
  refs/heads/trunk 8d202f125 -> 96fa0f848


http://git-wip-us.apache.org/repos/asf/hadoop/blob/96fa0f84/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3native/TestS3Credentials.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3native/TestS3Credentials.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3native/TestS3Credentials.java
new file mode 100644
index 0000000..33d0320
--- /dev/null
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3native/TestS3Credentials.java
@@ -0,0 +1,147 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.s3native;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.security.ProviderUtils;
+import org.apache.hadoop.security.alias.CredentialProvider;
+import org.apache.hadoop.security.alias.CredentialProviderFactory;
+
+import java.io.File;
+import java.net.URI;
+
+import org.junit.Before;
+import org.junit.Ignore;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+import org.junit.rules.TestName;
+
+import static org.apache.hadoop.fs.s3native.S3NativeFileSystemConfigKeys.S3_NATIVE_AWS_ACCESS_KEY_ID;
+import static org.apache.hadoop.fs.s3native.S3NativeFileSystemConfigKeys.S3_NATIVE_AWS_SECRET_ACCESS_KEY;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
+
+/**
+ * This is to test the {@link S3Credentials} class for extracting AWS
+ * credentials.
+ */
+public class TestS3Credentials {
+  public static final Log LOG = LogFactory.getLog(TestS3Credentials.class);
+
+  @Rule
+  public final TestName test = new TestName();
+
+  @Before
+  public void announce() {
+    LOG.info("Running test " + test.getMethodName());
+  }
+
+  private static final String EXAMPLE_ID = "AKASOMEACCESSKEY";
+  private static final String EXAMPLE_KEY =
+      "RGV0cm9pdCBSZ/WQgY2xl/YW5lZCB1cAEXAMPLE";
+
+  @Test
+  public void testInvalidHostnameWithUnderscores() throws Exception {
+    S3Credentials s3Credentials = new S3Credentials();
+    try {
+      s3Credentials.initialize(new URI("s3n://a:b@c_d"), new Configuration());
+      fail("Should throw IllegalArgumentException");
+    } catch (IllegalArgumentException e) {
+      assertEquals("Invalid hostname in URI s3n://a:b@c_d", e.getMessage());
+    }
+  }
+
+  @Test
+  public void testPlaintextConfigPassword() throws Exception {
+    S3Credentials s3Credentials = new S3Credentials();
+    Configuration conf = new Configuration();
+    conf.set(S3_NATIVE_AWS_ACCESS_KEY_ID, EXAMPLE_ID);
+    conf.set(S3_NATIVE_AWS_SECRET_ACCESS_KEY, EXAMPLE_KEY);
+    s3Credentials.initialize(new URI("s3n://foobar"), conf);
+    assertEquals("Could not retrieve proper access key", EXAMPLE_ID,
+        s3Credentials.getAccessKey());
+    assertEquals("Could not retrieve proper secret", EXAMPLE_KEY,
+        s3Credentials.getSecretAccessKey());
+  }
+
+  @Test
+  public void testPlaintextConfigPasswordWithWhitespace() throws Exception {
+    S3Credentials s3Credentials = new S3Credentials();
+    Configuration conf = new Configuration();
+    conf.set(S3_NATIVE_AWS_ACCESS_KEY_ID, "\r\n " + EXAMPLE_ID +
+        " \r\n");
+    conf.set(S3_NATIVE_AWS_SECRET_ACCESS_KEY, "\r\n " + EXAMPLE_KEY +
+        " \r\n");
+    s3Credentials.initialize(new URI("s3n://foobar"), conf);
+    assertEquals("Could not retrieve proper access key", EXAMPLE_ID,
+        s3Credentials.getAccessKey());
+    assertEquals("Could not retrieve proper secret", EXAMPLE_KEY,
+        s3Credentials.getSecretAccessKey());
+  }
+
+  @Rule
+  public final TemporaryFolder tempDir = new TemporaryFolder();
+
+  @Test
+  public void testCredentialProvider() throws Exception {
+    // set up conf to have a cred provider
+    final Configuration conf = new Configuration();
+    final File file = tempDir.newFile("test.jks");
+    final URI jks = ProviderUtils.nestURIForLocalJavaKeyStoreProvider(
+        file.toURI());
+    conf.set(CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH,
+        jks.toString());
+
+    // add our creds to the provider
+    final CredentialProvider provider =
+        CredentialProviderFactory.getProviders(conf).get(0);
+    provider.createCredentialEntry(S3_NATIVE_AWS_SECRET_ACCESS_KEY,
+        EXAMPLE_KEY.toCharArray());
+    provider.flush();
+
+    // make sure S3Creds can retrieve things.
+    S3Credentials s3Credentials = new S3Credentials();
+    conf.set(S3_NATIVE_AWS_ACCESS_KEY_ID, EXAMPLE_ID);
+    s3Credentials.initialize(new URI("s3n://foobar"), conf);
+    assertEquals("Could not retrieve proper access key", EXAMPLE_ID,
+        s3Credentials.getAccessKey());
+    assertEquals("Could not retrieve proper secret", EXAMPLE_KEY,
+        s3Credentials.getSecretAccessKey());
+  }
+
+  @Test(expected=IllegalArgumentException.class)
+  @Ignore
+  public void noSecretShouldThrow() throws Exception {
+    S3Credentials s3Credentials = new S3Credentials();
+    Configuration conf = new Configuration();
+    conf.set(S3_NATIVE_AWS_ACCESS_KEY_ID, EXAMPLE_ID);
+    s3Credentials.initialize(new URI("s3n://foobar"), conf);
+  }
+
+  @Test(expected=IllegalArgumentException.class)
+  @Ignore
+  public void noAccessIdShouldThrow() throws Exception {
+    S3Credentials s3Credentials = new S3Credentials();
+    Configuration conf = new Configuration();
+    conf.set(S3_NATIVE_AWS_SECRET_ACCESS_KEY, EXAMPLE_KEY);
+    s3Credentials.initialize(new URI("s3n://foobar"), conf);
+  }
+}
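
A minimal standalone sketch (not part of this commit) of the credential-provider
flow that testCredentialProvider exercises; the keystore path, alias string, and
secret value below are illustrative, and the lookup mirrors what S3Credentials
does through Configuration.getPassword():

import java.io.File;
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.ProviderUtils;
import org.apache.hadoop.security.alias.CredentialProvider;
import org.apache.hadoop.security.alias.CredentialProviderFactory;

public class CredentialProviderSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();

    // Point the credential-provider framework at a local Java keystore
    // (illustrative path; the test uses a JUnit TemporaryFolder instead).
    URI jks = ProviderUtils.nestURIForLocalJavaKeyStoreProvider(
        new File("/tmp/s3-credentials.jks").toURI());
    conf.set(CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH,
        jks.toString());

    // Store the secret under the S3N secret-key alias (assumed here to be
    // fs.s3n.awsSecretAccessKey, the value of S3_NATIVE_AWS_SECRET_ACCESS_KEY).
    CredentialProvider provider =
        CredentialProviderFactory.getProviders(conf).get(0);
    provider.createCredentialEntry("fs.s3n.awsSecretAccessKey",
        "EXAMPLESECRET".toCharArray());
    provider.flush();

    // getPassword() consults the provider before falling back to any
    // plaintext value, so the secret never has to appear in core-site.xml.
    char[] secret = conf.getPassword("fs.s3n.awsSecretAccessKey");
    System.out.println(new String(secret));
  }
}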

http://git-wip-us.apache.org/repos/asf/hadoop/blob/96fa0f84/hadoop-tools/hadoop-aws/src/test/resources/contract/s3.xml
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/test/resources/contract/s3.xml b/hadoop-tools/hadoop-aws/src/test/resources/contract/s3.xml
deleted file mode 100644
index 4b742c1..0000000
--- a/hadoop-tools/hadoop-aws/src/test/resources/contract/s3.xml
+++ /dev/null
@@ -1,104 +0,0 @@
-<!--
-  ~ Licensed to the Apache Software Foundation (ASF) under one
-  ~  or more contributor license agreements.  See the NOTICE file
-  ~  distributed with this work for additional information
-  ~  regarding copyright ownership.  The ASF licenses this file
-  ~  to you under the Apache License, Version 2.0 (the
-  ~  "License"); you may not use this file except in compliance
-  ~  with the License.  You may obtain a copy of the License at
-  ~
-  ~       http://www.apache.org/licenses/LICENSE-2.0
-  ~
-  ~  Unless required by applicable law or agreed to in writing, software
-  ~  distributed under the License is distributed on an "AS IS" BASIS,
-  ~  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  ~  See the License for the specific language governing permissions and
-  ~  limitations under the License.
-  -->
-
-<configuration>
-  <!--
-  S3 is backed by a blobstore.
-  -->
-
-  <property>
-    <name>fs.contract.test.root-tests-enabled</name>
-    <value>true</value>
-  </property>
-
-  <property>
-    <name>fs.contract.test.random-seek-count</name>
-    <value>10</value>
-  </property>
-
-  <property>
-    <name>fs.contract.is-blobstore</name>
-    <value>true</value>
-  </property>
-
-  <property>
-    <name>fs.contract.is-case-sensitive</name>
-    <value>true</value>
-  </property>
-
-  <property>
-    <name>fs.contract.rename-returns-false-if-source-missing</name>
-    <value>true</value>
-  </property>
-
-  <property>
-    <name>fs.contract.supports-append</name>
-    <value>false</value>
-  </property>
-
-  <property>
-    <name>fs.contract.supports-atomic-directory-delete</name>
-    <value>false</value>
-  </property>
-
-  <property>
-    <name>fs.contract.supports-atomic-rename</name>
-    <value>false</value>
-  </property>
-
-  <property>
-    <name>fs.contract.supports-block-locality</name>
-    <value>false</value>
-  </property>
-
-  <property>
-    <name>fs.contract.supports-concat</name>
-    <value>false</value>
-  </property>
-
-  <property>
-    <name>fs.contract.supports-seek</name>
-    <value>true</value>
-  </property>
-
-  <property>
-    <name>fs.contract.supports-seek-on-closed-file</name>
-    <value>true</value>
-  </property>
-
-  <property>
-    <name>fs.contract.supports-available-on-closed-file</name>
-    <value>true</value>
-  </property>
-
-  <property>
-    <name>fs.contract.rejects-seek-past-eof</name>
-    <value>true</value>
-  </property>
-
-  <property>
-    <name>fs.contract.supports-strict-exceptions</name>
-    <value>true</value>
-  </property>
-
-  <property>
-    <name>fs.contract.supports-unix-permissions</name>
-    <value>false</value>
-  </property>
-
-</configuration>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/96fa0f84/hadoop-tools/hadoop-sls/src/main/data/2jobs2min-rumen-jh.json
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-sls/src/main/data/2jobs2min-rumen-jh.json b/hadoop-tools/hadoop-sls/src/main/data/2jobs2min-rumen-jh.json
index d7c219d..cc3dcea 100644
--- a/hadoop-tools/hadoop-sls/src/main/data/2jobs2min-rumen-jh.json
+++ b/hadoop-tools/hadoop-sls/src/main/data/2jobs2min-rumen-jh.json
@@ -4546,7 +4546,6 @@
     "hadoop.http.authentication.kerberos.keytab" : 
"${user.home}/hadoop.keytab",
     "yarn.nodemanager.keytab" : "/etc/krb5.keytab",
     "io.seqfile.sorter.recordlimit" : "1000000",
-    "s3.blocksize" : "67108864",
     "mapreduce.task.io.sort.factor" : "10",
     "yarn.nodemanager.disk-health-checker.interval-ms" : "120000",
     "mapreduce.job.working.dir" : "hdfs://a2115.smile.com:9820/user/jenkins",
@@ -4556,12 +4555,10 @@
     "dfs.namenode.delegation.token.renew-interval" : "86400000",
     "yarn.nodemanager.resource.memory-mb" : "8192",
     "io.map.index.interval" : "128",
-    "s3.client-write-packet-size" : "65536",
     "mapreduce.task.files.preserve.failedtasks" : "false",
     "dfs.namenode.http-address" : "a2115.smile.com:20101",
     "ha.zookeeper.session-timeout.ms" : "5000",
     "hadoop.hdfs.configuration.version" : "1",
-    "s3.replication" : "3",
     "dfs.datanode.balance.bandwidthPerSec" : "1048576",
     "mapreduce.reduce.shuffle.connect.timeout" : "180000",
     "hadoop.ssl.enabled" : "false",
@@ -4662,7 +4659,7 @@
     "mapreduce.shuffle.ssl.enabled" : "false",
     "dfs.namenode.invalidate.work.pct.per.iteration" : "0.32f",
     "dfs.blockreport.intervalMsec" : "21600000",
-    "fs.s3.sleepTimeSeconds" : "10",
+    "fs.s3n.sleepTimeSeconds" : "10",
     "dfs.namenode.replication.considerLoad" : "true",
     "dfs.client.block.write.retries" : "3",
     "hadoop.ssl.server.conf" : "ssl-server.xml",
@@ -4676,7 +4673,6 @@
     "dfs.replication" : "3",
     "ipc.client.tcpnodelay" : "false",
     "dfs.namenode.accesstime.precision" : "3600000",
-    "s3.stream-buffer-size" : "4096",
     "mapreduce.jobtracker.tasktracker.maxblacklists" : "4",
     "dfs.client.read.shortcircuit.skip.checksum" : "false",
     "mapreduce.job.jvm.numtasks" : "1",
@@ -4694,7 +4690,7 @@
     "kfs.stream-buffer-size" : "4096",
     "dfs.ha.tail-edits.period" : "60",
     "hadoop.security.authentication" : "simple",
-    "fs.s3.buffer.dir" : "${hadoop.tmp.dir}/s3",
+    "fs.s3n.buffer.dir" : "${hadoop.tmp.dir}/s3n",
     "rpc.engine.org.apache.hadoop.yarn.api.AMRMProtocolPB" : 
"org.apache.hadoop.ipc.ProtobufRpcEngine",
     "mapreduce.jobtracker.taskscheduler" : 
"org.apache.hadoop.mapred.JobQueueTaskScheduler",
     "yarn.app.mapreduce.am.job.task.listener.thread-count" : "30",
@@ -4776,7 +4772,7 @@
     "mapreduce.job.dir" : "/user/jenkins/.staging/job_1369942127770_1205",
     "io.map.index.skip" : "0",
     "net.topology.node.switch.mapping.impl" : 
"org.apache.hadoop.net.ScriptBasedMapping",
-    "fs.s3.maxRetries" : "4",
+    "fs.s3n.maxRetries" : "4",
     "ha.failover-controller.new-active.rpc-timeout.ms" : "60000",
     "s3native.client-write-packet-size" : "65536",
     "yarn.resourcemanager.amliveliness-monitor.interval-ms" : "1000",
@@ -4844,11 +4840,9 @@
     "ftp.bytes-per-checksum" : "512",
     "ipc.server.tcpnodelay" : "false",
     "dfs.namenode.stale.datanode.interval" : "30000",
-    "s3.bytes-per-checksum" : "512",
     "mapreduce.job.speculative.slowtaskthreshold" : "1.0",
     "yarn.nodemanager.localizer.cache.target-size-mb" : "10240",
     "yarn.nodemanager.remote-app-log-dir" : "/tmp/logs",
-    "fs.s3.block.size" : "67108864",
     "mapreduce.job.queuename" : "sls_queue_1",
     "dfs.client.failover.connection.retries" : "0",
     "hadoop.rpc.protection" : "authentication",
@@ -9649,7 +9643,6 @@
     "hadoop.http.authentication.kerberos.keytab" : 
"${user.home}/hadoop.keytab",
     "yarn.nodemanager.keytab" : "/etc/krb5.keytab",
     "io.seqfile.sorter.recordlimit" : "1000000",
-    "s3.blocksize" : "67108864",
     "mapreduce.task.io.sort.factor" : "10",
     "yarn.nodemanager.disk-health-checker.interval-ms" : "120000",
     "mapreduce.job.working.dir" : "hdfs://a2115.smile.com:9820/user/jenkins",
@@ -9659,12 +9652,10 @@
     "dfs.namenode.delegation.token.renew-interval" : "86400000",
     "yarn.nodemanager.resource.memory-mb" : "8192",
     "io.map.index.interval" : "128",
-    "s3.client-write-packet-size" : "65536",
     "mapreduce.task.files.preserve.failedtasks" : "false",
     "dfs.namenode.http-address" : "a2115.smile.com:20101",
     "ha.zookeeper.session-timeout.ms" : "5000",
     "hadoop.hdfs.configuration.version" : "1",
-    "s3.replication" : "3",
     "dfs.datanode.balance.bandwidthPerSec" : "1048576",
     "mapreduce.reduce.shuffle.connect.timeout" : "180000",
     "hadoop.ssl.enabled" : "false",
@@ -9765,7 +9756,7 @@
     "mapreduce.shuffle.ssl.enabled" : "false",
     "dfs.namenode.invalidate.work.pct.per.iteration" : "0.32f",
     "dfs.blockreport.intervalMsec" : "21600000",
-    "fs.s3.sleepTimeSeconds" : "10",
+    "fs.s3n.sleepTimeSeconds" : "10",
     "dfs.namenode.replication.considerLoad" : "true",
     "dfs.client.block.write.retries" : "3",
     "hadoop.ssl.server.conf" : "ssl-server.xml",
@@ -9779,7 +9770,6 @@
     "dfs.replication" : "3",
     "ipc.client.tcpnodelay" : "false",
     "dfs.namenode.accesstime.precision" : "3600000",
-    "s3.stream-buffer-size" : "4096",
     "mapreduce.jobtracker.tasktracker.maxblacklists" : "4",
     "dfs.client.read.shortcircuit.skip.checksum" : "false",
     "mapreduce.job.jvm.numtasks" : "1",
@@ -9797,7 +9787,7 @@
     "kfs.stream-buffer-size" : "4096",
     "dfs.ha.tail-edits.period" : "60",
     "hadoop.security.authentication" : "simple",
-    "fs.s3.buffer.dir" : "${hadoop.tmp.dir}/s3",
+    "fs.s3n.buffer.dir" : "${hadoop.tmp.dir}/s3",
     "rpc.engine.org.apache.hadoop.yarn.api.AMRMProtocolPB" : 
"org.apache.hadoop.ipc.ProtobufRpcEngine",
     "mapreduce.jobtracker.taskscheduler" : 
"org.apache.hadoop.mapred.JobQueueTaskScheduler",
     "yarn.app.mapreduce.am.job.task.listener.thread-count" : "30",
@@ -9879,7 +9869,7 @@
     "mapreduce.job.dir" : "/user/jenkins/.staging/job_1369942127770_1206",
     "io.map.index.skip" : "0",
     "net.topology.node.switch.mapping.impl" : 
"org.apache.hadoop.net.ScriptBasedMapping",
-    "fs.s3.maxRetries" : "4",
+    "fs.s3n.maxRetries" : "4",
     "ha.failover-controller.new-active.rpc-timeout.ms" : "60000",
     "s3native.client-write-packet-size" : "65536",
     "yarn.resourcemanager.amliveliness-monitor.interval-ms" : "1000",
@@ -9947,11 +9937,9 @@
     "ftp.bytes-per-checksum" : "512",
     "ipc.server.tcpnodelay" : "false",
     "dfs.namenode.stale.datanode.interval" : "30000",
-    "s3.bytes-per-checksum" : "512",
     "mapreduce.job.speculative.slowtaskthreshold" : "1.0",
     "yarn.nodemanager.localizer.cache.target-size-mb" : "10240",
     "yarn.nodemanager.remote-app-log-dir" : "/tmp/logs",
-    "fs.s3.block.size" : "67108864",
     "mapreduce.job.queuename" : "sls_queue_1",
     "dfs.client.failover.connection.retries" : "0",
     "hadoop.rpc.protection" : "authentication",
@@ -10252,7 +10240,6 @@
 "hadoop.http.authentication.kerberos.keytab" : "${user.home}/hadoop.keytab",
 "yarn.nodemanager.keytab" : "/etc/krb5.keytab",
 "io.seqfile.sorter.recordlimit" : "1000000",
-"s3.blocksize" : "67108864",
 "mapreduce.task.io.sort.factor" : "10",
 "yarn.nodemanager.disk-health-checker.interval-ms" : "120000",
 "mapreduce.job.working.dir" : "hdfs://a2115.smile.com:9820/user/jenkins",
@@ -10262,12 +10249,10 @@
 "dfs.namenode.delegation.token.renew-interval" : "86400000",
 "yarn.nodemanager.resource.memory-mb" : "8192",
 "io.map.index.interval" : "128",
-"s3.client-write-packet-size" : "65536",
 "mapreduce.task.files.preserve.failedtasks" : "false",
 "dfs.namenode.http-address" : "a2115.smile.com:20101",
 "ha.zookeeper.session-timeout.ms" : "5000",
 "hadoop.hdfs.configuration.version" : "1",
-"s3.replication" : "3",
 "dfs.datanode.balance.bandwidthPerSec" : "1048576",
 "mapreduce.reduce.shuffle.connect.timeout" : "180000",
 "hadoop.ssl.enabled" : "false",
@@ -10369,7 +10354,7 @@
 "mapreduce.shuffle.ssl.enabled" : "false",
 "dfs.namenode.invalidate.work.pct.per.iteration" : "0.32f",
 "dfs.blockreport.intervalMsec" : "21600000",
-"fs.s3.sleepTimeSeconds" : "10",
+"fs.s3n.sleepTimeSeconds" : "10",
 "dfs.namenode.replication.considerLoad" : "true",
 "dfs.client.block.write.retries" : "3",
 "hadoop.ssl.server.conf" : "ssl-server.xml",
@@ -10383,7 +10368,6 @@
 "dfs.replication" : "3",
 "ipc.client.tcpnodelay" : "false",
 "dfs.namenode.accesstime.precision" : "3600000",
-"s3.stream-buffer-size" : "4096",
 "mapreduce.jobtracker.tasktracker.maxblacklists" : "4",
 "dfs.client.read.shortcircuit.skip.checksum" : "false",
 "mapreduce.job.jvm.numtasks" : "1",
@@ -10401,7 +10385,7 @@
 "kfs.stream-buffer-size" : "4096",
 "dfs.ha.tail-edits.period" : "60",
 "hadoop.security.authentication" : "simple",
-"fs.s3.buffer.dir" : "${hadoop.tmp.dir}/s3",
+"fs.s3n.buffer.dir" : "${hadoop.tmp.dir}/s3n",
 "rpc.engine.org.apache.hadoop.yarn.api.AMRMProtocolPB" : 
"org.apache.hadoop.ipc.ProtobufRpcEngine",
 "mapreduce.jobtracker.taskscheduler" : 
"org.apache.hadoop.mapred.JobQueueTaskScheduler",
 "yarn.app.mapreduce.am.job.task.listener.thread-count" : "30",
@@ -10483,7 +10467,7 @@
 "mapreduce.job.dir" : "/user/jenkins/.staging/job_1369942127770_1207",
 "io.map.index.skip" : "0",
 "net.topology.node.switch.mapping.impl" : 
"org.apache.hadoop.net.ScriptBasedMapping",
-"fs.s3.maxRetries" : "4",
+"fs.s3n.maxRetries" : "4",
 "ha.failover-controller.new-active.rpc-timeout.ms" : "60000",
 "s3native.client-write-packet-size" : "65536",
 "yarn.resourcemanager.amliveliness-monitor.interval-ms" : "1000",
@@ -10551,11 +10535,9 @@
 "ftp.bytes-per-checksum" : "512",
 "ipc.server.tcpnodelay" : "false",
 "dfs.namenode.stale.datanode.interval" : "30000",
-"s3.bytes-per-checksum" : "512",
 "mapreduce.job.speculative.slowtaskthreshold" : "1.0",
 "yarn.nodemanager.localizer.cache.target-size-mb" : "10240",
 "yarn.nodemanager.remote-app-log-dir" : "/tmp/logs",
-"fs.s3.block.size" : "67108864",
 "mapreduce.job.queuename" : "sls_queue_1",
 "dfs.client.failover.connection.retries" : "0",
 "hadoop.rpc.protection" : "authentication",

