http://git-wip-us.apache.org/repos/asf/hadoop/blob/5ec7fcd9/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3native/NativeS3FileSystemContractBaseTest.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3native/NativeS3FileSystemContractBaseTest.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3native/NativeS3FileSystemContractBaseTest.java
new file mode 100644
index 0000000..f6f9ae9
--- /dev/null
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3native/NativeS3FileSystemContractBaseTest.java
@@ -0,0 +1,233 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.s3native;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.net.URI;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystemContractBaseTest;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.s3native.NativeS3FileSystem.NativeS3FsInputStream;
+
+public abstract class NativeS3FileSystemContractBaseTest
+  extends FileSystemContractBaseTest {
+  
+  private NativeFileSystemStore store;
+  
+  abstract NativeFileSystemStore getNativeFileSystemStore() throws IOException;
+
+  @Override
+  protected void setUp() throws Exception {
+    Configuration conf = new Configuration();
+    store = getNativeFileSystemStore();
+    fs = new NativeS3FileSystem(store);
+    fs.initialize(URI.create(conf.get("test.fs.s3n.name")), conf);
+  }
+  
+  @Override
+  protected void tearDown() throws Exception {
+    store.purge("test");
+    super.tearDown();
+  }
+
+  public void testCanonicalName() throws Exception {
+    assertNull("s3n doesn't support security token and shouldn't have 
canonical name",
+               fs.getCanonicalServiceName());
+  }
+
+  public void testListStatusForRoot() throws Exception {
+    FileStatus[] paths = fs.listStatus(path("/"));
+    assertEquals("Root directory is not empty; ", 0, paths.length);
+    
+    Path testDir = path("/test");
+    assertTrue(fs.mkdirs(testDir));
+    
+    paths = fs.listStatus(path("/"));
+    assertEquals(1, paths.length);
+    assertEquals(path("/test"), paths[0].getPath());
+  }
+
+  public void testNoTrailingBackslashOnBucket() throws Exception {
+    assertTrue(fs.getFileStatus(new Path(fs.getUri().toString())).isDirectory());
+  }
+
+  private void createTestFiles(String base) throws IOException {
+    store.storeEmptyFile(base + "/file1");
+    store.storeEmptyFile(base + "/dir/file2");
+    store.storeEmptyFile(base + "/dir/file3");
+  }
+
+  public void testDirWithDifferentMarkersWorks() throws Exception {
+
+    for (int i = 0; i < 3; i++) {
+      String base = "test/hadoop" + i;
+      Path path = path("/" + base);
+
+      createTestFiles(base);
+
+      if (i == 0) {
+        // do nothing, we are testing correctness with no markers
+      }
+      else if (i == 1) {
+        // test for _$folder$ marker
+        store.storeEmptyFile(base + "_$folder$");
+        store.storeEmptyFile(base + "/dir_$folder$");
+      }
+      else if (i == 2) {
+        // test the end slash file marker
+        store.storeEmptyFile(base + "/");
+        store.storeEmptyFile(base + "/dir/");
+      }
+      else if (i == 3) {
+        // test both markers
+        store.storeEmptyFile(base + "_$folder$");
+        store.storeEmptyFile(base + "/dir_$folder$");
+        store.storeEmptyFile(base + "/");
+        store.storeEmptyFile(base + "/dir/");
+      }
+
+      assertTrue(fs.getFileStatus(path).isDirectory());
+      assertEquals(2, fs.listStatus(path).length);
+    }
+  }
+
+  public void testDeleteWithNoMarker() throws Exception {
+    String base = "test/hadoop";
+    Path path = path("/" + base);
+
+    createTestFiles(base);
+
+    fs.delete(path, true);
+
+    path = path("/test");
+    assertTrue(fs.getFileStatus(path).isDirectory());
+    assertEquals(0, fs.listStatus(path).length);
+  }
+
+  public void testRenameWithNoMarker() throws Exception {
+    String base = "test/hadoop";
+    Path dest = path("/test/hadoop2");
+
+    createTestFiles(base);
+
+    fs.rename(path("/" + base), dest);
+
+    Path path = path("/test");
+    assertTrue(fs.getFileStatus(path).isDirectory());
+    assertEquals(1, fs.listStatus(path).length);
+    assertTrue(fs.getFileStatus(dest).isDirectory());
+    assertEquals(2, fs.listStatus(dest).length);
+  }
+
+  public void testEmptyFile() throws Exception {
+    store.storeEmptyFile("test/hadoop/file1");
+    fs.open(path("/test/hadoop/file1")).close();
+  }
+  
+  public void testBlockSize() throws Exception {
+    Path file = path("/test/hadoop/file");
+    createFile(file);
+    assertEquals("Default block size", fs.getDefaultBlockSize(file),
+    fs.getFileStatus(file).getBlockSize());
+
+    // Block size is determined at read time
+    long newBlockSize = fs.getDefaultBlockSize(file) * 2;
+    fs.getConf().setLong("fs.s3n.block.size", newBlockSize);
+    assertEquals("Double default block size", newBlockSize,
+    fs.getFileStatus(file).getBlockSize());
+  }
+  
+  public void testRetryOnIoException() throws Exception {
+    class TestInputStream extends InputStream {
+      boolean shouldThrow = false;
+      int throwCount = 0;
+      int pos = 0;
+      byte[] bytes;
+      
+      public TestInputStream() {
+        bytes = new byte[256];
+        for (int i = 0; i < 256; i++) {
+          bytes[i] = (byte)i;
+        }
+      }
+      
+      @Override
+      public int read() throws IOException {
+        shouldThrow = !shouldThrow;
+        if (shouldThrow) {
+          throwCount++;
+          throw new IOException();
+        }
+        return pos++;
+      }
+      
+      @Override
+      public int read(byte[] b, int off, int len) throws IOException {
+        shouldThrow = !shouldThrow;
+        if (shouldThrow) {
+          throwCount++;
+          throw new IOException();
+        }
+        
+        int sizeToRead = Math.min(len, 256 - pos);
+        for (int i = 0; i < sizeToRead; i++) {
+          b[off + i] = bytes[pos + i]; // honor the caller-supplied offset
+        }
+        pos += sizeToRead;
+        return sizeToRead;
+      }
+    }
+    
+    final InputStream is = new TestInputStream();
+    
+    class MockNativeFileSystemStore extends Jets3tNativeFileSystemStore {
+      @Override
+      public InputStream retrieve(String key, long byteRangeStart)
+          throws IOException {
+        return is;
+      }
+    }
+    
+    NativeS3FsInputStream stream = new NativeS3FsInputStream(
+        new MockNativeFileSystemStore(), null, is, "");
+    
+    // Test reading methods.
+    byte[] result = new byte[256];
+    for (int i = 0; i < 128; i++) {
+      result[i] = (byte)stream.read();
+    }
+    for (int i = 128; i < 256; i += 8) {
+      byte[] temp = new byte[8];
+      int read = stream.read(temp, 0, 8);
+      assertEquals(8, read);
+      System.arraycopy(temp, 0, result, i, 8);
+    }
+    
+    // Assert the reassembled bytes match the source pattern.
+    for (int i = 0; i < 256; i++) {
+      assertEquals((byte)i, result[i]);
+    }
+    
+    // Test to make sure the throw path was exercised.
+    // 144 = 128 + (128 / 8)
+    assertEquals(144, ((TestInputStream)is).throwCount);
+  }
+
+}
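
Note on configuration: setUp() above reads "test.fs.s3n.name" from a plain
Configuration, so concrete subclasses that talk to live S3 need that property
on the test classpath. A minimal sketch of such a resource follows; the file
location and bucket URI are placeholders, not part of this change:

  <configuration>
    <property>
      <name>test.fs.s3n.name</name>
      <!-- bucket the contract tests may write to and purge in tearDown() -->
      <value>s3n://your-test-bucket/</value>
    </property>
  </configuration>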

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5ec7fcd9/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3native/S3NInMemoryFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3native/S3NInMemoryFileSystem.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3native/S3NInMemoryFileSystem.java
new file mode 100644
index 0000000..c0ea85b
--- /dev/null
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3native/S3NInMemoryFileSystem.java
@@ -0,0 +1,32 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.s3native;
+
+import org.apache.hadoop.fs.s3native.NativeS3FileSystem;
+import org.apache.hadoop.fs.s3native.InMemoryNativeFileSystemStore;
+
+/**
+ * A helper implementation of {@link NativeS3FileSystem}
+ * without actually connecting to S3 for unit testing.
+ */
+public class S3NInMemoryFileSystem extends NativeS3FileSystem {
+    public S3NInMemoryFileSystem() {
+        super(new InMemoryNativeFileSystemStore());
+    }
+}
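
NativeS3FileSystem delegates all object operations to its
NativeFileSystemStore, so binding the in-memory store yields a filesystem
that never touches the network. A usage sketch (path and payload are
illustrative; TestS3NInMemoryFileSystem further down drives it the same way):

  import java.net.URI;

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FSDataOutputStream;
  import org.apache.hadoop.fs.Path;
  import org.apache.hadoop.fs.s3native.S3NInMemoryFileSystem;

  public class S3NInMemoryExample {
    public static void main(String[] args) throws Exception {
      S3NInMemoryFileSystem fs = new S3NInMemoryFileSystem();
      fs.initialize(URI.create("s3n://test/"), new Configuration());
      FSDataOutputStream out = fs.create(new Path("s3n://test/hello.txt"));
      out.write("hello".getBytes("UTF-8"));
      out.close();
      // All bytes live in InMemoryNativeFileSystemStore; nothing hit S3.
      fs.close();
    }
  }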

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5ec7fcd9/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3native/TestInMemoryNativeS3FileSystemContract.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3native/TestInMemoryNativeS3FileSystemContract.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3native/TestInMemoryNativeS3FileSystemContract.java
new file mode 100644
index 0000000..664d39e
--- /dev/null
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3native/TestInMemoryNativeS3FileSystemContract.java
@@ -0,0 +1,30 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.s3native;
+
+import java.io.IOException;
+
+public class TestInMemoryNativeS3FileSystemContract
+  extends NativeS3FileSystemContractBaseTest {
+
+  @Override
+  NativeFileSystemStore getNativeFileSystemStore() throws IOException {
+    return new InMemoryNativeFileSystemStore();
+  }
+}
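
A live-S3 counterpart of this class would bind the JetS3t-backed store the
same way. A sketch (such a class is not part of the diff shown here; the
name is assumed):

  package org.apache.hadoop.fs.s3native;

  import java.io.IOException;

  public class TestJets3tNativeS3FileSystemContract
      extends NativeS3FileSystemContractBaseTest {

    @Override
    NativeFileSystemStore getNativeFileSystemStore() throws IOException {
      // Runs the same contract tests against a real bucket.
      return new Jets3tNativeFileSystemStore();
    }
  }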

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5ec7fcd9/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3native/TestJets3tNativeFileSystemStore.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3native/TestJets3tNativeFileSystemStore.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3native/TestJets3tNativeFileSystemStore.java
new file mode 100644
index 0000000..b1078a4
--- /dev/null
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3native/TestJets3tNativeFileSystemStore.java
@@ -0,0 +1,126 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.s3native;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+
+import static org.junit.Assert.*;
+import static org.junit.Assume.*;
+
+import org.junit.Before;
+import org.junit.After;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import java.io.BufferedInputStream;
+import java.io.BufferedOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.net.URI;
+import java.security.DigestInputStream;
+import java.security.DigestOutputStream;
+import java.security.MessageDigest;
+import java.security.NoSuchAlgorithmException;
+
+
+public class TestJets3tNativeFileSystemStore {
+  private Configuration conf;
+  private Jets3tNativeFileSystemStore store;
+  private NativeS3FileSystem fs;
+
+  @Before
+  public void setUp() throws Exception {
+    conf = new Configuration();
+    store = new Jets3tNativeFileSystemStore();
+    fs = new NativeS3FileSystem(store);
+    conf.setBoolean("fs.s3n.multipart.uploads.enabled", true);
+    conf.setLong("fs.s3n.multipart.uploads.block.size", 64 * 1024 * 1024);
+    fs.initialize(URI.create(conf.get("test.fs.s3n.name")), conf);
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    try {
+      store.purge("test");
+    } catch (Exception e) {} // best-effort cleanup; ignore failures
+  }
+
+  @BeforeClass
+  public static void checkSettings() throws Exception {
+    Configuration conf = new Configuration();
+    assumeNotNull(conf.get("fs.s3n.awsAccessKeyId"));
+    assumeNotNull(conf.get("fs.s3n.awsSecretAccessKey"));
+    assumeNotNull(conf.get("test.fs.s3n.name"));
+  }
+
+  protected void writeRenameReadCompare(Path path, long len)
+      throws IOException, NoSuchAlgorithmException {
+    // If len > fs.s3n.multipart.uploads.block.size,
+    // we'll use a multipart upload copy
+    MessageDigest digest = MessageDigest.getInstance("MD5");
+    OutputStream out = new BufferedOutputStream(
+        new DigestOutputStream(fs.create(path, false), digest));
+    for (long i = 0; i < len; i++) {
+      out.write('Q');
+    }
+    out.flush();
+    out.close();
+
+    assertTrue("Exists", fs.exists(path));
+
+    // If this file is over 5 GB, S3's single-request copy limit
+    // forces the rename to use a multipart copy
+    Path copyPath = path.suffix(".copy");
+    fs.rename(path, copyPath);
+
+    assertTrue("Copy exists", fs.exists(copyPath));
+
+    // Download file from S3 and compare the digest against the original
+    MessageDigest digest2 = MessageDigest.getInstance("MD5");
+    InputStream in = new BufferedInputStream(
+        new DigestInputStream(fs.open(copyPath), digest2));
+    long copyLen = 0;
+    while (in.read() != -1) {copyLen++;}
+    in.close();
+
+    assertEquals("Copy length matches original", len, copyLen);
+    assertArrayEquals("Digests match", digest.digest(), digest2.digest());
+  }
+
+  @Test
+  public void testSmallUpload() throws IOException, NoSuchAlgorithmException {
+    // Regular upload, regular copy
+    writeRenameReadCompare(new Path("/test/small"), 16384);
+  }
+
+  @Test
+  public void testMediumUpload() throws IOException, NoSuchAlgorithmException {
+    // Multipart upload, regular copy
+    writeRenameReadCompare(new Path("/test/medium"), 33554432);    // 100 MB
+  }
+
+  @Test
+  public void testExtraLargeUpload()
+      throws IOException, NoSuchAlgorithmException {
+    // Multipart upload, multipart copy
+    writeRenameReadCompare(new Path("/test/xlarge"), 5368709121L); // 5GB+1byte
+  }
+}
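
checkSettings() uses JUnit's Assume, so the whole suite is skipped rather
than failed when credentials are absent. The property names come straight
from the code above; the values here are placeholders:

  <configuration>
    <property>
      <name>fs.s3n.awsAccessKeyId</name>
      <value>YOUR_ACCESS_KEY_ID</value>
    </property>
    <property>
      <name>fs.s3n.awsSecretAccessKey</name>
      <value>YOUR_SECRET_ACCESS_KEY</value>
    </property>
    <property>
      <name>test.fs.s3n.name</name>
      <value>s3n://your-test-bucket/</value>
    </property>
  </configuration>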

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5ec7fcd9/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3native/TestS3NInMemoryFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3native/TestS3NInMemoryFileSystem.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3native/TestS3NInMemoryFileSystem.java
new file mode 100644
index 0000000..b457df2
--- /dev/null
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3native/TestS3NInMemoryFileSystem.java
@@ -0,0 +1,69 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.s3native;
+
+import java.io.BufferedReader;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.net.URI;
+
+import junit.framework.TestCase;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.Path;
+
+public class TestS3NInMemoryFileSystem extends TestCase {
+
+  private static final String TEST_PATH = "s3n://test/data.txt";
+  
+  private static final String TEST_DATA = "Sample data for testing.";
+  
+  private S3NInMemoryFileSystem fs;
+  
+  @Override
+  public void setUp() throws IOException {
+    fs = new S3NInMemoryFileSystem();
+    fs.initialize(URI.create("s3n://test/"), new Configuration());
+  }
+ 
+  public void testBasicReadWriteIO() throws IOException {
+    FSDataOutputStream writeData = fs.create(new Path(TEST_PATH));
+    writeData.write(TEST_DATA.getBytes());
+    writeData.flush();
+    writeData.close();
+    
+    FSDataInputStream readData = fs.open(new Path(TEST_PATH));
+    BufferedReader br = new BufferedReader(new InputStreamReader(readData));
+    String line;
+    StringBuilder stringBuilder = new StringBuilder();
+    while ((line = br.readLine()) != null) {
+      stringBuilder.append(line);
+    }
+    br.close();
+
+    // Use a JUnit assertion: the Java assert keyword is a no-op unless -ea is set.
+    assertEquals(TEST_DATA, stringBuilder.toString());
+  }
+  
+  @Override
+  public void tearDown() throws IOException {
+    fs.close();  
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5ec7fcd9/hadoop-tools/hadoop-aws/src/test/resources/contract/s3n.xml
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/test/resources/contract/s3n.xml b/hadoop-tools/hadoop-aws/src/test/resources/contract/s3n.xml
new file mode 100644
index 0000000..ab46178
--- /dev/null
+++ b/hadoop-tools/hadoop-aws/src/test/resources/contract/s3n.xml
@@ -0,0 +1,95 @@
+<!--
+  ~ Licensed to the Apache Software Foundation (ASF) under one
+  ~  or more contributor license agreements.  See the NOTICE file
+  ~  distributed with this work for additional information
+  ~  regarding copyright ownership.  The ASF licenses this file
+  ~  to you under the Apache License, Version 2.0 (the
+  ~  "License"); you may not use this file except in compliance
+  ~  with the License.  You may obtain a copy of the License at
+  ~
+  ~       http://www.apache.org/licenses/LICENSE-2.0
+  ~
+  ~  Unless required by applicable law or agreed to in writing, software
+  ~  distributed under the License is distributed on an "AS IS" BASIS,
+  ~  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  ~  See the License for the specific language governing permissions and
+  ~  limitations under the License.
+  -->
+
+<configuration>
+  <!--
+  S3N is a blobstore, with very different behavior than a
+  classic filesystem.
+  -->
+
+  <property>
+    <name>fs.contract.test.root-tests-enabled</name>
+    <value>true</value>
+  </property>
+
+  <property>
+    <name>fs.contract.test.random-seek-count</name>
+    <value>10</value>
+  </property>
+
+  <property>
+    <name>fs.contract.is-blobstore</name>
+    <value>true</value>
+  </property>
+
+  <property>
+    <name>fs.contract.is-case-sensitive</name>
+    <value>true</value>
+  </property>
+
+  <property>
+    <name>fs.contract.rename-returns-false-if-source-missing</name>
+    <value>true</value>
+  </property>
+
+  <property>
+    <name>fs.contract.supports-append</name>
+    <value>false</value>
+  </property>
+
+  <property>
+    <name>fs.contract.supports-atomic-directory-delete</name>
+    <value>false</value>
+  </property>
+
+  <property>
+    <name>fs.contract.supports-atomic-rename</name>
+    <value>false</value>
+  </property>
+
+  <property>
+    <name>fs.contract.supports-block-locality</name>
+    <value>false</value>
+  </property>
+
+  <property>
+    <name>fs.contract.supports-concat</name>
+    <value>false</value>
+  </property>
+
+  <property>
+    <name>fs.contract.supports-seek</name>
+    <value>true</value>
+  </property>
+
+  <property>
+    <name>fs.contract.rejects-seek-past-eof</name>
+    <value>true</value>
+  </property>
+
+  <property>
+    <name>fs.contract.supports-strict-exceptions</name>
+    <value>true</value>
+  </property>
+
+  <property>
+    <name>fs.contract.supports-unix-permissions</name>
+    <value>false</value>
+  </property>
+
+</configuration>
\ No newline at end of file
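
These contract options are plain Configuration properties, so the resource
can be sanity-checked by loading it off the test classpath (a sketch only,
not part of the contract-test framework itself):

  import org.apache.hadoop.conf.Configuration;

  public class ContractOptionsCheck {
    public static void main(String[] args) {
      // false: skip core-default.xml etc., read only the resource we add
      Configuration conf = new Configuration(false);
      conf.addResource("contract/s3n.xml");
      System.out.println("blobstore: "
          + conf.getBoolean("fs.contract.is-blobstore", false));
      System.out.println("atomic rename: "
          + conf.getBoolean("fs.contract.supports-atomic-rename", false));
    }
  }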

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5ec7fcd9/hadoop-tools/hadoop-tools-dist/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-tools-dist/pom.xml b/hadoop-tools/hadoop-tools-dist/pom.xml
index 38043f7..7a01085 100644
--- a/hadoop-tools/hadoop-tools-dist/pom.xml
+++ b/hadoop-tools/hadoop-tools-dist/pom.xml
@@ -85,6 +85,12 @@
     </dependency>
     <dependency>
       <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-aws</artifactId>
+      <scope>compile</scope>
+      <version>${project.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-azure</artifactId>
       <scope>compile</scope>
       <version>${project.version}</version>
