http://git-wip-us.apache.org/repos/asf/hadoop/blob/3fbcf763/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemConcurrencyLive.java ---------------------------------------------------------------------- diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemConcurrencyLive.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemConcurrencyLive.java deleted file mode 100644 index 6c28158..0000000 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemConcurrencyLive.java +++ /dev/null @@ -1,187 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.fs.azure; - - -import org.apache.hadoop.fs.FSDataOutputStream; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.Path; -import org.junit.Assert; -import org.junit.Test; - -import java.util.ArrayList; -import java.util.List; -import java.util.concurrent.Callable; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.concurrent.Future; - -/*** - * Test class to hold all Live Azure storage concurrency tests. - */ -public class TestNativeAzureFileSystemConcurrencyLive - extends AbstractWasbTestBase { - - private static final int THREAD_COUNT = 102; - private static final int TEST_EXECUTION_TIMEOUT = 5000; - - @Override - protected AzureBlobStorageTestAccount createTestAccount() throws Exception { - return AzureBlobStorageTestAccount.create(); - } - - /** - * Validate contract for FileSystem.create when overwrite is true and there - * are concurrent callers of FileSystem.delete. An existing file should be - * overwritten, even if the original destination exists but is deleted by an - * external agent during the create operation. - */ - @Test(timeout = TEST_EXECUTION_TIMEOUT) - public void testConcurrentCreateDeleteFile() throws Exception { - Path testFile = new Path("test.dat"); - fs.create(testFile).close(); - - List<CreateFileTask> tasks = new ArrayList<>(THREAD_COUNT); - - for (int i = 0; i < THREAD_COUNT; i++) { - tasks.add(new CreateFileTask(fs, testFile)); - } - - ExecutorService es = null; - - try { - es = Executors.newFixedThreadPool(THREAD_COUNT); - - List<Future<Void>> futures = es.invokeAll(tasks); - - for (Future<Void> future : futures) { - Assert.assertTrue(future.isDone()); - - // we are using Callable<V>, so if an exception - // occurred during the operation, it will be thrown - // when we call get - Assert.assertEquals(null, future.get()); - } - } finally { - if (es != null) { - es.shutdownNow(); - } - } - } - - /** - * Validate contract for FileSystem.delete when invoked concurrently. 
- * One of the threads should successfully delete the file and return true; - * all other threads should return false. - */ - @Test(timeout = TEST_EXECUTION_TIMEOUT) - public void testConcurrentDeleteFile() throws Exception { - Path testFile = new Path("test.dat"); - fs.create(testFile).close(); - - List<DeleteFileTask> tasks = new ArrayList<>(THREAD_COUNT); - - for (int i = 0; i < THREAD_COUNT; i++) { - tasks.add(new DeleteFileTask(fs, testFile)); - } - - ExecutorService es = null; - try { - es = Executors.newFixedThreadPool(THREAD_COUNT); - - List<Future<Boolean>> futures = es.invokeAll(tasks); - - int successCount = 0; - for (Future<Boolean> future : futures) { - Assert.assertTrue(future.isDone()); - - // we are using Callable<V>, so if an exception - // occurred during the operation, it will be thrown - // when we call get - Boolean success = future.get(); - if (success) { - successCount++; - } - } - - Assert.assertEquals( - "Exactly one delete operation should return true.", - 1, - successCount); - } finally { - if (es != null) { - es.shutdownNow(); - } - } - } - - - abstract class FileSystemTask<V> implements Callable<V> { - private final FileSystem fileSystem; - private final Path path; - - protected FileSystem getFileSystem() { - return this.fileSystem; - } - - protected Path getFilePath() { - return this.path; - } - - FileSystemTask(FileSystem fs, Path p) { - this.fileSystem = fs; - this.path = p; - } - - public abstract V call() throws Exception; - } - - class DeleteFileTask extends FileSystemTask<Boolean> { - - DeleteFileTask(FileSystem fs, Path p) { - super(fs, p); - } - - @Override - public Boolean call() throws Exception { - return this.getFileSystem().delete(this.getFilePath(), false); - } - } - - class CreateFileTask extends FileSystemTask<Void> { - CreateFileTask(FileSystem fs, Path p) { - super(fs, p); - } - - public Void call() throws Exception { - FileSystem fs = getFileSystem(); - Path p = getFilePath(); - - // Create an empty file and close the stream. - FSDataOutputStream stream = fs.create(p, true); - stream.close(); - - // Delete the file. We don't care if delete returns true or false. - // We just want to ensure the file does not exist. - this.getFileSystem().delete(this.getFilePath(), false); - - return null; - } - } -} \ No newline at end of file
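For readers following this refactor, the deleted concurrency test relied on a reusable pattern: submit N identical Callables through ExecutorService.invokeAll(), then call Future.get() on every result so any exception raised inside a worker thread is rethrown on the JUnit thread. A minimal, self-contained sketch of that pattern (the task body and class name are placeholders, not part of this patch):

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class InvokeAllSketch {
  public static void main(String[] args) throws Exception {
    final int threads = 8; // the deleted test used THREAD_COUNT = 102
    List<Callable<Void>> tasks = new ArrayList<>(threads);
    for (int i = 0; i < threads; i++) {
      tasks.add(() -> {
        // the concurrent operation under test would go here
        return null;
      });
    }
    ExecutorService es = Executors.newFixedThreadPool(threads);
    try {
      // invokeAll blocks until every task has completed
      for (Future<Void> f : es.invokeAll(tasks)) {
        f.get(); // rethrows any worker exception as ExecutionException
      }
    } finally {
      es.shutdownNow();
    }
  }
}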
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3fbcf763/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemContractEmulator.java ---------------------------------------------------------------------- diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemContractEmulator.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemContractEmulator.java deleted file mode 100644 index b4a71f6..0000000 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemContractEmulator.java +++ /dev/null @@ -1,50 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.fs.azure; - -import org.apache.hadoop.fs.FileSystemContractBaseTest; - -public class TestNativeAzureFileSystemContractEmulator extends - FileSystemContractBaseTest { - private AzureBlobStorageTestAccount testAccount; - - @Override - protected void setUp() throws Exception { - testAccount = AzureBlobStorageTestAccount.createForEmulator(); - if (testAccount != null) { - fs = testAccount.getFileSystem(); - } - } - - @Override - protected void tearDown() throws Exception { - if (testAccount != null) { - testAccount.cleanup(); - testAccount = null; - fs = null; - } - } - - @Override - protected void runTest() throws Throwable { - if (testAccount != null) { - super.runTest(); - } - } -} http://git-wip-us.apache.org/repos/asf/hadoop/blob/3fbcf763/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemContractLive.java ---------------------------------------------------------------------- diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemContractLive.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemContractLive.java deleted file mode 100644 index 0d7b9ad..0000000 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemContractLive.java +++ /dev/null @@ -1,76 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.fs.azure; - -import org.apache.hadoop.fs.FileSystemContractBaseTest; -import org.junit.Ignore; - -public class TestNativeAzureFileSystemContractLive extends - FileSystemContractBaseTest { - private AzureBlobStorageTestAccount testAccount; - - @Override - protected void setUp() throws Exception { - testAccount = AzureBlobStorageTestAccount.create(); - if (testAccount != null) { - fs = testAccount.getFileSystem(); - } - } - - @Override - protected void tearDown() throws Exception { - if (testAccount != null) { - testAccount.cleanup(); - testAccount = null; - fs = null; - } - } - - @Override - protected void runTest() throws Throwable { - if (testAccount != null) { - super.runTest(); - } - } - - /** - * The following tests are failing on Azure and the Azure - * file system code needs to be modified to make them pass. - * A separate work item has been opened for this. - */ - @Ignore - public void testMoveFileUnderParent() throws Throwable { - } - - @Ignore - public void testRenameFileToSelf() throws Throwable { - } - - @Ignore - public void testRenameChildDirForbidden() throws Exception { - } - - @Ignore - public void testMoveDirUnderParent() throws Throwable { - } - - @Ignore - public void testRenameDirToSelf() throws Throwable { - } -} http://git-wip-us.apache.org/repos/asf/hadoop/blob/3fbcf763/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemContractMocked.java ---------------------------------------------------------------------- diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemContractMocked.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemContractMocked.java index 6d3df25..ab53adc 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemContractMocked.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemContractMocked.java @@ -21,6 +21,9 @@ package org.apache.hadoop.fs.azure; import org.apache.hadoop.fs.FileSystemContractBaseTest; import org.junit.Ignore; +/** + * Mocked testing of FileSystemContractBaseTest. 
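The setUp()/tearDown()/runTest() overrides in these contract tests are the JUnit 3 TestCase idiom (FileSystemContractBaseTest was TestCase-based at the time): returning early from runTest() silently skips the whole suite when no live account is configured. A hedged sketch of the JUnit 4 equivalent, using an assumption so skipped tests are reported as skipped rather than passed (the class name is illustrative; AzureBlobStorageTestAccount is the helper from this module):

import static org.junit.Assume.assumeNotNull;
import org.junit.Before;

public class LiveAccountGateSketch {
  private AzureBlobStorageTestAccount testAccount;

  @Before
  public void setUp() throws Exception {
    testAccount = AzureBlobStorageTestAccount.create();
    // Marks every test as skipped, not passed, when no account is configured.
    assumeNotNull(testAccount);
  }
}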
+ */ public class TestNativeAzureFileSystemContractMocked extends FileSystemContractBaseTest { http://git-wip-us.apache.org/repos/asf/hadoop/blob/3fbcf763/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemContractPageBlobLive.java ---------------------------------------------------------------------- diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemContractPageBlobLive.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemContractPageBlobLive.java deleted file mode 100644 index 3c3b782..0000000 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemContractPageBlobLive.java +++ /dev/null @@ -1,90 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.fs.azure; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FileSystemContractBaseTest; -import org.junit.Ignore; - -public class TestNativeAzureFileSystemContractPageBlobLive extends - FileSystemContractBaseTest { - private AzureBlobStorageTestAccount testAccount; - - private AzureBlobStorageTestAccount createTestAccount() - throws Exception { - Configuration conf = new Configuration(); - - // Configure the page blob directories key so every file created is a page blob. - conf.set(AzureNativeFileSystemStore.KEY_PAGE_BLOB_DIRECTORIES, "/"); - - // Configure the atomic rename directories key so every folder will have - // atomic rename applied. - conf.set(AzureNativeFileSystemStore.KEY_ATOMIC_RENAME_DIRECTORIES, "/"); - return AzureBlobStorageTestAccount.create(conf); - } - - @Override - protected void setUp() throws Exception { - testAccount = createTestAccount(); - if (testAccount != null) { - fs = testAccount.getFileSystem(); - } - } - - @Override - protected void tearDown() throws Exception { - if (testAccount != null) { - testAccount.cleanup(); - testAccount = null; - fs = null; - } - } - - @Override - protected void runTest() throws Throwable { - if (testAccount != null) { - super.runTest(); - } - } - - /** - * The following tests are failing on Azure and the Azure - * file system code needs to be modified to make them pass. - * A separate work item has been opened for this. 
- */ - @Ignore - public void testMoveFileUnderParent() throws Throwable { - } - - @Ignore - public void testRenameFileToSelf() throws Throwable { - } - - @Ignore - public void testRenameChildDirForbidden() throws Exception { - } - - @Ignore - public void testMoveDirUnderParent() throws Throwable { - } - - @Ignore - public void testRenameDirToSelf() throws Throwable { - } -} http://git-wip-us.apache.org/repos/asf/hadoop/blob/3fbcf763/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemFileNameCheck.java ---------------------------------------------------------------------- diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemFileNameCheck.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemFileNameCheck.java index 82eabaa..0dfbb37 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemFileNameCheck.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemFileNameCheck.java @@ -18,17 +18,11 @@ package org.apache.hadoop.fs.azure; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; - import java.io.IOException; import java.util.HashMap; -import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.junit.After; -import org.junit.Before; + import org.junit.Test; /** @@ -38,24 +32,18 @@ import org.junit.Test; * creation/rename of files/directories through WASB that have colons in the * names. */ -public class TestNativeAzureFileSystemFileNameCheck { - private FileSystem fs = null; - private AzureBlobStorageTestAccount testAccount = null; +public class TestNativeAzureFileSystemFileNameCheck extends AbstractWasbTestBase { private String root = null; - @Before + @Override public void setUp() throws Exception { - testAccount = AzureBlobStorageTestAccount.createMock(); - fs = testAccount.getFileSystem(); + super.setUp(); root = fs.getUri().toString(); } - @After - public void tearDown() throws Exception { - testAccount.cleanup(); - root = null; - fs = null; - testAccount = null; + @Override + protected AzureBlobStorageTestAccount createTestAccount() throws Exception { + return AzureBlobStorageTestAccount.createMock(); } @Test @@ -138,4 +126,4 @@ public class TestNativeAzureFileSystemFileNameCheck { fsck.run(new String[] { p.toString() }); return fsck.getPathNameWarning(); } -} \ No newline at end of file +} http://git-wip-us.apache.org/repos/asf/hadoop/blob/3fbcf763/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemLive.java ---------------------------------------------------------------------- diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemLive.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemLive.java deleted file mode 100644 index 6baba33..0000000 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemLive.java +++ /dev/null @@ -1,242 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
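TestNativeAzureFileSystemFileNameCheck now inherits its whole lifecycle from AbstractWasbTestBase and only supplies createTestAccount(). That same hook can carry the configuration the deleted page-blob contract test built by hand. A sketch combining both, using only the configuration keys shown in the diff (the class name is illustrative):

import org.apache.hadoop.conf.Configuration;

public class PageBlobTestAccountSketch extends AbstractWasbTestBase {
  @Override
  protected AzureBlobStorageTestAccount createTestAccount() throws Exception {
    Configuration conf = new Configuration();
    // Treat every path as a page blob, as the deleted PageBlobLive test did.
    conf.set(AzureNativeFileSystemStore.KEY_PAGE_BLOB_DIRECTORIES, "/");
    // Apply atomic-rename semantics to every folder.
    conf.set(AzureNativeFileSystemStore.KEY_ATOMIC_RENAME_DIRECTORIES, "/");
    return AzureBlobStorageTestAccount.create(conf);
  }
}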
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.fs.azure; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; - -import java.util.concurrent.CountDownLatch; - -import org.apache.hadoop.io.IOUtils; -import org.apache.hadoop.fs.FSDataOutputStream; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.fs.permission.FsPermission; - -import org.junit.Test; - -import com.microsoft.azure.storage.StorageException; - -/* - * Tests the Native Azure file system (WASB) against an actual blob store if - * provided in the environment. - */ -public class TestNativeAzureFileSystemLive extends - NativeAzureFileSystemBaseTest { - - @Override - protected AzureBlobStorageTestAccount createTestAccount() throws Exception { - return AzureBlobStorageTestAccount.create(); - } - - @Test - public void testLazyRenamePendingCanOverwriteExistingFile() - throws Exception { - final String SRC_FILE_KEY = "srcFile"; - final String DST_FILE_KEY = "dstFile"; - Path srcPath = new Path(SRC_FILE_KEY); - FSDataOutputStream srcStream = fs.create(srcPath); - assertTrue(fs.exists(srcPath)); - Path dstPath = new Path(DST_FILE_KEY); - FSDataOutputStream dstStream = fs.create(dstPath); - assertTrue(fs.exists(dstPath)); - NativeAzureFileSystem nfs = (NativeAzureFileSystem)fs; - final String fullSrcKey = nfs.pathToKey(nfs.makeAbsolute(srcPath)); - final String fullDstKey = nfs.pathToKey(nfs.makeAbsolute(dstPath)); - nfs.getStoreInterface().rename(fullSrcKey, fullDstKey, true, null); - assertTrue(fs.exists(dstPath)); - assertFalse(fs.exists(srcPath)); - IOUtils.cleanup(null, srcStream); - IOUtils.cleanup(null, dstStream); - } - /** - * Tests fs.delete() function to delete a blob when another blob is holding a - * lease on it. Delete if called without a lease should fail if another process - * is holding a lease and throw appropriate exception - * This is a scenario that would happen in HMaster startup when it tries to - * clean up the temp dirs while the HMaster process which was killed earlier - * held lease on the blob when doing some DDL operation - */ - @Test - public void testDeleteThrowsExceptionWithLeaseExistsErrorMessage() - throws Exception { - LOG.info("Starting test"); - final String FILE_KEY = "fileWithLease"; - // Create the file - Path path = new Path(FILE_KEY); - fs.create(path); - assertTrue(fs.exists(path)); - NativeAzureFileSystem nfs = (NativeAzureFileSystem)fs; - final String fullKey = nfs.pathToKey(nfs.makeAbsolute(path)); - final AzureNativeFileSystemStore store = nfs.getStore(); - - // Acquire the lease on the file in a background thread - final CountDownLatch leaseAttemptComplete = new CountDownLatch(1); - final CountDownLatch beginningDeleteAttempt = new CountDownLatch(1); - Thread t = new Thread() { - @Override - public void run() { - // Acquire the lease and then signal the main test thread. 
- SelfRenewingLease lease = null; - try { - lease = store.acquireLease(fullKey); - LOG.info("Lease acquired: " + lease.getLeaseID()); - } catch (AzureException e) { - LOG.warn("Lease acqusition thread unable to acquire lease", e); - } finally { - leaseAttemptComplete.countDown(); - } - - // Wait for the main test thread to signal it will attempt the delete. - try { - beginningDeleteAttempt.await(); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - } - - // Keep holding the lease past the lease acquisition retry interval, so - // the test covers the case of delete retrying to acquire the lease. - try { - Thread.sleep(SelfRenewingLease.LEASE_ACQUIRE_RETRY_INTERVAL * 3); - } catch (InterruptedException ex) { - Thread.currentThread().interrupt(); - } - - try { - if (lease != null){ - LOG.info("Freeing lease"); - lease.free(); - } - } catch (StorageException se) { - LOG.warn("Unable to free lease.", se); - } - } - }; - - // Start the background thread and wait for it to signal the lease is held. - t.start(); - try { - leaseAttemptComplete.await(); - } catch (InterruptedException ex) { - Thread.currentThread().interrupt(); - } - - // Try to delete the same file - beginningDeleteAttempt.countDown(); - store.delete(fullKey); - - // At this point file SHOULD BE DELETED - assertFalse(fs.exists(path)); - } - - /** - * Check that isPageBlobKey works as expected. This assumes that - * in the test configuration, the list of supported page blob directories - * only includes "pageBlobs". That's why this test is made specific - * to this subclass. - */ - @Test - public void testIsPageBlobKey() { - AzureNativeFileSystemStore store = ((NativeAzureFileSystem) fs).getStore(); - - // Use literal strings so it's easier to understand the tests. - // In case the constant changes, we want to know about it so we can update this test. - assertEquals(AzureBlobStorageTestAccount.DEFAULT_PAGE_BLOB_DIRECTORY, "pageBlobs"); - - // URI prefix for test environment. - String uriPrefix = "file:///"; - - // negative tests - String[] negativeKeys = { "", "/", "bar", "bar/", "bar/pageBlobs", "bar/pageBlobs/foo", - "bar/pageBlobs/foo/", "/pageBlobs/", "/pageBlobs", "pageBlobs", "pageBlobsxyz/" }; - for (String s : negativeKeys) { - assertFalse(store.isPageBlobKey(s)); - assertFalse(store.isPageBlobKey(uriPrefix + s)); - } - - // positive tests - String[] positiveKeys = { "pageBlobs/", "pageBlobs/foo/", "pageBlobs/foo/bar/" }; - for (String s : positiveKeys) { - assertTrue(store.isPageBlobKey(s)); - assertTrue(store.isPageBlobKey(uriPrefix + s)); - } - } - - /** - * Test that isAtomicRenameKey() works as expected. - */ - @Test - public void testIsAtomicRenameKey() { - - AzureNativeFileSystemStore store = ((NativeAzureFileSystem) fs).getStore(); - - // We want to know if the default configuration changes so we can fix - // this test. - assertEquals(AzureBlobStorageTestAccount.DEFAULT_ATOMIC_RENAME_DIRECTORIES, - "/atomicRenameDir1,/atomicRenameDir2"); - - // URI prefix for test environment. - String uriPrefix = "file:///"; - - // negative tests - String[] negativeKeys = { "", "/", "bar", "bar/", "bar/hbase", - "bar/hbase/foo", "bar/hbase/foo/", "/hbase/", "/hbase", "hbase", - "hbasexyz/", "foo/atomicRenameDir1/"}; - for (String s : negativeKeys) { - assertFalse(store.isAtomicRenameKey(s)); - assertFalse(store.isAtomicRenameKey(uriPrefix + s)); - } - - // Positive tests. 
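The lease test above coordinates its two threads with a pair of CountDownLatches: the worker signals once its lease attempt finishes (successful or not), and the main thread signals just before it contends for the blob. A stripped-down sketch of that handshake, with the resource operations left as placeholders:

import java.util.concurrent.CountDownLatch;

public class LatchHandshakeSketch {
  public static void main(String[] args) throws InterruptedException {
    final CountDownLatch workerReady = new CountDownLatch(1);
    final CountDownLatch mainProceeding = new CountDownLatch(1);

    Thread worker = new Thread(() -> {
      try {
        // ... acquire the shared resource (the lease, in the test) ...
      } finally {
        workerReady.countDown(); // signal even if acquisition failed
      }
      try {
        mainProceeding.await(); // wait for the main thread's signal
        // ... keep holding the resource while the main thread contends ...
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
      }
    });

    worker.start();
    workerReady.await();        // worker has attempted acquisition
    mainProceeding.countDown(); // tell the worker we are about to contend
    // ... perform the contended operation (store.delete in the test) ...
    worker.join();
  }
}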
The directories for atomic rename are /hbase - // plus the ones in the configuration (DEFAULT_ATOMIC_RENAME_DIRECTORIES - // for this test). - String[] positiveKeys = { "hbase/", "hbase/foo/", "hbase/foo/bar/", - "atomicRenameDir1/foo/", "atomicRenameDir2/bar/"}; - for (String s : positiveKeys) { - assertTrue(store.isAtomicRenameKey(s)); - assertTrue(store.isAtomicRenameKey(uriPrefix + s)); - } - } - - /** - * Tests fs.mkdir() function to create a target blob while another thread - * is holding the lease on the blob. mkdir should not fail since the blob - * already exists. - * This is a scenario that would happen in HBase distributed log splitting. - * Multiple threads will try to create and update "recovered.edits" folder - * under the same path. - */ - @Test - public void testMkdirOnExistingFolderWithLease() throws Exception { - SelfRenewingLease lease; - final String FILE_KEY = "folderWithLease"; - // Create the folder - fs.mkdirs(new Path(FILE_KEY)); - NativeAzureFileSystem nfs = (NativeAzureFileSystem) fs; - String fullKey = nfs.pathToKey(nfs.makeAbsolute(new Path(FILE_KEY))); - AzureNativeFileSystemStore store = nfs.getStore(); - // Acquire the lease on the folder - lease = store.acquireLease(fullKey); - assertTrue(lease.getLeaseID() != null); - // Try to create the same folder - store.storeEmptyFolder(fullKey, - nfs.createPermissionStatus(FsPermission.getDirDefault())); - lease.free(); - } -} http://git-wip-us.apache.org/repos/asf/hadoop/blob/3fbcf763/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemMocked.java ---------------------------------------------------------------------- diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemMocked.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemMocked.java index aa1e4f7..20d45b2 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemMocked.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemMocked.java @@ -21,6 +21,10 @@ package org.apache.hadoop.fs.azure; import java.io.IOException; import org.junit.Ignore; +/** + * Run {@link NativeAzureFileSystemBaseTest} tests against a mocked store, + * skipping tests of unsupported features + */ public class TestNativeAzureFileSystemMocked extends NativeAzureFileSystemBaseTest { http://git-wip-us.apache.org/repos/asf/hadoop/blob/3fbcf763/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemUploadLogic.java ---------------------------------------------------------------------- diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemUploadLogic.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemUploadLogic.java index 4c2df8d..7f63295 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemUploadLogic.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemUploadLogic.java @@ -18,41 +18,27 @@ package org.apache.hadoop.fs.azure; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; - import java.io.ByteArrayInputStream; import java.io.InputStream; import java.io.OutputStream; import org.apache.hadoop.fs.Path; -import org.junit.After; -import org.junit.Before; import org.junit.Ignore; import 
org.junit.Test; /** * Tests for the upload, buffering and flush logic in WASB. */ -public class TestNativeAzureFileSystemUploadLogic { - private AzureBlobStorageTestAccount testAccount; +public class TestNativeAzureFileSystemUploadLogic extends AbstractWasbTestBase { // Just an arbitrary number so that the values I write have a predictable // pattern: 0, 1, 2, .. , 45, 46, 0, 1, 2, ... static final int byteValuePeriod = 47; - @Before - public void setUp() throws Exception { - testAccount = AzureBlobStorageTestAccount.createMock(); - } - - @After - public void tearDown() throws Exception { - if (testAccount != null) { - testAccount.cleanup(); - testAccount = null; - } + @Override + protected AzureBlobStorageTestAccount createTestAccount() throws Exception { + return AzureBlobStorageTestAccount.createMock(); } /** @@ -126,9 +112,9 @@ public class TestNativeAzureFileSystemUploadLogic { * @param expectedSize The expected size of the data in there. */ private void assertDataInFile(Path file, int expectedSize) throws Exception { - InputStream inStream = testAccount.getFileSystem().open(file); - assertDataInStream(inStream, expectedSize); - inStream.close(); + try(InputStream inStream = getFileSystem().open(file)) { + assertDataInStream(inStream, expectedSize); + } } /** @@ -139,7 +125,7 @@ public class TestNativeAzureFileSystemUploadLogic { private void assertDataInTempBlob(int expectedSize) throws Exception { // Look for the temporary upload blob in the backing store. InMemoryBlockBlobStore backingStore = - testAccount.getMockStorage().getBackingStore(); + getTestAccount().getMockStorage().getBackingStore(); String tempKey = null; for (String key : backingStore.getKeys()) { if (key.contains(NativeAzureFileSystem.AZURE_TEMP_FOLDER)) { @@ -149,9 +135,10 @@ public class TestNativeAzureFileSystemUploadLogic { } } assertNotNull(tempKey); - InputStream inStream = new ByteArrayInputStream(backingStore.getContent(tempKey)); - assertDataInStream(inStream, expectedSize); - inStream.close(); + try (InputStream inStream = new ByteArrayInputStream( + backingStore.getContent(tempKey))) { + assertDataInStream(inStream, expectedSize); + } } /** @@ -162,25 +149,30 @@ public class TestNativeAzureFileSystemUploadLogic { */ private void testConsistencyAfterManyFlushes(FlushFrequencyVariation variation) throws Exception { - Path uploadedFile = new Path("/uploadedFile"); - OutputStream outStream = testAccount.getFileSystem().create(uploadedFile); - final int totalSize = 9123; - int flushPeriod; - switch (variation) { - case BeforeSingleBufferFull: flushPeriod = 300; break; - case AfterSingleBufferFull: flushPeriod = 600; break; - case AfterAllRingBufferFull: flushPeriod = 1600; break; - default: - throw new IllegalArgumentException("Unknown variation: " + variation); - } - for (int i = 0; i < totalSize; i++) { - outStream.write(i % byteValuePeriod); - if ((i + 1) % flushPeriod == 0) { - outStream.flush(); - assertDataInTempBlob(i + 1); + Path uploadedFile = methodPath(); + try { + OutputStream outStream = getFileSystem().create(uploadedFile); + final int totalSize = 9123; + int flushPeriod; + switch (variation) { + case BeforeSingleBufferFull: flushPeriod = 300; break; + case AfterSingleBufferFull: flushPeriod = 600; break; + case AfterAllRingBufferFull: flushPeriod = 1600; break; + default: + throw new IllegalArgumentException("Unknown variation: " + variation); } + for (int i = 0; i < totalSize; i++) { + outStream.write(i % byteValuePeriod); + if ((i + 1) % flushPeriod == 0) { + outStream.flush(); + 
assertDataInTempBlob(i + 1); + } + } + outStream.close(); + assertDataInFile(uploadedFile, totalSize); + } finally { + getFileSystem().delete(uploadedFile, false); + } - outStream.close(); - assertDataInFile(uploadedFile, totalSize); } } http://git-wip-us.apache.org/repos/asf/hadoop/blob/3fbcf763/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestOutOfBandAzureBlobOperations.java ---------------------------------------------------------------------- diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestOutOfBandAzureBlobOperations.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestOutOfBandAzureBlobOperations.java index 9d5d6a22c..c106fb1 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestOutOfBandAzureBlobOperations.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestOutOfBandAzureBlobOperations.java @@ -18,11 +18,6 @@ package org.apache.hadoop.fs.azure; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertTrue; - import java.util.HashMap; import org.apache.hadoop.fs.FileStatus; @@ -37,7 +32,8 @@ import org.junit.Test; * Tests that WASB handles things gracefully when users add blobs to the Azure * Storage container from outside WASB's control. */ -public class TestOutOfBandAzureBlobOperations { +public class TestOutOfBandAzureBlobOperations + extends AbstractWasbTestWithTimeout { private AzureBlobStorageTestAccount testAccount; private FileSystem fs; private InMemoryBlockBlobStore backingStore; http://git-wip-us.apache.org/repos/asf/hadoop/blob/3fbcf763/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestOutOfBandAzureBlobOperationsLive.java ---------------------------------------------------------------------- diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestOutOfBandAzureBlobOperationsLive.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestOutOfBandAzureBlobOperationsLive.java deleted file mode 100644 index 60b01c6..0000000 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestOutOfBandAzureBlobOperationsLive.java +++ /dev/null @@ -1,203 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
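The TestNativeAzureFileSystemUploadLogic hunks above replace manual open/close pairs with try-with-resources, which closes the stream even when an assertion throws mid-check. A minimal sketch of the before/after shape (method and parameter names are illustrative):

import java.io.InputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TryWithResourcesSketch {
  static void assertDataInFile(FileSystem fs, Path file) throws Exception {
    // Before: inStream.close() was skipped whenever an assertion failed.
    // After: the stream is closed on every exit path.
    try (InputStream inStream = fs.open(file)) {
      // ... assertions against inStream ...
    }
  }
}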
- */ - -package org.apache.hadoop.fs.azure; - -import static org.junit.Assert.assertTrue; -import static org.junit.Assume.assumeNotNull; - -import org.apache.hadoop.fs.FSDataOutputStream; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.security.UserGroupInformation; -import org.junit.After; -import org.junit.Before; -import org.junit.Test; - -import com.microsoft.azure.storage.blob.BlobOutputStream; -import com.microsoft.azure.storage.blob.CloudBlockBlob; - -public class TestOutOfBandAzureBlobOperationsLive { - private FileSystem fs; - private AzureBlobStorageTestAccount testAccount; - - @Before - public void setUp() throws Exception { - testAccount = AzureBlobStorageTestAccount.create(); - if (testAccount != null) { - fs = testAccount.getFileSystem(); - } - assumeNotNull(testAccount); - } - - @After - public void tearDown() throws Exception { - if (testAccount != null) { - testAccount.cleanup(); - testAccount = null; - fs = null; - } - } - - // scenario for this particular test described at MONARCH-HADOOP-764 - // creating a file out-of-band would confuse mkdirs("<oobfilesUncleFolder>") - // eg oob creation of "user/<name>/testFolder/a/input/file" - // Then wasb creation of "user/<name>/testFolder/a/output" fails - @Test - public void outOfBandFolder_uncleMkdirs() throws Exception { - - // NOTE: manual use of CloubBlockBlob targets working directory explicitly. - // WASB driver methods prepend working directory implicitly. - String workingDir = "user/" - + UserGroupInformation.getCurrentUser().getShortUserName() + "/"; - - CloudBlockBlob blob = testAccount.getBlobReference(workingDir - + "testFolder1/a/input/file"); - BlobOutputStream s = blob.openOutputStream(); - s.close(); - assertTrue(fs.exists(new Path("testFolder1/a/input/file"))); - - Path targetFolder = new Path("testFolder1/a/output"); - assertTrue(fs.mkdirs(targetFolder)); - } - - // scenario for this particular test described at MONARCH-HADOOP-764 - @Test - public void outOfBandFolder_parentDelete() throws Exception { - - // NOTE: manual use of CloubBlockBlob targets working directory explicitly. - // WASB driver methods prepend working directory implicitly. - String workingDir = "user/" - + UserGroupInformation.getCurrentUser().getShortUserName() + "/"; - CloudBlockBlob blob = testAccount.getBlobReference(workingDir - + "testFolder2/a/input/file"); - BlobOutputStream s = blob.openOutputStream(); - s.close(); - assertTrue(fs.exists(new Path("testFolder2/a/input/file"))); - - Path targetFolder = new Path("testFolder2/a/input"); - assertTrue(fs.delete(targetFolder, true)); - } - - @Test - public void outOfBandFolder_rootFileDelete() throws Exception { - - CloudBlockBlob blob = testAccount.getBlobReference("fileY"); - BlobOutputStream s = blob.openOutputStream(); - s.close(); - assertTrue(fs.exists(new Path("/fileY"))); - assertTrue(fs.delete(new Path("/fileY"), true)); - } - - @Test - public void outOfBandFolder_firstLevelFolderDelete() throws Exception { - - CloudBlockBlob blob = testAccount.getBlobReference("folderW/file"); - BlobOutputStream s = blob.openOutputStream(); - s.close(); - assertTrue(fs.exists(new Path("/folderW"))); - assertTrue(fs.exists(new Path("/folderW/file"))); - assertTrue(fs.delete(new Path("/folderW"), true)); - } - - // scenario for this particular test described at MONARCH-HADOOP-764 - @Test - public void outOfBandFolder_siblingCreate() throws Exception { - - // NOTE: manual use of CloubBlockBlob targets working directory explicitly. 
- // WASB driver methods prepend working directory implicitly. - String workingDir = "user/" - + UserGroupInformation.getCurrentUser().getShortUserName() + "/"; - CloudBlockBlob blob = testAccount.getBlobReference(workingDir - + "testFolder3/a/input/file"); - BlobOutputStream s = blob.openOutputStream(); - s.close(); - assertTrue(fs.exists(new Path("testFolder3/a/input/file"))); - - Path targetFile = new Path("testFolder3/a/input/file2"); - FSDataOutputStream s2 = fs.create(targetFile); - s2.close(); - } - - // scenario for this particular test described at MONARCH-HADOOP-764 - // creating a new file in the root folder - @Test - public void outOfBandFolder_create_rootDir() throws Exception { - Path targetFile = new Path("/newInRoot"); - FSDataOutputStream s2 = fs.create(targetFile); - s2.close(); - } - - // scenario for this particular test described at MONARCH-HADOOP-764 - @Test - public void outOfBandFolder_rename() throws Exception { - - // NOTE: manual use of CloubBlockBlob targets working directory explicitly. - // WASB driver methods prepend working directory implicitly. - String workingDir = "user/" - + UserGroupInformation.getCurrentUser().getShortUserName() + "/"; - CloudBlockBlob blob = testAccount.getBlobReference(workingDir - + "testFolder4/a/input/file"); - BlobOutputStream s = blob.openOutputStream(); - s.close(); - - Path srcFilePath = new Path("testFolder4/a/input/file"); - assertTrue(fs.exists(srcFilePath)); - - Path destFilePath = new Path("testFolder4/a/input/file2"); - fs.rename(srcFilePath, destFilePath); - } - - // Verify that you can rename a file which is the only file in an implicit folder in the - // WASB file system. - // scenario for this particular test described at MONARCH-HADOOP-892 - @Test - public void outOfBandSingleFile_rename() throws Exception { - - //NOTE: manual use of CloubBlockBlob targets working directory explicitly. - // WASB driver methods prepend working directory implicitly. - String workingDir = "user/" + UserGroupInformation.getCurrentUser().getShortUserName() + "/"; - CloudBlockBlob blob = testAccount.getBlobReference(workingDir + "testFolder5/a/input/file"); - BlobOutputStream s = blob.openOutputStream(); - s.close(); - - Path srcFilePath = new Path("testFolder5/a/input/file"); - assertTrue(fs.exists(srcFilePath)); - - Path destFilePath = new Path("testFolder5/file2"); - fs.rename(srcFilePath, destFilePath); - } - - // WASB must force explicit parent directories in create, delete, mkdirs, rename. - // scenario for this particular test described at MONARCH-HADOOP-764 - @Test - public void outOfBandFolder_rename_rootLevelFiles() throws Exception { - - // NOTE: manual use of CloubBlockBlob targets working directory explicitly. - // WASB driver methods prepend working directory implicitly. 
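Each of these out-of-band scenarios follows the same recipe: write a zero-byte blob straight through the storage SDK, bypassing WASB entirely, then verify the file system still honors its contract for the surrounding paths. A sketch of that recipe, factored out of the deleted tests (testAccount is the helper the deleted class used):

import com.microsoft.azure.storage.blob.BlobOutputStream;
import com.microsoft.azure.storage.blob.CloudBlockBlob;

public class OutOfBandCreateSketch {
  static void createOutOfBand(AzureBlobStorageTestAccount testAccount,
      String key) throws Exception {
    // Create the blob directly against storage; WASB never sees this write.
    CloudBlockBlob blob = testAccount.getBlobReference(key);
    BlobOutputStream s = blob.openOutputStream();
    s.close(); // a zero-byte blob is enough to make the path "exist"
  }
}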
- CloudBlockBlob blob = testAccount.getBlobReference("fileX"); - BlobOutputStream s = blob.openOutputStream(); - s.close(); - - Path srcFilePath = new Path("/fileX"); - assertTrue(fs.exists(srcFilePath)); - - Path destFilePath = new Path("/fileXrename"); - fs.rename(srcFilePath, destFilePath); - } -} http://git-wip-us.apache.org/repos/asf/hadoop/blob/3fbcf763/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestReadAndSeekPageBlobAfterWrite.java ---------------------------------------------------------------------- diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestReadAndSeekPageBlobAfterWrite.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestReadAndSeekPageBlobAfterWrite.java deleted file mode 100644 index 41b8386..0000000 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestReadAndSeekPageBlobAfterWrite.java +++ /dev/null @@ -1,355 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.fs.azure; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; -import static org.junit.Assume.assumeNotNull; - -import java.io.IOException; -import java.io.OutputStream; -import java.util.Arrays; -import java.util.Random; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.fs.FSDataInputStream; -import org.apache.hadoop.fs.FSDataOutputStream; -import org.apache.hadoop.fs.FileStatus; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.fs.azure.AzureException; -import org.apache.hadoop.util.Time; -import org.junit.After; -import org.junit.Before; -import org.junit.Test; - -/** - * Write data into a page blob and verify you can read back all of it - * or just a part of it. - */ -public class TestReadAndSeekPageBlobAfterWrite { - private static final Log LOG = LogFactory.getLog(TestReadAndSeekPageBlobAfterWrite.class); - - private FileSystem fs; - private AzureBlobStorageTestAccount testAccount; - private byte[] randomData; - - // Page blob physical page size - private static final int PAGE_SIZE = PageBlobFormatHelpers.PAGE_SIZE; - - // Size of data on page (excluding header) - private static final int PAGE_DATA_SIZE = PAGE_SIZE - PageBlobFormatHelpers.PAGE_HEADER_SIZE; - private static final int MAX_BYTES = 33554432; // maximum bytes in a file that we'll test - private static final int MAX_PAGES = MAX_BYTES / PAGE_SIZE; // maximum number of pages we'll test - private Random rand = new Random(); - - // A key with a prefix under /pageBlobs, which for the test file system will - // force use of a page blob. 
- private static final String KEY = "/pageBlobs/file.dat"; - private static final Path PATH = new Path(KEY); // path of page blob file to read and write - - protected AzureBlobStorageTestAccount createTestAccount() throws Exception { - return AzureBlobStorageTestAccount.create(); - } - - @Before - public void setUp() throws Exception { - testAccount = createTestAccount(); - if (testAccount != null) { - fs = testAccount.getFileSystem(); - } - assumeNotNull(testAccount); - - // Make sure we are using an integral number of pages. - assertEquals(0, MAX_BYTES % PAGE_SIZE); - - // load an in-memory array of random data - randomData = new byte[PAGE_SIZE * MAX_PAGES]; - rand.nextBytes(randomData); - } - - @After - public void tearDown() throws Exception { - if (testAccount != null) { - testAccount.cleanup(); - testAccount = null; - fs = null; - } - } - - /** - * Make sure the file name (key) is a page blob file name. If anybody changes that, - * we need to come back and update this test class. - */ - @Test - public void testIsPageBlobFileName() { - AzureNativeFileSystemStore store = ((NativeAzureFileSystem) fs).getStore(); - String[] a = KEY.split("/"); - String key2 = a[1] + "/"; - assertTrue(store.isPageBlobKey(key2)); - } - - /** - * For a set of different file sizes, write some random data to a page blob, - * read it back, and compare that what was read is the same as what was written. - */ - @Test - public void testReadAfterWriteRandomData() throws IOException { - - // local shorthand - final int PDS = PAGE_DATA_SIZE; - - // Test for sizes at and near page boundaries - int[] dataSizes = { - - // on first page - 0, 1, 2, 3, - - // Near first physical page boundary (because the implementation - // stores PDS + the page header size bytes on each page). - PDS - 1, PDS, PDS + 1, PDS + 2, PDS + 3, - - // near second physical page boundary - (2 * PDS) - 1, (2 * PDS), (2 * PDS) + 1, (2 * PDS) + 2, (2 * PDS) + 3, - - // near tenth physical page boundary - (10 * PDS) - 1, (10 * PDS), (10 * PDS) + 1, (10 * PDS) + 2, (10 * PDS) + 3, - - // test one big size, >> 4MB (an internal buffer size in the code) - MAX_BYTES - }; - - for (int i : dataSizes) { - testReadAfterWriteRandomData(i); - } - } - - private void testReadAfterWriteRandomData(int size) throws IOException { - writeRandomData(size); - readRandomDataAndVerify(size); - } - - /** - * Read "size" bytes of data and verify that what was read and what was written - * are the same. - */ - private void readRandomDataAndVerify(int size) throws AzureException, IOException { - byte[] b = new byte[size]; - FSDataInputStream stream = fs.open(PATH); - int bytesRead = stream.read(b); - stream.close(); - assertEquals(bytesRead, size); - - // compare the data read to the data written - assertTrue(comparePrefix(randomData, b, size)); - } - - // return true if the beginning "size" values of the arrays are the same - private boolean comparePrefix(byte[] a, byte[] b, int size) { - if (a.length < size || b.length < size) { - return false; - } - for (int i = 0; i < size; i++) { - if (a[i] != b[i]) { - return false; - } - } - return true; - } - - // Write a specified amount of random data to the file path for this test class. - private void writeRandomData(int size) throws IOException { - OutputStream output = fs.create(PATH); - output.write(randomData, 0, size); - output.close(); - } - - /** - * Write data to a page blob, open it, seek, and then read a range of data. - * Then compare that the data read from that range is the same as the data originally written. 
- */ - @Test - public void testPageBlobSeekAndReadAfterWrite() throws IOException { - writeRandomData(PAGE_SIZE * MAX_PAGES); - int recordSize = 100; - byte[] b = new byte[recordSize]; - FSDataInputStream stream = fs.open(PATH); - - // Seek to a boundary around the middle of the 6th page - int seekPosition = 5 * PAGE_SIZE + 250; - stream.seek(seekPosition); - - // Read a record's worth of bytes and verify results - int bytesRead = stream.read(b); - verifyReadRandomData(b, bytesRead, seekPosition, recordSize); - - // Seek to another spot and read a record greater than a page - seekPosition = 10 * PAGE_SIZE + 250; - stream.seek(seekPosition); - recordSize = 1000; - b = new byte[recordSize]; - bytesRead = stream.read(b); - verifyReadRandomData(b, bytesRead, seekPosition, recordSize); - - // Read the last 100 bytes of the file - recordSize = 100; - seekPosition = PAGE_SIZE * MAX_PAGES - recordSize; - stream.seek(seekPosition); - b = new byte[recordSize]; - bytesRead = stream.read(b); - verifyReadRandomData(b, bytesRead, seekPosition, recordSize); - - // Read past the end of the file and we should get only partial data. - recordSize = 100; - seekPosition = PAGE_SIZE * MAX_PAGES - recordSize + 50; - stream.seek(seekPosition); - b = new byte[recordSize]; - bytesRead = stream.read(b); - assertEquals(50, bytesRead); - - // compare last 50 bytes written with those read - byte[] tail = Arrays.copyOfRange(randomData, seekPosition, randomData.length); - assertTrue(comparePrefix(tail, b, 50)); - } - - // Verify that reading a record of data after seeking gives the expected data. - private void verifyReadRandomData(byte[] b, int bytesRead, int seekPosition, int recordSize) { - byte[] originalRecordData = - Arrays.copyOfRange(randomData, seekPosition, seekPosition + recordSize + 1); - assertEquals(recordSize, bytesRead); - assertTrue(comparePrefix(originalRecordData, b, recordSize)); - } - - // Test many small flushed writes interspersed with periodic hflush calls. - // For manual testing, increase NUM_WRITES to a large number. - // The goal for a long-running manual test is to make sure that it finishes - // and the close() call does not time out. It also facilitates debugging into - // hflush/hsync. - @Test - public void testManySmallWritesWithHFlush() throws IOException { - writeAndReadOneFile(50, 100, 20); - } - - /** - * Write a total of numWrites * recordLength data to a file, read it back, - * and check to make sure what was read is the same as what was written. - * The syncInterval is the number of writes after which to call hflush to - * force the data to storage. - */ - private void writeAndReadOneFile(int numWrites, int recordLength, int syncInterval) throws IOException { - final int NUM_WRITES = numWrites; - final int RECORD_LENGTH = recordLength; - final int SYNC_INTERVAL = syncInterval; - - // A lower bound on the minimum time we think it will take to do - // a write to Azure storage. - final long MINIMUM_EXPECTED_TIME = 20; - LOG.info("Writing " + NUM_WRITES * RECORD_LENGTH + " bytes to " + PATH.getName()); - FSDataOutputStream output = fs.create(PATH); - int writesSinceHFlush = 0; - try { - - // Do a flush and hflush to exercise case for empty write queue in PageBlobOutputStream, - // to test concurrent execution gates. 
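Every seek assertion above reduces to one check: after seek(pos), the bytes returned by read(b) must match the slice of the original data starting at pos. A compact sketch of that comparison (method and parameter names are illustrative; the deleted test kept its original data in randomData):

import java.util.Arrays;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class SeekVerifySketch {
  static void verifySeek(FileSystem fs, Path path, byte[] original,
      int seekPosition, int recordSize) throws Exception {
    byte[] b = new byte[recordSize];
    try (FSDataInputStream stream = fs.open(path)) {
      stream.seek(seekPosition);
      int bytesRead = stream.read(b);
      if (bytesRead < 0) {
        throw new AssertionError("unexpected EOF at " + seekPosition);
      }
      // Note: a single read() may legally return fewer bytes than requested,
      // so compare only the bytes actually read.
      byte[] expected = Arrays.copyOfRange(original, seekPosition,
          seekPosition + bytesRead);
      if (!Arrays.equals(expected, Arrays.copyOf(b, bytesRead))) {
        throw new AssertionError("data mismatch at offset " + seekPosition);
      }
    }
  }
}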
- output.flush(); - output.hflush(); - for (int i = 0; i < NUM_WRITES; i++) { - output.write(randomData, i * RECORD_LENGTH, RECORD_LENGTH); - writesSinceHFlush++; - output.flush(); - if ((i % SYNC_INTERVAL) == 0) { - output.hflush(); - writesSinceHFlush = 0; - } - } - } finally { - long start = Time.monotonicNow(); - output.close(); - long end = Time.monotonicNow(); - LOG.debug("close duration = " + (end - start) + " msec."); - if (writesSinceHFlush > 0) { - assertTrue(String.format( - "close duration with >= 1 pending write is %d, less than minimum expected of %d", - end - start, MINIMUM_EXPECTED_TIME), - end - start >= MINIMUM_EXPECTED_TIME); - } - } - - // Read the data back and check it. - FSDataInputStream stream = fs.open(PATH); - int SIZE = NUM_WRITES * RECORD_LENGTH; - byte[] b = new byte[SIZE]; - try { - stream.seek(0); - stream.read(b, 0, SIZE); - verifyReadRandomData(b, SIZE, 0, SIZE); - } finally { - stream.close(); - } - - // delete the file - fs.delete(PATH, false); - } - - // Test writing to a large file repeatedly as a stress test. - // Set the repetitions to a larger number for manual testing - // for a longer stress run. - @Test - public void testLargeFileStress() throws IOException { - int numWrites = 32; - int recordSize = 1024 * 1024; - int syncInterval = 10; - int repetitions = 1; - for (int i = 0; i < repetitions; i++) { - writeAndReadOneFile(numWrites, recordSize, syncInterval); - } - } - - // Write to a file repeatedly to verify that it extends. - // The page blob file should start out at 128MB and finish at 256MB. - @Test(timeout=300000) - public void testFileSizeExtension() throws IOException { - final int writeSize = 1024 * 1024; - final int numWrites = 129; - final byte dataByte = 5; - byte[] data = new byte[writeSize]; - Arrays.fill(data, dataByte); - FSDataOutputStream output = fs.create(PATH); - try { - for (int i = 0; i < numWrites; i++) { - output.write(data); - output.hflush(); - LOG.debug("total writes = " + (i + 1)); - } - } finally { - output.close(); - } - - // Show that we wrote more than the default page blob file size. - assertTrue(numWrites * writeSize > PageBlobOutputStream.PAGE_BLOB_MIN_SIZE); - - // Verify we can list the new size. That will prove we expanded the file. 
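The hflush loop above is the core durability pattern for these streams: flush() hands buffered bytes to the output stream, while hflush() forces them out of client buffers so new readers can see them, so calling it every syncInterval writes bounds how much data an abrupt failure can lose. A sketch of the cadence (the stream and interval are illustrative):

import org.apache.hadoop.fs.FSDataOutputStream;

public class HflushCadenceSketch {
  static void writeWithSync(FSDataOutputStream out, byte[] record,
      int numWrites, int syncInterval) throws Exception {
    for (int i = 0; i < numWrites; i++) {
      out.write(record);
      out.flush();             // hand the bytes to the stream
      if ((i % syncInterval) == 0) {
        out.hflush();          // make the data visible to new readers
      }
    }
    out.close();               // close() persists whatever remains buffered
  }
}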
- FileStatus[] status = fs.listStatus(PATH); - assertTrue(status[0].getLen() == numWrites * writeSize); - LOG.debug("Total bytes written to " + PATH + " = " + status[0].getLen()); - fs.delete(PATH, false); - } - -} http://git-wip-us.apache.org/repos/asf/hadoop/blob/3fbcf763/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestShellDecryptionKeyProvider.java ---------------------------------------------------------------------- diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestShellDecryptionKeyProvider.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestShellDecryptionKeyProvider.java index 2284d1f..75b102e 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestShellDecryptionKeyProvider.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestShellDecryptionKeyProvider.java @@ -18,21 +18,23 @@ package org.apache.hadoop.fs.azure; -import static org.junit.Assert.assertEquals; - import java.io.File; import org.apache.commons.io.FileUtils; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.util.Shell; import org.junit.Assert; import org.junit.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; -public class TestShellDecryptionKeyProvider { - public static final Log LOG = LogFactory - .getLog(TestShellDecryptionKeyProvider.class); +/** + * Windows only tests of shell scripts to provide decryption keys. + */ +public class TestShellDecryptionKeyProvider + extends AbstractWasbTestWithTimeout { + public static final Logger LOG = LoggerFactory + .getLogger(TestShellDecryptionKeyProvider.class); private static File TEST_ROOT_DIR = new File(System.getProperty( "test.build.data", "/tmp"), "TestShellDecryptionKeyProvider"); http://git-wip-us.apache.org/repos/asf/hadoop/blob/3fbcf763/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestWasbFsck.java ---------------------------------------------------------------------- diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestWasbFsck.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestWasbFsck.java index 467424b..9d32fb2 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestWasbFsck.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestWasbFsck.java @@ -18,10 +18,6 @@ package org.apache.hadoop.fs.azure; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotNull; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileStatus; @@ -32,7 +28,10 @@ import org.junit.Before; import org.junit.Ignore; import org.junit.Test; -public class TestWasbFsck { +/** + * Tests which look at fsck recovery. 
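TestShellDecryptionKeyProvider's move from commons-logging to SLF4J also enables parameterized log messages, which skip string concatenation whenever the level is disabled. A sketch of the idiom (the class name is illustrative):

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class Slf4jSketch {
  private static final Logger LOG =
      LoggerFactory.getLogger(Slf4jSketch.class);

  void example(long durationMs) {
    // The {} placeholder defers formatting until the level check passes.
    LOG.debug("close duration = {} msec.", durationMs);
  }
}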
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3fbcf763/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestWasbFsck.java ---------------------------------------------------------------------- diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestWasbFsck.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestWasbFsck.java index 467424b..9d32fb2 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestWasbFsck.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestWasbFsck.java @@ -18,10 +18,6 @@ package org.apache.hadoop.fs.azure; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotNull; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileStatus; @@ -32,7 +28,10 @@ import org.junit.Before; import org.junit.Ignore; import org.junit.Test; -public class TestWasbFsck { +/** + * Tests which look at fsck recovery.
+ */ +public class TestWasbFsck extends AbstractWasbTestWithTimeout { private AzureBlobStorageTestAccount testAccount; private FileSystem fs; private InMemoryBlockBlobStore backingStore; http://git-wip-us.apache.org/repos/asf/hadoop/blob/3fbcf763/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestWasbRemoteCallHelper.java ---------------------------------------------------------------------- diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestWasbRemoteCallHelper.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestWasbRemoteCallHelper.java deleted file mode 100644 index ded25b9..0000000 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestWasbRemoteCallHelper.java +++ /dev/null @@ -1,569 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.fs.azure; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.fs.contract.ContractTestUtils; -import org.apache.hadoop.io.retry.RetryUtils; -import org.apache.http.Header; -import org.apache.http.HttpResponse; -import org.apache.http.HttpEntity; -import org.apache.http.HttpStatus; -import org.apache.http.StatusLine; -import org.apache.http.ProtocolVersion; -import org.apache.http.ParseException; -import org.apache.http.HeaderElement; -import org.apache.http.client.HttpClient; -import org.apache.http.client.methods.HttpGet; -import org.hamcrest.Description; -import org.hamcrest.TypeSafeMatcher; -import org.junit.Assume; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.ExpectedException; -import org.mockito.ArgumentMatcher; -import org.mockito.Mockito; - -import java.io.ByteArrayInputStream; -import java.net.InetAddress; -import java.net.UnknownHostException; -import java.nio.charset.StandardCharsets; - -import static org.apache.hadoop.fs.azure.AzureNativeFileSystemStore.KEY_USE_SECURE_MODE; -import static org.mockito.Matchers.argThat; -import static org.mockito.Mockito.atLeast; -import static org.mockito.Mockito.times; - -/** - * Test class to hold all WasbRemoteCallHelper tests - */ -public class TestWasbRemoteCallHelper - extends AbstractWasbTestBase { - public static final String EMPTY_STRING = ""; - private static final int INVALID_HTTP_STATUS_CODE_999 = 999; - - @Override - protected AzureBlobStorageTestAccount createTestAccount() throws Exception { - Configuration conf = new Configuration(); - conf.set(NativeAzureFileSystem.KEY_AZURE_AUTHORIZATION, "true"); - conf.set(RemoteWasbAuthorizerImpl.KEY_REMOTE_AUTH_SERVICE_URLS, "http://localhost1/,http://localhost2/,http://localhost:8080"); - return AzureBlobStorageTestAccount.create(conf); - } - - @Before - public
void beforeMethod() { - boolean useSecureMode = fs.getConf().getBoolean(KEY_USE_SECURE_MODE, false); - boolean useAuthorization = fs.getConf().getBoolean(NativeAzureFileSystem.KEY_AZURE_AUTHORIZATION, false); - Assume.assumeTrue("Test valid when both SecureMode and Authorization are enabled .. skipping", - useSecureMode && useAuthorization); - - Assume.assumeTrue( - useSecureMode && useAuthorization - ); - } - - @Rule - public ExpectedException expectedEx = ExpectedException.none(); - - /** - * Test invalid status-code - * @throws Throwable - */ - @Test // (expected = WasbAuthorizationException.class) - public void testInvalidStatusCode() throws Throwable { - - setupExpectations(); - - // set up mocks - HttpClient mockHttpClient = Mockito.mock(HttpClient.class); - HttpResponse mockHttpResponse = Mockito.mock(HttpResponse.class); - Mockito.when(mockHttpClient.execute(Mockito.<HttpGet>any())).thenReturn(mockHttpResponse); - Mockito.when(mockHttpResponse.getStatusLine()).thenReturn(newStatusLine(INVALID_HTTP_STATUS_CODE_999)); - // finished setting up mocks - - performop(mockHttpClient); - } - - /** - * Test invalid Content-Type - * @throws Throwable - */ - @Test // (expected = WasbAuthorizationException.class) - public void testInvalidContentType() throws Throwable { - - setupExpectations(); - - // set up mocks - HttpClient mockHttpClient = Mockito.mock(HttpClient.class); - HttpResponse mockHttpResponse = Mockito.mock(HttpResponse.class); - Mockito.when(mockHttpClient.execute(Mockito.<HttpGet>any())).thenReturn(mockHttpResponse); - Mockito.when(mockHttpResponse.getStatusLine()).thenReturn(newStatusLine(HttpStatus.SC_OK)); - Mockito.when(mockHttpResponse.getFirstHeader("Content-Type")) - .thenReturn(newHeader("Content-Type", "text/plain")); - // finished setting up mocks - - performop(mockHttpClient); - } - - /** - * Test missing Content-Length - * @throws Throwable - */ - @Test // (expected = WasbAuthorizationException.class) - public void testMissingContentLength() throws Throwable { - - setupExpectations(); - - // set up mocks - HttpClient mockHttpClient = Mockito.mock(HttpClient.class); - HttpResponse mockHttpResponse = Mockito.mock(HttpResponse.class); - Mockito.when(mockHttpClient.execute(Mockito.<HttpGet>any())).thenReturn(mockHttpResponse); - Mockito.when(mockHttpResponse.getStatusLine()).thenReturn(newStatusLine(HttpStatus.SC_OK)); - Mockito.when(mockHttpResponse.getFirstHeader("Content-Type")) - .thenReturn(newHeader("Content-Type", "application/json")); - // finished setting up mocks - - performop(mockHttpClient); - } - - /** - * Test Content-Length exceeds max - * @throws Throwable - */ - @Test // (expected = WasbAuthorizationException.class) - public void testContentLengthExceedsMax() throws Throwable { - - setupExpectations(); - - // set up mocks - HttpClient mockHttpClient = Mockito.mock(HttpClient.class); - HttpResponse mockHttpResponse = Mockito.mock(HttpResponse.class); - Mockito.when(mockHttpClient.execute(Mockito.<HttpGet>any())).thenReturn(mockHttpResponse); - Mockito.when(mockHttpResponse.getStatusLine()).thenReturn(newStatusLine(HttpStatus.SC_OK)); - Mockito.when(mockHttpResponse.getFirstHeader("Content-Type")) - .thenReturn(newHeader("Content-Type", "application/json")); - Mockito.when(mockHttpResponse.getFirstHeader("Content-Length")) - .thenReturn(newHeader("Content-Length", "2048")); - // finished setting up mocks - - performop(mockHttpClient); - } - - /** - * Test invalid Content-Length value - * @throws Throwable - */ - @Test // (expected = 
WasbAuthorizationException.class) - public void testInvalidContentLengthValue() throws Throwable { - - setupExpectations(); - - // set up mocks - HttpClient mockHttpClient = Mockito.mock(HttpClient.class); - HttpResponse mockHttpResponse = Mockito.mock(HttpResponse.class); - Mockito.when(mockHttpClient.execute(Mockito.<HttpGet>any())).thenReturn(mockHttpResponse); - Mockito.when(mockHttpResponse.getStatusLine()).thenReturn(newStatusLine(HttpStatus.SC_OK)); - Mockito.when(mockHttpResponse.getFirstHeader("Content-Type")) - .thenReturn(newHeader("Content-Type", "application/json")); - Mockito.when(mockHttpResponse.getFirstHeader("Content-Length")) - .thenReturn(newHeader("Content-Length", "20abc48")); - // finished setting up mocks - - performop(mockHttpClient); - } - - /** - * Test valid JSON response - * @throws Throwable - */ - @Test - public void testValidJSONResponse() throws Throwable { - - // set up mocks - HttpClient mockHttpClient = Mockito.mock(HttpClient.class); - - HttpResponse mockHttpResponse = Mockito.mock(HttpResponse.class); - HttpEntity mockHttpEntity = Mockito.mock(HttpEntity.class); - - Mockito.when(mockHttpClient.execute(Mockito.<HttpGet>any())).thenReturn(mockHttpResponse); - Mockito.when(mockHttpResponse.getStatusLine()).thenReturn(newStatusLine(HttpStatus.SC_OK)); - Mockito.when(mockHttpResponse.getFirstHeader("Content-Type")) - .thenReturn(newHeader("Content-Type", "application/json")); - Mockito.when(mockHttpResponse.getFirstHeader("Content-Length")) - .thenReturn(newHeader("Content-Length", "1024")); - Mockito.when(mockHttpResponse.getEntity()).thenReturn(mockHttpEntity); - Mockito.when(mockHttpEntity.getContent()) - .thenReturn(new ByteArrayInputStream(validJsonResponse().getBytes(StandardCharsets.UTF_8))) - .thenReturn(new ByteArrayInputStream(validJsonResponse().getBytes(StandardCharsets.UTF_8))) - .thenReturn(new ByteArrayInputStream(validJsonResponse().getBytes(StandardCharsets.UTF_8))); - // finished setting up mocks - - performop(mockHttpClient); - } - - /** - * Test malformed JSON response - * @throws Throwable - */ - @Test // (expected = WasbAuthorizationException.class) - public void testMalFormedJSONResponse() throws Throwable { - - expectedEx.expect(WasbAuthorizationException.class); - expectedEx.expectMessage("org.codehaus.jackson.JsonParseException: Unexpected end-of-input within/between OBJECT entries"); - - // set up mocks - HttpClient mockHttpClient = Mockito.mock(HttpClient.class); - - HttpResponse mockHttpResponse = Mockito.mock(HttpResponse.class); - HttpEntity mockHttpEntity = Mockito.mock(HttpEntity.class); - - Mockito.when(mockHttpClient.execute(Mockito.<HttpGet>any())).thenReturn(mockHttpResponse); - Mockito.when(mockHttpResponse.getStatusLine()).thenReturn(newStatusLine(HttpStatus.SC_OK)); - Mockito.when(mockHttpResponse.getFirstHeader("Content-Type")) - .thenReturn(newHeader("Content-Type", "application/json")); - Mockito.when(mockHttpResponse.getFirstHeader("Content-Length")) - .thenReturn(newHeader("Content-Length", "1024")); - Mockito.when(mockHttpResponse.getEntity()).thenReturn(mockHttpEntity); - Mockito.when(mockHttpEntity.getContent()) - .thenReturn(new ByteArrayInputStream(malformedJsonResponse().getBytes(StandardCharsets.UTF_8))); - // finished setting up mocks - - performop(mockHttpClient); - } - - /** - * Test valid JSON response failure response code - * @throws Throwable - */ - @Test // (expected = WasbAuthorizationException.class) - public void testFailureCodeJSONResponse() throws Throwable { - - 
expectedEx.expect(WasbAuthorizationException.class); - expectedEx.expectMessage("Remote authorization service encountered an error Unauthorized"); - - // set up mocks - HttpClient mockHttpClient = Mockito.mock(HttpClient.class); - - HttpResponse mockHttpResponse = Mockito.mock(HttpResponse.class); - HttpEntity mockHttpEntity = Mockito.mock(HttpEntity.class); - - Mockito.when(mockHttpClient.execute(Mockito.<HttpGet>any())).thenReturn(mockHttpResponse); - Mockito.when(mockHttpResponse.getStatusLine()).thenReturn(newStatusLine(HttpStatus.SC_OK)); - Mockito.when(mockHttpResponse.getFirstHeader("Content-Type")) - .thenReturn(newHeader("Content-Type", "application/json")); - Mockito.when(mockHttpResponse.getFirstHeader("Content-Length")) - .thenReturn(newHeader("Content-Length", "1024")); - Mockito.when(mockHttpResponse.getEntity()).thenReturn(mockHttpEntity); - Mockito.when(mockHttpEntity.getContent()) - .thenReturn(new ByteArrayInputStream(failureCodeJsonResponse().getBytes(StandardCharsets.UTF_8))); - // finished setting up mocks - - performop(mockHttpClient); - } - - @Test - public void testWhenOneInstanceIsDown() throws Throwable { - - boolean isAuthorizationCachingEnabled = fs.getConf().getBoolean(CachingAuthorizer.KEY_AUTH_SERVICE_CACHING_ENABLE, false); - - // set up mocks - HttpClient mockHttpClient = Mockito.mock(HttpClient.class); - HttpEntity mockHttpEntity = Mockito.mock(HttpEntity.class); - - HttpResponse mockHttpResponseService1 = Mockito.mock(HttpResponse.class); - Mockito.when(mockHttpResponseService1.getStatusLine()) - .thenReturn(newStatusLine(HttpStatus.SC_INTERNAL_SERVER_ERROR)); - Mockito.when(mockHttpResponseService1.getFirstHeader("Content-Type")) - .thenReturn(newHeader("Content-Type", "application/json")); - Mockito.when(mockHttpResponseService1.getFirstHeader("Content-Length")) - .thenReturn(newHeader("Content-Length", "1024")); - Mockito.when(mockHttpResponseService1.getEntity()) - .thenReturn(mockHttpEntity); - - HttpResponse mockHttpResponseService2 = Mockito.mock(HttpResponse.class); - Mockito.when(mockHttpResponseService2.getStatusLine()) - .thenReturn(newStatusLine(HttpStatus.SC_OK)); - Mockito.when(mockHttpResponseService2.getFirstHeader("Content-Type")) - .thenReturn(newHeader("Content-Type", "application/json")); - Mockito.when(mockHttpResponseService2.getFirstHeader("Content-Length")) - .thenReturn(newHeader("Content-Length", "1024")); - Mockito.when(mockHttpResponseService2.getEntity()) - .thenReturn(mockHttpEntity); - - HttpResponse mockHttpResponseServiceLocal = Mockito.mock(HttpResponse.class); - Mockito.when(mockHttpResponseServiceLocal.getStatusLine()) - .thenReturn(newStatusLine(HttpStatus.SC_INTERNAL_SERVER_ERROR)); - Mockito.when(mockHttpResponseServiceLocal.getFirstHeader("Content-Type")) - .thenReturn(newHeader("Content-Type", "application/json")); - Mockito.when(mockHttpResponseServiceLocal.getFirstHeader("Content-Length")) - .thenReturn(newHeader("Content-Length", "1024")); - Mockito.when(mockHttpResponseServiceLocal.getEntity()) - .thenReturn(mockHttpEntity); - - - - class HttpGetForService1 extends ArgumentMatcher<HttpGet>{ - @Override public boolean matches(Object o) { - return checkHttpGetMatchHost((HttpGet) o, "localhost1"); - } - } - class HttpGetForService2 extends ArgumentMatcher<HttpGet>{ - @Override public boolean matches(Object o) { - return checkHttpGetMatchHost((HttpGet) o, "localhost2"); - } - } - class HttpGetForServiceLocal extends ArgumentMatcher<HttpGet>{ - @Override public boolean matches(Object o) { - try { - return 
checkHttpGetMatchHost((HttpGet) o, InetAddress.getLocalHost().getCanonicalHostName()); - } catch (UnknownHostException e) { - return checkHttpGetMatchHost((HttpGet) o, "localhost"); - } - } - } - Mockito.when(mockHttpClient.execute(argThat(new HttpGetForService1()))) - .thenReturn(mockHttpResponseService1); - Mockito.when(mockHttpClient.execute(argThat(new HttpGetForService2()))) - .thenReturn(mockHttpResponseService2); - Mockito.when(mockHttpClient.execute(argThat(new HttpGetForServiceLocal()))) - .thenReturn(mockHttpResponseServiceLocal); - - //Need 3 times because performop() does 3 fs operations. - Mockito.when(mockHttpEntity.getContent()) - .thenReturn(new ByteArrayInputStream(validJsonResponse() - .getBytes(StandardCharsets.UTF_8))) - .thenReturn(new ByteArrayInputStream(validJsonResponse() - .getBytes(StandardCharsets.UTF_8))) - .thenReturn(new ByteArrayInputStream(validJsonResponse() - .getBytes(StandardCharsets.UTF_8))); - // finished setting up mocks - - performop(mockHttpClient); - - int expectedNumberOfInvocations = isAuthorizationCachingEnabled ? 2 : 3; - Mockito.verify(mockHttpClient, times(expectedNumberOfInvocations)).execute(Mockito.argThat(new HttpGetForServiceLocal())); - Mockito.verify(mockHttpClient, times(expectedNumberOfInvocations)).execute(Mockito.argThat(new HttpGetForService2())); - } - - @Test - public void testWhenServiceInstancesAreDown() throws Throwable { - //expectedEx.expect(WasbAuthorizationException.class); - // set up mocks - HttpClient mockHttpClient = Mockito.mock(HttpClient.class); - HttpEntity mockHttpEntity = Mockito.mock(HttpEntity.class); - - HttpResponse mockHttpResponseService1 = Mockito.mock(HttpResponse.class); - Mockito.when(mockHttpResponseService1.getStatusLine()) - .thenReturn(newStatusLine(HttpStatus.SC_INTERNAL_SERVER_ERROR)); - Mockito.when(mockHttpResponseService1.getFirstHeader("Content-Type")) - .thenReturn(newHeader("Content-Type", "application/json")); - Mockito.when(mockHttpResponseService1.getFirstHeader("Content-Length")) - .thenReturn(newHeader("Content-Length", "1024")); - Mockito.when(mockHttpResponseService1.getEntity()) - .thenReturn(mockHttpEntity); - - HttpResponse mockHttpResponseService2 = Mockito.mock(HttpResponse.class); - Mockito.when(mockHttpResponseService2.getStatusLine()) - .thenReturn(newStatusLine( - HttpStatus.SC_INTERNAL_SERVER_ERROR)); - Mockito.when(mockHttpResponseService2.getFirstHeader("Content-Type")) - .thenReturn(newHeader("Content-Type", "application/json")); - Mockito.when(mockHttpResponseService2.getFirstHeader("Content-Length")) - .thenReturn(newHeader("Content-Length", "1024")); - Mockito.when(mockHttpResponseService2.getEntity()) - .thenReturn(mockHttpEntity); - - HttpResponse mockHttpResponseService3 = Mockito.mock(HttpResponse.class); - Mockito.when(mockHttpResponseService3.getStatusLine()) - .thenReturn(newStatusLine( - HttpStatus.SC_INTERNAL_SERVER_ERROR)); - Mockito.when(mockHttpResponseService3.getFirstHeader("Content-Type")) - .thenReturn(newHeader("Content-Type", "application/json")); - Mockito.when(mockHttpResponseService3.getFirstHeader("Content-Length")) - .thenReturn(newHeader("Content-Length", "1024")); - Mockito.when(mockHttpResponseService3.getEntity()) - .thenReturn(mockHttpEntity); - - class HttpGetForService1 extends ArgumentMatcher<HttpGet>{ - @Override public boolean matches(Object o) { - return checkHttpGetMatchHost((HttpGet) o, "localhost1"); - } - } - class HttpGetForService2 extends ArgumentMatcher<HttpGet>{ - @Override public boolean matches(Object o) { - return
checkHttpGetMatchHost((HttpGet) o, "localhost2"); - } - } - class HttpGetForService3 extends ArgumentMatcher<HttpGet> { - @Override public boolean matches(Object o){ - try { - return checkHttpGetMatchHost((HttpGet) o, InetAddress.getLocalHost().getCanonicalHostName()); - } catch (UnknownHostException e) { - return checkHttpGetMatchHost((HttpGet) o, "localhost"); - } - } - } - Mockito.when(mockHttpClient.execute(argThat(new HttpGetForService1()))) - .thenReturn(mockHttpResponseService1); - Mockito.when(mockHttpClient.execute(argThat(new HttpGetForService2()))) - .thenReturn(mockHttpResponseService2); - Mockito.when(mockHttpClient.execute(argThat(new HttpGetForService3()))) - .thenReturn(mockHttpResponseService3); - - //Need 3 times because performop() does 3 fs operations. - Mockito.when(mockHttpEntity.getContent()) - .thenReturn(new ByteArrayInputStream( - validJsonResponse().getBytes(StandardCharsets.UTF_8))) - .thenReturn(new ByteArrayInputStream( - validJsonResponse().getBytes(StandardCharsets.UTF_8))) - .thenReturn(new ByteArrayInputStream( - validJsonResponse().getBytes(StandardCharsets.UTF_8))); - // finished setting up mocks - try { - performop(mockHttpClient); - }catch (WasbAuthorizationException e){ - e.printStackTrace(); - Mockito.verify(mockHttpClient, atLeast(2)) - .execute(argThat(new HttpGetForService1())); - Mockito.verify(mockHttpClient, atLeast(2)) - .execute(argThat(new HttpGetForService2())); - Mockito.verify(mockHttpClient, atLeast(3)) - .execute(argThat(new HttpGetForService3())); - Mockito.verify(mockHttpClient, times(7)).execute(Mockito.<HttpGet>any()); - } - } - - private void setupExpectations() { - expectedEx.expect(WasbAuthorizationException.class); - - class MatchesPattern extends TypeSafeMatcher<String> { - private String pattern; - - MatchesPattern(String pattern) { - this.pattern = pattern; - } - - @Override protected boolean matchesSafely(String item) { - return item.matches(pattern); - } - - @Override public void describeTo(Description description) { - description.appendText("matches pattern ").appendValue(pattern); - } - - @Override protected void describeMismatchSafely(String item, - Description mismatchDescription) { - mismatchDescription.appendText("does not match"); - } - } - - expectedEx.expectMessage(new MatchesPattern( - "org\\.apache\\.hadoop\\.fs\\.azure\\.WasbRemoteCallException: " - + "Encountered error while making remote call to " - + "http:\\/\\/localhost1\\/,http:\\/\\/localhost2\\/,http:\\/\\/localhost:8080 retried 6 time\\(s\\)\\.")); - } - - private void performop(HttpClient mockHttpClient) throws Throwable { - - Path testPath = new Path("/", "test.dat"); - - RemoteWasbAuthorizerImpl authorizer = new RemoteWasbAuthorizerImpl(); - authorizer.init(fs.getConf()); - WasbRemoteCallHelper mockWasbRemoteCallHelper = new WasbRemoteCallHelper( - RetryUtils.getMultipleLinearRandomRetry(new Configuration(), - EMPTY_STRING, true, - EMPTY_STRING, "1000,3,10000,2")); - mockWasbRemoteCallHelper.updateHttpClient(mockHttpClient); - authorizer.updateWasbRemoteCallHelper(mockWasbRemoteCallHelper); - fs.updateWasbAuthorizer(authorizer); - - fs.create(testPath); - ContractTestUtils.assertPathExists(fs, "testPath was not created", testPath); - fs.delete(testPath, false); - } - - private String validJsonResponse() { - return "{" - + "\"responseCode\": 0," - + "\"authorizationResult\": true," - + "\"responseMessage\": \"Authorized\"" - + "}"; - } - - private String malformedJsonResponse() { - return "{" - + "\"responseCode\": 0," - + "\"authorizationResult\": 
true," - + "\"responseMessage\":"; - } - - private String failureCodeJsonResponse() { - return "{" - + "\"responseCode\": 1," - + "\"authorizationResult\": false," - + "\"responseMessage\": \"Unauthorized\"" - + "}"; - } - - private StatusLine newStatusLine(final int statusCode) { - return new StatusLine() { - @Override - public ProtocolVersion getProtocolVersion() { - return new ProtocolVersion("HTTP", 1, 1); - } - - @Override - public int getStatusCode() { - return statusCode; - } - - @Override - public String getReasonPhrase() { - return "Reason Phrase"; - } - }; - } - - private Header newHeader(final String name, final String value) { - return new Header() { - @Override - public String getName() { - return name; - } - - @Override - public String getValue() { - return value; - } - - @Override - public HeaderElement[] getElements() throws ParseException { - return new HeaderElement[0]; - } - }; - } - - /** Check that a HttpGet request is with given remote host. */ - private static boolean checkHttpGetMatchHost(HttpGet g, String h) { - return g != null && g.getURI().getHost().equals(h); - } - -} --------------------------------------------------------------------- To unsubscribe, e-mail: [email protected] For additional commands, e-mail: [email protected]
