Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPathBasedCacheRequests.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPathBasedCacheRequests.java?rev=1539898&r1=1539897&r2=1539898&view=diff ============================================================================== --- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPathBasedCacheRequests.java (original) +++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPathBasedCacheRequests.java Fri Nov 8 01:44:24 2013 @@ -42,6 +42,7 @@ import org.apache.commons.logging.LogFac import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileSystemTestHelper; +import org.apache.hadoop.fs.IdNotFoundException; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.RemoteIterator; import org.apache.hadoop.fs.permission.FsPermission; @@ -49,17 +50,12 @@ import org.apache.hadoop.hdfs.DFSConfigK import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; -import org.apache.hadoop.hdfs.protocol.AddPathBasedCacheDirectiveException.InvalidPathNameError; -import org.apache.hadoop.hdfs.protocol.AddPathBasedCacheDirectiveException.InvalidPoolNameError; -import org.apache.hadoop.hdfs.protocol.AddPathBasedCacheDirectiveException.PoolWritePermissionDeniedError; import org.apache.hadoop.hdfs.protocol.CachePoolInfo; -import org.apache.hadoop.hdfs.protocol.PathBasedCacheDescriptor; import org.apache.hadoop.hdfs.protocol.PathBasedCacheDirective; -import org.apache.hadoop.hdfs.protocol.RemovePathBasedCacheDescriptorException.InvalidIdException; -import org.apache.hadoop.hdfs.protocol.RemovePathBasedCacheDescriptorException.NoSuchIdException; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.CachedBlocksList.Type; import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; import org.apache.hadoop.io.nativeio.NativeIO; +import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.GSet; @@ -86,7 +82,7 @@ public class TestPathBasedCacheRequests conf = new HdfsConfiguration(); // set low limits here for testing purposes conf.setInt(DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_POOLS_NUM_RESPONSES, 2); - conf.setInt(DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_DESCRIPTORS_NUM_RESPONSES, 2); + conf.setInt(DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_DIRECTIVES_NUM_RESPONSES, 2); cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); cluster.waitActive(); dfs = cluster.getFileSystem(); @@ -296,21 +292,21 @@ public class TestPathBasedCacheRequests } private static void validateListAll( - RemoteIterator<PathBasedCacheDescriptor> iter, - PathBasedCacheDescriptor... descriptors) throws Exception { - for (PathBasedCacheDescriptor descriptor: descriptors) { + RemoteIterator<PathBasedCacheDirective> iter, + Long... 
ids) throws Exception { + for (Long id: ids) { assertTrue("Unexpectedly few elements", iter.hasNext()); - assertEquals("Unexpected descriptor", descriptor, iter.next()); + assertEquals("Unexpected directive ID", id, iter.next().getId()); } assertFalse("Unexpectedly many list elements", iter.hasNext()); } - private static PathBasedCacheDescriptor addAsUnprivileged( + private static long addAsUnprivileged( final PathBasedCacheDirective directive) throws Exception { return unprivilegedUser - .doAs(new PrivilegedExceptionAction<PathBasedCacheDescriptor>() { + .doAs(new PrivilegedExceptionAction<Long>() { @Override - public PathBasedCacheDescriptor run() throws IOException { + public Long run() throws IOException { DistributedFileSystem myDfs = (DistributedFileSystem) FileSystem.get(conf); return myDfs.addPathBasedCacheDirective(directive); @@ -342,12 +338,12 @@ public class TestPathBasedCacheRequests setPool("pool1"). build(); - PathBasedCacheDescriptor alphaD = addAsUnprivileged(alpha); - PathBasedCacheDescriptor alphaD2 = addAsUnprivileged(alpha); - assertFalse("Expected to get unique descriptors when re-adding an " + long alphaId = addAsUnprivileged(alpha); + long alphaId2 = addAsUnprivileged(alpha); + assertFalse("Expected to get unique directives when re-adding an " + "existing PathBasedCacheDirective", - alphaD.getEntryId() == alphaD2.getEntryId()); - PathBasedCacheDescriptor betaD = addAsUnprivileged(beta); + alphaId == alphaId2); + long betaId = addAsUnprivileged(beta); try { addAsUnprivileged(new PathBasedCacheDirective.Builder(). @@ -355,8 +351,8 @@ public class TestPathBasedCacheRequests setPool("no_such_pool"). build()); fail("expected an error when adding to a non-existent pool."); - } catch (IOException ioe) { - assertTrue(ioe instanceof InvalidPoolNameError); + } catch (IdNotFoundException ioe) { + GenericTestUtils.assertExceptionContains("no such pool as", ioe); } try { @@ -366,8 +362,9 @@ public class TestPathBasedCacheRequests build()); fail("expected an error when adding to a pool with " + "mode 0 (no permissions for anyone)."); - } catch (IOException ioe) { - assertTrue(ioe instanceof PoolWritePermissionDeniedError); + } catch (AccessControlException e) { + GenericTestUtils. + assertExceptionContains("permission denied for pool", e); } try { @@ -378,7 +375,7 @@ public class TestPathBasedCacheRequests fail("expected an error when adding a malformed path " + "to the cache directives."); } catch (IllegalArgumentException e) { - // expected + GenericTestUtils.assertExceptionContains("is not a valid DFS filename", e); } try { @@ -389,59 +386,74 @@ public class TestPathBasedCacheRequests build()); Assert.fail("expected an error when adding a PathBasedCache " + "directive with an empty pool name."); - } catch (IOException ioe) { - Assert.assertTrue(ioe instanceof InvalidPoolNameError); + } catch (IdNotFoundException e) { + GenericTestUtils.assertExceptionContains("pool name was empty", e); } - PathBasedCacheDescriptor deltaD = addAsUnprivileged(delta); + long deltaId = addAsUnprivileged(delta); // We expect the following to succeed, because DistributedFileSystem // qualifies the path. - PathBasedCacheDescriptor relativeD = addAsUnprivileged( + long relativeId = addAsUnprivileged( new PathBasedCacheDirective.Builder(). setPath(new Path("relative")). setPool("pool1"). 
build()); - RemoteIterator<PathBasedCacheDescriptor> iter; - iter = dfs.listPathBasedCacheDescriptors(null, null); - validateListAll(iter, alphaD, alphaD2, betaD, deltaD, relativeD); - iter = dfs.listPathBasedCacheDescriptors("pool3", null); + RemoteIterator<PathBasedCacheDirective> iter; + iter = dfs.listPathBasedCacheDirectives(null); + validateListAll(iter, alphaId, alphaId2, betaId, deltaId, relativeId ); + iter = dfs.listPathBasedCacheDirectives( + new PathBasedCacheDirective.Builder().setPool("pool3").build()); Assert.assertFalse(iter.hasNext()); - iter = dfs.listPathBasedCacheDescriptors("pool1", null); - validateListAll(iter, alphaD, alphaD2, deltaD, relativeD); - iter = dfs.listPathBasedCacheDescriptors("pool2", null); - validateListAll(iter, betaD); - - dfs.removePathBasedCacheDescriptor(betaD); - iter = dfs.listPathBasedCacheDescriptors("pool2", null); + iter = dfs.listPathBasedCacheDirectives( + new PathBasedCacheDirective.Builder().setPool("pool1").build()); + validateListAll(iter, alphaId, alphaId2, deltaId, relativeId ); + iter = dfs.listPathBasedCacheDirectives( + new PathBasedCacheDirective.Builder().setPool("pool2").build()); + validateListAll(iter, betaId); + + dfs.removePathBasedCacheDirective(betaId); + iter = dfs.listPathBasedCacheDirectives( + new PathBasedCacheDirective.Builder().setPool("pool2").build()); Assert.assertFalse(iter.hasNext()); try { - dfs.removePathBasedCacheDescriptor(betaD); + dfs.removePathBasedCacheDirective(betaId); Assert.fail("expected an error when removing a non-existent ID"); - } catch (IOException ioe) { - Assert.assertTrue(ioe instanceof NoSuchIdException); + } catch (IdNotFoundException e) { + GenericTestUtils.assertExceptionContains("id not found", e); } try { - proto.removePathBasedCacheDescriptor(-42l); + proto.removePathBasedCacheDirective(-42l); Assert.fail("expected an error when removing a negative ID"); - } catch (IOException ioe) { - Assert.assertTrue(ioe instanceof InvalidIdException); + } catch (IdNotFoundException e) { + GenericTestUtils.assertExceptionContains( + "invalid non-positive directive ID", e); } try { - proto.removePathBasedCacheDescriptor(43l); + proto.removePathBasedCacheDirective(43l); Assert.fail("expected an error when removing a non-existent ID"); - } catch (IOException ioe) { - Assert.assertTrue(ioe instanceof NoSuchIdException); + } catch (IdNotFoundException e) { + GenericTestUtils.assertExceptionContains("id not found", e); } - dfs.removePathBasedCacheDescriptor(alphaD); - dfs.removePathBasedCacheDescriptor(alphaD2); - dfs.removePathBasedCacheDescriptor(deltaD); - dfs.removePathBasedCacheDescriptor(relativeD); - iter = dfs.listPathBasedCacheDescriptors(null, null); + dfs.removePathBasedCacheDirective(alphaId); + dfs.removePathBasedCacheDirective(alphaId2); + dfs.removePathBasedCacheDirective(deltaId); + + dfs.modifyPathBasedCacheDirective(new PathBasedCacheDirective.Builder(). + setId(relativeId). + setReplication((short)555). + build()); + iter = dfs.listPathBasedCacheDirectives(null); + assertTrue(iter.hasNext()); + PathBasedCacheDirective modified = iter.next(); + assertEquals(relativeId, modified.getId().longValue()); + assertEquals((short)555, modified.getReplication().shortValue()); + dfs.removePathBasedCacheDirective(relativeId); + iter = dfs.listPathBasedCacheDirectives(null); assertFalse(iter.hasNext()); } @@ -481,16 +493,16 @@ public class TestPathBasedCacheRequests new PathBasedCacheDirective.Builder(). 
setPath(new Path(entryPrefix + i)).setPool(pool).build()); } - RemoteIterator<PathBasedCacheDescriptor> dit - = dfs.listPathBasedCacheDescriptors(null, null); + RemoteIterator<PathBasedCacheDirective> dit + = dfs.listPathBasedCacheDirectives(null); for (int i=0; i<numEntries; i++) { assertTrue("Unexpected # of cache entries: " + i, dit.hasNext()); - PathBasedCacheDescriptor cd = dit.next(); - assertEquals(i+1, cd.getEntryId()); + PathBasedCacheDirective cd = dit.next(); + assertEquals(i+1, cd.getId().longValue()); assertEquals(entryPrefix + i, cd.getPath().toUri().getPath()); assertEquals(pool, cd.getPool()); } - assertFalse("Unexpected # of cache descriptors found", dit.hasNext()); + assertFalse("Unexpected # of cache directives found", dit.hasNext()); // Restart namenode cluster.restartNameNode(); @@ -506,15 +518,15 @@ public class TestPathBasedCacheRequests assertEquals(weight, (int)info.getWeight()); assertFalse("Unexpected # of cache pools found", pit.hasNext()); - dit = dfs.listPathBasedCacheDescriptors(null, null); + dit = dfs.listPathBasedCacheDirectives(null); for (int i=0; i<numEntries; i++) { assertTrue("Unexpected # of cache entries: " + i, dit.hasNext()); - PathBasedCacheDescriptor cd = dit.next(); - assertEquals(i+1, cd.getEntryId()); + PathBasedCacheDirective cd = dit.next(); + assertEquals(i+1, cd.getId().longValue()); assertEquals(entryPrefix + i, cd.getPath().toUri().getPath()); assertEquals(pool, cd.getPool()); } - assertFalse("Unexpected # of cache descriptors found", dit.hasNext()); + assertFalse("Unexpected # of cache directives found", dit.hasNext()); } private static void waitForCachedBlocks(NameNode nn, @@ -625,21 +637,16 @@ public class TestPathBasedCacheRequests setPath(new Path(paths.get(i))). setPool(pool). build(); - PathBasedCacheDescriptor descriptor = - nnRpc.addPathBasedCacheDirective(directive); - assertEquals("Descriptor does not match requested path", - new Path(paths.get(i)), descriptor.getPath()); - assertEquals("Descriptor does not match requested pool", pool, - descriptor.getPool()); + nnRpc.addPathBasedCacheDirective(directive); expected += numBlocksPerFile; waitForCachedBlocks(namenode, expected, expected); } // Uncache and check each path in sequence - RemoteIterator<PathBasedCacheDescriptor> entries = - nnRpc.listPathBasedCacheDescriptors(0, null, null); + RemoteIterator<PathBasedCacheDirective> entries = + nnRpc.listPathBasedCacheDirectives(0, null); for (int i=0; i<numFiles; i++) { - PathBasedCacheDescriptor descriptor = entries.next(); - nnRpc.removePathBasedCacheDescriptor(descriptor.getEntryId()); + PathBasedCacheDirective directive = entries.next(); + nnRpc.removePathBasedCacheDirective(directive.getId()); expected -= numBlocksPerFile; waitForCachedBlocks(namenode, expected, expected); } @@ -723,17 +730,15 @@ public class TestPathBasedCacheRequests } waitForCachedBlocks(namenode, 0, 0); // cache entire directory - PathBasedCacheDescriptor descriptor = dfs.addPathBasedCacheDirective( + long id = dfs.addPathBasedCacheDirective( new PathBasedCacheDirective.Builder(). setPath(new Path("/foo")). setReplication((short)2). setPool(pool). build()); - assertEquals("Descriptor does not match requested pool", pool, - descriptor.getPool()); waitForCachedBlocks(namenode, 4, 8); // remove and watch numCached go to 0 - dfs.removePathBasedCacheDescriptor(descriptor); + dfs.removePathBasedCacheDirective(id); waitForCachedBlocks(namenode, 0, 0); } finally { cluster.shutdown();
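The changes above migrate TestPathBasedCacheRequests from the removed PathBasedCacheDescriptor type to plain directive IDs: addPathBasedCacheDirective now returns a long, listing takes a filter directive and yields the directives themselves, and the new modifyPathBasedCacheDirective updates fields in place by ID. A minimal sketch of the resulting client-side lifecycle, using only the DistributedFileSystem calls exercised above (imports match the test file; the pool and path names are illustrative, and a running cluster is assumed):

    // Sketch only: assumes `dfs` is a DistributedFileSystem on a running
    // MiniDFSCluster; "pool1" and "/foo" are illustrative names.
    static void directiveLifecycle(DistributedFileSystem dfs)
        throws IOException {
      dfs.addCachePool(new CachePoolInfo("pool1"));

      // Adding returns the new directive's ID rather than a descriptor.
      long id = dfs.addPathBasedCacheDirective(
          new PathBasedCacheDirective.Builder().
              setPath(new Path("/foo")).
              setPool("pool1").
              build());

      // Listing takes a filter directive (null lists everything).
      RemoteIterator<PathBasedCacheDirective> iter =
          dfs.listPathBasedCacheDirectives(
              new PathBasedCacheDirective.Builder().setPool("pool1").build());
      while (iter.hasNext()) {
        PathBasedCacheDirective d = iter.next();
        System.out.println(d.getId() + " " + d.getPath() + " " + d.getPool());
      }

      // Modification is keyed by ID; the test above sets only the
      // replication, and the directive's other fields survive the call.
      dfs.modifyPathBasedCacheDirective(
          new PathBasedCacheDirective.Builder().
              setId(id).
              setReplication((short)2).
              build());

      // Removal is by ID as well; removing the same ID twice raises
      // IdNotFoundException ("id not found"), as the test asserts.
      dfs.removePathBasedCacheDirective(id);
    }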
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java?rev=1539898&r1=1539897&r2=1539898&view=diff ============================================================================== --- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java (original) +++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java Fri Nov 8 01:44:24 2013 @@ -61,7 +61,6 @@ import org.apache.hadoop.hdfs.protocol.E import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlocks; -import org.apache.hadoop.hdfs.protocol.PathBasedCacheDescriptor; import org.apache.hadoop.hdfs.protocol.PathBasedCacheDirective; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction; import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; @@ -151,7 +150,7 @@ public class TestRetryCacheWithHA { FSNamesystem fsn0 = cluster.getNamesystem(0); LightWeightCache<CacheEntry, CacheEntry> cacheSet = (LightWeightCache<CacheEntry, CacheEntry>) fsn0.getRetryCache().getCacheSet(); - assertEquals(19, cacheSet.size()); + assertEquals(20, cacheSet.size()); Map<CacheEntry, CacheEntry> oldEntries = new HashMap<CacheEntry, CacheEntry>(); @@ -172,7 +171,7 @@ public class TestRetryCacheWithHA { FSNamesystem fsn1 = cluster.getNamesystem(1); cacheSet = (LightWeightCache<CacheEntry, CacheEntry>) fsn1 .getRetryCache().getCacheSet(); - assertEquals(19, cacheSet.size()); + assertEquals(20, cacheSet.size()); iter = cacheSet.iterator(); while (iter.hasNext()) { CacheEntry entry = iter.next(); @@ -740,35 +739,34 @@ public class TestRetryCacheWithHA { /** addPathBasedCacheDirective */ class AddPathBasedCacheDirectiveOp extends AtMostOnceOp { - private String pool; - private String path; - private PathBasedCacheDescriptor descriptor; + private PathBasedCacheDirective directive; + private Long result; - AddPathBasedCacheDirectiveOp(DFSClient client, String pool, String path) { + AddPathBasedCacheDirectiveOp(DFSClient client, + PathBasedCacheDirective directive) { super("addPathBasedCacheDirective", client); - this.pool = pool; - this.path = path; + this.directive = directive; } @Override void prepare() throws Exception { - dfs.addCachePool(new CachePoolInfo(pool)); + dfs.addCachePool(new CachePoolInfo(directive.getPool())); } @Override void invoke() throws Exception { - descriptor = client.addPathBasedCacheDirective( - new PathBasedCacheDirective.Builder(). - setPath(new Path(path)). - setPool(pool). - build()); + result = client.addPathBasedCacheDirective(directive); } @Override boolean checkNamenodeBeforeReturn() throws Exception { for (int i = 0; i < CHECKTIMES; i++) { - RemoteIterator<PathBasedCacheDescriptor> iter = - dfs.listPathBasedCacheDescriptors(pool, new Path(path)); + RemoteIterator<PathBasedCacheDirective> iter = + dfs.listPathBasedCacheDirectives( + new PathBasedCacheDirective.Builder(). + setPool(directive.getPool()). + setPath(directive.getPath()). 
+ build()); if (iter.hasNext()) { return true; } @@ -779,43 +777,99 @@ public class TestRetryCacheWithHA { @Override Object getResult() { - return descriptor; + return result; } } - /** removePathBasedCacheDescriptor */ - class RemovePathBasedCacheDescriptorOp extends AtMostOnceOp { - private String pool; - private String path; - private PathBasedCacheDescriptor descriptor; + /** modifyPathBasedCacheDirective */ + class ModifyPathBasedCacheDirectiveOp extends AtMostOnceOp { + private final PathBasedCacheDirective directive; + private final short newReplication; + private long id; - RemovePathBasedCacheDescriptorOp(DFSClient client, String pool, - String path) { - super("removePathBasedCacheDescriptor", client); - this.pool = pool; - this.path = path; + ModifyPathBasedCacheDirectiveOp(DFSClient client, + PathBasedCacheDirective directive, short newReplication) { + super("modifyPathBasedCacheDirective", client); + this.directive = directive; + this.newReplication = newReplication; } @Override void prepare() throws Exception { - dfs.addCachePool(new CachePoolInfo(pool)); - descriptor = dfs.addPathBasedCacheDirective( + dfs.addCachePool(new CachePoolInfo(directive.getPool())); + id = client.addPathBasedCacheDirective(directive); + } + + @Override + void invoke() throws Exception { + client.modifyPathBasedCacheDirective( new PathBasedCacheDirective.Builder(). - setPath(new Path(path)). - setPool(pool). + setId(id). + setReplication(newReplication). build()); } @Override + boolean checkNamenodeBeforeReturn() throws Exception { + for (int i = 0; i < CHECKTIMES; i++) { + RemoteIterator<PathBasedCacheDirective> iter = + dfs.listPathBasedCacheDirectives( + new PathBasedCacheDirective.Builder(). + setPool(directive.getPool()). + setPath(directive.getPath()). + build()); + while (iter.hasNext()) { + PathBasedCacheDirective result = iter.next(); + if ((result.getId() == id) && + (result.getReplication().shortValue() == newReplication)) { + return true; + } + } + Thread.sleep(1000); + } + return false; + } + + @Override + Object getResult() { + return null; + } + } + + /** removePathBasedCacheDirective */ + class RemovePathBasedCacheDirectiveOp extends AtMostOnceOp { + private PathBasedCacheDirective directive; + private long id; + + RemovePathBasedCacheDirectiveOp(DFSClient client, String pool, + String path) { + super("removePathBasedCacheDirective", client); + this.directive = new PathBasedCacheDirective.Builder(). + setPool(pool). + setPath(new Path(path)). + build(); + } + + @Override + void prepare() throws Exception { + dfs.addCachePool(new CachePoolInfo(directive.getPool())); + id = dfs.addPathBasedCacheDirective(directive); + } + + @Override void invoke() throws Exception { - client.removePathBasedCacheDescriptor(descriptor.getEntryId()); + client.removePathBasedCacheDirective(id); } @Override boolean checkNamenodeBeforeReturn() throws Exception { for (int i = 0; i < CHECKTIMES; i++) { - RemoteIterator<PathBasedCacheDescriptor> iter = - dfs.listPathBasedCacheDescriptors(pool, new Path(path)); + RemoteIterator<PathBasedCacheDirective> iter = + dfs.listPathBasedCacheDirectives( + new PathBasedCacheDirective.Builder(). + setPool(directive.getPool()). + setPath(directive.getPath()). 
+ build()); if (!iter.hasNext()) { return true; } @@ -1020,14 +1074,30 @@ public class TestRetryCacheWithHA { @Test (timeout=60000) public void testAddPathBasedCacheDirective() throws Exception { DFSClient client = genClientWithDummyHandler(); - AtMostOnceOp op = new AddPathBasedCacheDirectiveOp(client, "pool", "/path"); + AtMostOnceOp op = new AddPathBasedCacheDirectiveOp(client, + new PathBasedCacheDirective.Builder(). + setPool("pool"). + setPath(new Path("/path")). + build()); + testClientRetryWithFailover(op); + } + + @Test (timeout=60000) + public void testModifyPathBasedCacheDirective() throws Exception { + DFSClient client = genClientWithDummyHandler(); + AtMostOnceOp op = new ModifyPathBasedCacheDirectiveOp(client, + new PathBasedCacheDirective.Builder(). + setPool("pool"). + setPath(new Path("/path")). + setReplication((short)1).build(), + (short)555); testClientRetryWithFailover(op); } @Test (timeout=60000) public void testRemovePathBasedCacheDescriptor() throws Exception { DFSClient client = genClientWithDummyHandler(); - AtMostOnceOp op = new RemovePathBasedCacheDescriptorOp(client, "pool", + AtMostOnceOp op = new RemovePathBasedCacheDirectiveOp(client, "pool", "/path"); testClientRetryWithFailover(op); } Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotBlocksMap.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotBlocksMap.java?rev=1539898&r1=1539897&r2=1539898&view=diff ============================================================================== --- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotBlocksMap.java (original) +++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotBlocksMap.java Fri Nov 8 01:44:24 2013 @@ -32,6 +32,7 @@ import org.apache.hadoop.hdfs.DFSConfigK import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo; import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager; import org.apache.hadoop.hdfs.server.namenode.FSDirectory; @@ -274,4 +275,76 @@ public class TestSnapshotBlocksMap { "s2/bar"); DFSTestUtil.readFile(hdfs, new Path(bar2SnapshotPath)); } + + /** + * Make sure we delete 0-sized block when deleting an INodeFileUCWithSnapshot + */ + @Test + public void testDeletionWithZeroSizeBlock() throws Exception { + final Path foo = new Path("/foo"); + final Path bar = new Path(foo, "bar"); + DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPLICATION, 0L); + + SnapshotTestHelper.createSnapshot(hdfs, foo, "s0"); + hdfs.append(bar); + + INodeFile barNode = fsdir.getINode4Write(bar.toString()).asFile(); + BlockInfo[] blks = barNode.getBlocks(); + assertEquals(1, blks.length); + assertEquals(BLOCKSIZE, blks[0].getNumBytes()); + ExtendedBlock previous = new ExtendedBlock(fsn.getBlockPoolId(), blks[0]); + cluster.getNameNodeRpc() + .addBlock(bar.toString(), hdfs.getClient().getClientName(), previous, + null, barNode.getId(), null); + + SnapshotTestHelper.createSnapshot(hdfs, foo, "s1"); + + barNode = fsdir.getINode4Write(bar.toString()).asFile(); + blks = 
barNode.getBlocks(); + assertEquals(2, blks.length); + assertEquals(BLOCKSIZE, blks[0].getNumBytes()); + assertEquals(0, blks[1].getNumBytes()); + + hdfs.delete(bar, true); + final Path sbar = SnapshotTestHelper.getSnapshotPath(foo, "s1", + bar.getName()); + barNode = fsdir.getINode(sbar.toString()).asFile(); + blks = barNode.getBlocks(); + assertEquals(1, blks.length); + assertEquals(BLOCKSIZE, blks[0].getNumBytes()); + } + + /** Make sure we delete 0-sized block when deleting an INodeFileUC */ + @Test + public void testDeletionWithZeroSizeBlock2() throws Exception { + final Path foo = new Path("/foo"); + final Path subDir = new Path(foo, "sub"); + final Path bar = new Path(subDir, "bar"); + DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPLICATION, 0L); + + hdfs.append(bar); + + INodeFile barNode = fsdir.getINode4Write(bar.toString()).asFile(); + BlockInfo[] blks = barNode.getBlocks(); + assertEquals(1, blks.length); + ExtendedBlock previous = new ExtendedBlock(fsn.getBlockPoolId(), blks[0]); + cluster.getNameNodeRpc() + .addBlock(bar.toString(), hdfs.getClient().getClientName(), previous, + null, barNode.getId(), null); + + SnapshotTestHelper.createSnapshot(hdfs, foo, "s1"); + + barNode = fsdir.getINode4Write(bar.toString()).asFile(); + blks = barNode.getBlocks(); + assertEquals(2, blks.length); + assertEquals(BLOCKSIZE, blks[0].getNumBytes()); + assertEquals(0, blks[1].getNumBytes()); + + hdfs.delete(subDir, true); + final Path sbar = SnapshotTestHelper.getSnapshotPath(foo, "s1", "sub/bar"); + barNode = fsdir.getINode(sbar.toString()).asFile(); + blks = barNode.getBlocks(); + assertEquals(1, blks.length); + assertEquals(BLOCKSIZE, blks[0].getNumBytes()); + } } Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored?rev=1539898&r1=1539897&r2=1539898&view=diff ============================================================================== Binary files - no diff available. 
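Both new TestSnapshotBlocksMap cases above follow the same shape: re-open a file for append so the NameNode allocates a zero-length last block, take a snapshot, delete the live file, and assert that the snapshot copy keeps only the finalized full block. Condensed into one method, using only the fixtures and helpers the tests themselves use (hdfs, fsdir, fsn, cluster, BLOCKSIZE, and REPLICATION come from the test class; the paths and snapshot name are illustrative):

    // Condensed restatement of the pattern shared by the two tests above.
    static void zeroSizeBlockPattern() throws Exception {
      final Path foo = new Path("/foo");
      final Path bar = new Path(foo, "bar");
      DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPLICATION, 0L);

      // Re-open for append and ask the NameNode for a new last block;
      // nothing is written, so the new block stays at 0 bytes.
      hdfs.append(bar);
      INodeFile barNode = fsdir.getINode4Write(bar.toString()).asFile();
      ExtendedBlock previous =
          new ExtendedBlock(fsn.getBlockPoolId(), barNode.getBlocks()[0]);
      cluster.getNameNodeRpc().addBlock(bar.toString(),
          hdfs.getClient().getClientName(), previous, null,
          barNode.getId(), null);

      SnapshotTestHelper.createSnapshot(hdfs, foo, "s1");
      hdfs.delete(bar, true);

      // The snapshot copy must retain the full first block, and the
      // zero-sized last block must have been cleaned up on deletion.
      Path sbar = SnapshotTestHelper.getSnapshotPath(foo, "s1", bar.getName());
      BlockInfo[] blks = fsdir.getINode(sbar.toString()).asFile().getBlocks();
      assertEquals(1, blks.length);
      assertEquals(BLOCKSIZE, blks[0].getNumBytes());
    }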
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml?rev=1539898&r1=1539897&r2=1539898&view=diff ============================================================================== --- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml (original) +++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml Fri Nov 8 01:44:24 2013 @@ -843,6 +843,7 @@ <OPCODE>OP_ADD_PATH_BASED_CACHE_DIRECTIVE</OPCODE> <DATA> <TXID>63</TXID> + <ID>1</ID> <PATH>/bar</PATH> <REPLICATION>1</REPLICATION> <POOL>poolparty</POOL> @@ -851,10 +852,20 @@ </DATA> </RECORD> <RECORD> - <OPCODE>OP_REMOVE_PATH_BASED_CACHE_DESCRIPTOR</OPCODE> + <OPCODE>OP_MODIFY_PATH_BASED_CACHE_DIRECTIVE</OPCODE> <DATA> <TXID>64</TXID> <ID>1</ID> + <REPLICATION>2</REPLICATION> + <RPC_CLIENTID></RPC_CLIENTID> + <RPC_CALLID>-2</RPC_CALLID> + </DATA> + </RECORD> + <RECORD> + <OPCODE>OP_REMOVE_PATH_BASED_CACHE_DIRECTIVE</OPCODE> + <DATA> + <TXID>65</TXID> + <ID>1</ID> <RPC_CLIENTID>27ac79f0-d378-4933-824b-c2a188968d97</RPC_CLIENTID> <RPC_CALLID>78</RPC_CALLID> </DATA> @@ -862,7 +873,7 @@ <RECORD> <OPCODE>OP_REMOVE_CACHE_POOL</OPCODE> <DATA> - <TXID>65</TXID> + <TXID>66</TXID> <POOLNAME>poolparty</POOLNAME> <RPC_CLIENTID>27ac79f0-d378-4933-824b-c2a188968d97</RPC_CLIENTID> <RPC_CALLID>79</RPC_CALLID> @@ -871,7 +882,7 @@ <RECORD> <OPCODE>OP_ADD</OPCODE> <DATA> - <TXID>66</TXID> + <TXID>67</TXID> <LENGTH>0</LENGTH> <INODEID>16393</INODEID> <PATH>/hard-lease-recovery-test</PATH> @@ -893,21 +904,21 @@ <RECORD> <OPCODE>OP_ALLOCATE_BLOCK_ID</OPCODE> <DATA> - <TXID>67</TXID> + <TXID>68</TXID> <BLOCK_ID>1073741834</BLOCK_ID> </DATA> </RECORD> <RECORD> <OPCODE>OP_SET_GENSTAMP_V2</OPCODE> <DATA> - <TXID>68</TXID> + <TXID>69</TXID> <GENSTAMPV2>1010</GENSTAMPV2> </DATA> </RECORD> <RECORD> <OPCODE>OP_UPDATE_BLOCKS</OPCODE> <DATA> - <TXID>69</TXID> + <TXID>70</TXID> <PATH>/hard-lease-recovery-test</PATH> <BLOCK> <BLOCK_ID>1073741834</BLOCK_ID> @@ -921,7 +932,7 @@ <RECORD> <OPCODE>OP_UPDATE_BLOCKS</OPCODE> <DATA> - <TXID>70</TXID> + <TXID>71</TXID> <PATH>/hard-lease-recovery-test</PATH> <BLOCK> <BLOCK_ID>1073741834</BLOCK_ID> @@ -935,14 +946,14 @@ <RECORD> <OPCODE>OP_SET_GENSTAMP_V2</OPCODE> <DATA> - <TXID>71</TXID> + <TXID>72</TXID> <GENSTAMPV2>1011</GENSTAMPV2> </DATA> </RECORD> <RECORD> <OPCODE>OP_REASSIGN_LEASE</OPCODE> <DATA> - <TXID>72</TXID> + <TXID>73</TXID> <LEASEHOLDER>DFSClient_NONMAPREDUCE_-134124999_1</LEASEHOLDER> <PATH>/hard-lease-recovery-test</PATH> <NEWHOLDER>HDFS_NameNode</NEWHOLDER> @@ -951,14 +962,14 @@ <RECORD> <OPCODE>OP_SET_GENSTAMP_V2</OPCODE> <DATA> - <TXID>73</TXID> + <TXID>74</TXID> <GENSTAMPV2>1012</GENSTAMPV2> </DATA> </RECORD> <RECORD> <OPCODE>OP_REASSIGN_LEASE</OPCODE> <DATA> - <TXID>74</TXID> + <TXID>75</TXID> <LEASEHOLDER>HDFS_NameNode</LEASEHOLDER> <PATH>/hard-lease-recovery-test</PATH> <NEWHOLDER>HDFS_NameNode</NEWHOLDER> @@ -967,7 +978,7 @@ <RECORD> <OPCODE>OP_CLOSE</OPCODE> <DATA> - <TXID>75</TXID> + <TXID>76</TXID> <LENGTH>0</LENGTH> <INODEID>0</INODEID> <PATH>/hard-lease-recovery-test</PATH> @@ -992,7 +1003,7 @@ <RECORD> <OPCODE>OP_END_LOG_SEGMENT</OPCODE> <DATA> - <TXID>76</TXID> + <TXID>77</TXID> </DATA> </RECORD> </EDITS> Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testCacheAdminConf.xml URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testCacheAdminConf.xml?rev=1539898&r1=1539897&r2=1539898&view=diff ============================================================================== --- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testCacheAdminConf.xml (original) +++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testCacheAdminConf.xml Fri Nov 8 01:44:24 2013 @@ -358,5 +358,20 @@ </comparators> </test> + <test> + <description>Testing the help usage</description> + <test-commands> + <cache-admin-command>-help -addPool</cache-admin-command> + </test-commands> + <cleanup-commands> + </cleanup-commands> + <comparators> + <comparator> + <type>SubstringComparator</type> + <expected-output>Add a new cache pool.</expected-output> + </comparator> + </comparators> + </test> + </tests> </configuration>
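The test case added to testCacheAdminConf.xml checks that per-command help is wired up: it is the scripted equivalent of running `hdfs cacheadmin -help -addPool` at a shell and looking for the substring "Add a new cache pool." in the output, expressed with the same test-commands/comparator conventions as the existing cases in the file.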