Updated Branches: refs/heads/master 4c51fb26d -> a54dbe2e2
ACCUMULO-1629 skip the table itself when looking for other references


Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/a54dbe2e
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/a54dbe2e
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/a54dbe2e

Branch: refs/heads/master
Commit: a54dbe2e288918f167ffe527a4c61fbc7a38cdbf
Parents: 4c51fb2
Author: Eric Newton <[email protected]>
Authored: Mon Aug 5 09:43:22 2013 -0400
Committer: Eric Newton <[email protected]>
Committed: Mon Aug 5 09:43:22 2013 -0400

----------------------------------------------------------------------
 .../server/master/tableOps/DeleteTable.java    |  7 ++++-
 .../accumulo/test/functional/CloneTestIT.java  | 33 +++++++++++++++++++-
 2 files changed, 38 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/accumulo/blob/a54dbe2e/server/src/main/java/org/apache/accumulo/server/master/tableOps/DeleteTable.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/master/tableOps/DeleteTable.java b/server/src/main/java/org/apache/accumulo/server/master/tableOps/DeleteTable.java
index 6f3b49a..6c6b0d6 100644
--- a/server/src/main/java/org/apache/accumulo/server/master/tableOps/DeleteTable.java
+++ b/server/src/main/java/org/apache/accumulo/server/master/tableOps/DeleteTable.java
@@ -17,6 +17,7 @@
 package org.apache.accumulo.server.master.tableOps;
 
 import java.io.IOException;
+import java.util.Arrays;
 import java.util.Collections;
 import java.util.Map.Entry;
 
@@ -130,7 +131,11 @@ class CleanUp extends MasterRepo {
       Connector conn = master.getConnector();
       BatchScanner bs = conn.createBatchScanner(MetadataTable.NAME, Authorizations.EMPTY, 8);
       try {
-        bs.setRanges(Collections.singleton(MetadataSchema.TabletsSection.getRange()));
+        Range allTables = MetadataSchema.TabletsSection.getRange();
+        Range tableRange = MetadataSchema.TabletsSection.getRange(tableId);
+        Range beforeTable = new Range(allTables.getStartKey(), true, tableRange.getStartKey(), false);
+        Range afterTable = new Range(tableRange.getEndKey(), false, allTables.getEndKey(), true);
+        bs.setRanges(Arrays.asList(beforeTable, afterTable));
         bs.fetchColumnFamily(DataFileColumnFamily.NAME);
         IteratorSetting cfg = new IteratorSetting(40, "grep", GrepIterator.class);
         GrepIterator.setTerm(cfg, "/" + tableId + "/");

http://git-wip-us.apache.org/repos/asf/accumulo/blob/a54dbe2e/test/src/test/java/org/apache/accumulo/test/functional/CloneTestIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/CloneTestIT.java b/test/src/test/java/org/apache/accumulo/test/functional/CloneTestIT.java
index ba40b18..ee2b26a 100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/CloneTestIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/CloneTestIT.java
@@ -16,8 +16,13 @@
  */
 package org.apache.accumulo.test.functional;
 
+import static org.junit.Assert.*;
+
+import java.util.Arrays;
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
+import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
 import java.util.Set;
@@ -28,11 +33,17 @@
 import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.client.MutationsRejectedException;
 import org.apache.accumulo.core.client.Scanner;
 import org.apache.accumulo.core.client.TableNotFoundException;
+import org.apache.accumulo.core.client.admin.DiskUsage;
 import org.apache.accumulo.core.conf.Property;
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.security.Authorizations;
+import org.apache.commons.math.stat.clustering.Cluster;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
 import org.junit.Assert;
 import org.junit.Test;
 
@@ -125,9 +136,29 @@ public class CloneTestIT extends SimpleMacIT {
   public void testDeleteClone() throws Exception {
     String table1 = makeTableName();
     String table2 = makeTableName();
+    String table3 = makeTableName();
 
     Connector c = getConnector();
-    
+
+    // verify that deleting a new table removes the files
+    c.tableOperations().create(table3);
+    writeData(table3, c).close();
+    c.tableOperations().flush(table3, null, null, true);
+    // check for files
+    FileSystem fs = FileSystem.get(new Configuration());
+    String id = c.tableOperations().tableIdMap().get(table3);
+    FileStatus[] status = fs.listStatus(new Path(rootPath() + "/accumulo/tables/" + id));
+    assertTrue(status.length > 0);
+    // verify disk usage
+    List<DiskUsage> diskUsage = c.tableOperations().getDiskUsage(Collections.singleton(table3));
+    assertEquals(1, diskUsage.size());
+    assertTrue(diskUsage.get(0).getUsage() > 100);
+    // delete the table
+    c.tableOperations().delete(table3);
+    // verify its gone from the file system
+    status = fs.listStatus(new Path(rootPath() + "/accumulo/tables/" + id));
+    assertTrue(status == null || status.length == 0);
+
     c.tableOperations().create(table1);
     BatchWriter bw = writeData(table1, c);
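
The DeleteTable change narrows the metadata scan that looks for other tables still referencing the deleted table's files: rather than scanning the entire tablets section, where the table's own entries always match the "/" + tableId + "/" grep term, it scans only the two ranges that bracket the table's own section. Below is a minimal sketch of that range-splitting idea against the public Range API; the class and method names are illustrative only, not part of the commit.

import java.util.Arrays;
import java.util.List;

import org.apache.accumulo.core.data.Range;

public class RangeSplitSketch {
  /**
   * Return the portions of 'all' that fall before and after 'excluded', so a
   * scan over the result sees every row of 'all' except the excluded ones.
   */
  static List<Range> excludeSubRange(Range all, Range excluded) {
    // everything up to, but not including, the first key of the excluded sub-range
    Range before = new Range(all.getStartKey(), true, excluded.getStartKey(), false);
    // everything after the last key of the excluded sub-range, through the end of 'all'
    Range after = new Range(excluded.getEndKey(), false, all.getEndKey(), true);
    return Arrays.asList(before, after);
  }
}

Handed to BatchScanner.setRanges(...), these two ranges cover every tablet entry except those of the table being deleted, so only genuine outside references (for example, a clone still pointing at files under /<tableId>/) keep the files from being removed.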

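The new assertions in testDeleteClone cover the non-clone case: a freshly created and flushed table should leave files under <root>/accumulo/tables/<id>, report non-trivial disk usage, and have that directory emptied once the table is deleted. The clone case is why the reference check exists at all, since a clone initially shares its source's files. Here is a rough sketch of how that sharing shows up through the getDiskUsage API; the class, method, and table names are hypothetical, and it assumes the source table already holds flushed data.

import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;

import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.client.admin.DiskUsage;

public class CloneUsageSketch {
  // 'conn' is an authenticated Connector; 'src' is an existing table with flushed data
  static void printSharedUsage(Connector conn, String src, String copy) throws Exception {
    conn.tableOperations().clone(src, copy, true, Collections.<String,String> emptyMap(),
        Collections.<String> emptySet());
    // the clone references the same files as its source, so asking for the usage of
    // both tables together should report the shared bytes once, not twice
    List<DiskUsage> usage = conn.tableOperations()
        .getDiskUsage(new HashSet<String>(Arrays.asList(src, copy)));
    for (DiskUsage du : usage)
      System.out.println(du.getTables() + " -> " + du.getUsage() + " bytes");
  }
}

Deleting the source afterwards should leave the shared files in place as long as the clone's metadata still references them, which is what the narrowed scan in DeleteTable checks for.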