Fixed flaky BlacklistingCompactionsTest, switched to fixed-size types and increased corruption size
patch by Stefania Alborghetti; reviewed by Joel Knighton for CASSANDRA-12359

Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo
Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/f5c9d6e4
Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/f5c9d6e4
Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/f5c9d6e4

Branch: refs/heads/cassandra-3.9
Commit: f5c9d6e49c2bec23d0eace78c64866fa3dafaac1
Parents: 5acfce6
Author: Stefania Alborghetti <[email protected]>
Authored: Tue Aug 2 12:05:11 2016 +0800
Committer: Stefania Alborghetti <[email protected]>
Committed: Wed Aug 3 10:59:47 2016 +0800

----------------------------------------------------------------------
 CHANGES.txt                                     |  1 +
 .../compaction/BlacklistingCompactionsTest.java | 33 ++++++++++++++------
 2 files changed, 25 insertions(+), 9 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cassandra/blob/f5c9d6e4/CHANGES.txt
----------------------------------------------------------------------
diff --git a/CHANGES.txt b/CHANGES.txt
index c2d7a4d..d78345a 100644
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@ -1,4 +1,5 @@
 3.0.9
+ * Fixed flaky BlacklistingCompactionsTest, switched to fixed-size types and increased corruption size (CASSANDRA-12359)
  * Rerun ReplicationAwareTokenAllocatorTest on failure to avoid flakiness (CASSANDRA-12277)
  * Exception when computing read-repair for range tombstones (CASSANDRA-12263)
  * Lost counter writes in compact table and static columns (CASSANDRA-12219)


http://git-wip-us.apache.org/repos/asf/cassandra/blob/f5c9d6e4/test/unit/org/apache/cassandra/db/compaction/BlacklistingCompactionsTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/db/compaction/BlacklistingCompactionsTest.java b/test/unit/org/apache/cassandra/db/compaction/BlacklistingCompactionsTest.java
index df2d8a9..6378e09 100644
--- a/test/unit/org/apache/cassandra/db/compaction/BlacklistingCompactionsTest.java
+++ b/test/unit/org/apache/cassandra/db/compaction/BlacklistingCompactionsTest.java
@@ -39,6 +39,7 @@ import org.apache.cassandra.SchemaLoader;
 import org.apache.cassandra.Util;
 import org.apache.cassandra.config.*;
 import org.apache.cassandra.db.*;
+import org.apache.cassandra.db.marshal.LongType;
 import org.apache.cassandra.exceptions.ConfigurationException;
 import org.apache.cassandra.io.sstable.format.SSTableReader;
 import org.apache.cassandra.io.util.FileUtils;
@@ -71,20 +72,29 @@ public class BlacklistingCompactionsTest
     {
         long seed = System.nanoTime();
         //long seed = 754271160974509L; // CASSANDRA-9530: use this seed to reproduce compaction failures if reading empty rows
+        //long seed = 2080431860597L; // CASSANDRA-12359: use this seed to reproduce undetected corruptions
         logger.info("Seed {}", seed);
         random = new Random(seed);
 
         SchemaLoader.prepareServer();
         SchemaLoader.createKeyspace(KEYSPACE1,
                                     KeyspaceParams.simple(1),
-                                    SchemaLoader.standardCFMD(KEYSPACE1, STANDARD_STCS).compaction(CompactionParams.DEFAULT),
-                                    SchemaLoader.standardCFMD(KEYSPACE1, STANDARD_LCS).compaction(CompactionParams.lcs(Collections.emptyMap())));
+                                    makeTable(STANDARD_STCS).compaction(CompactionParams.DEFAULT),
+                                    makeTable(STANDARD_LCS).compaction(CompactionParams.lcs(Collections.emptyMap())));
 
         maxValueSize = DatabaseDescriptor.getMaxValueSize();
         DatabaseDescriptor.setMaxValueSize(1024 * 1024);
 
         closeStdErr();
     }
 
+    /**
+     * Return table metadata; we use fixed-size types to increase the chance of detecting corrupt data
+     */
+    private static CFMetaData makeTable(String tableName)
+    {
+        return SchemaLoader.standardCFMD(KEYSPACE1, tableName, 1, LongType.instance, LongType.instance, LongType.instance);
+    }
+
     @AfterClass
     public static void tearDown()
     {
@@ -121,6 +131,10 @@ public class BlacklistingCompactionsTest
 
         final int ROWS_PER_SSTABLE = 10;
         final int SSTABLES = cfs.metadata.params.minIndexInterval * 2 / ROWS_PER_SSTABLE;
+        final int SSTABLES_TO_CORRUPT = 8;
+
+        assertTrue(String.format("Not enough sstables (%d), expected at least %d sstables to corrupt", SSTABLES, SSTABLES_TO_CORRUPT),
+                   SSTABLES > SSTABLES_TO_CORRUPT);
 
         // disable compaction while flushing
         cfs.disableAutoCompaction();
@@ -136,8 +150,8 @@ public class BlacklistingCompactionsTest
                 DecoratedKey key = Util.dk(String.valueOf(i));
                 long timestamp = j * ROWS_PER_SSTABLE + i;
                 new RowUpdateBuilder(cfs.metadata, timestamp, key.getKey())
-                        .clustering("cols" + "i")
-                        .add("val", "val" + i)
+                        .clustering(Long.valueOf(i))
+                        .add("val", Long.valueOf(i))
                         .build()
                         .applyUnsafe();
                 maxTimestampExpected = Math.max(timestamp, maxTimestampExpected);
@@ -150,23 +164,24 @@ public class BlacklistingCompactionsTest
 
         Collection<SSTableReader> sstables = cfs.getLiveSSTables();
         int currentSSTable = 0;
-        int sstablesToCorrupt = 8;
 
         // corrupt first 'sstablesToCorrupt' SSTables
         for (SSTableReader sstable : sstables)
         {
-            if (currentSSTable + 1 > sstablesToCorrupt)
+            if (currentSSTable + 1 > SSTABLES_TO_CORRUPT)
                 break;
 
             RandomAccessFile raf = null;
 
             try
             {
-                int corruptionSize = 50;
+                int corruptionSize = 100;
                 raf = new RandomAccessFile(sstable.getFilename(), "rw");
                 assertNotNull(raf);
                 assertTrue(raf.length() > corruptionSize);
-                raf.seek(random.nextInt((int)(raf.length() - corruptionSize)));
+                long pos = random.nextInt((int)(raf.length() - corruptionSize));
+                logger.info("Corrupting sstable {} [{}] at pos {} / {}", currentSSTable, sstable.getFilename(), pos, raf.length());
+                raf.seek(pos);
                 // We want to write something large enough that the corruption cannot get undetected
                 // (even without compression)
                 byte[] corruption = new byte[corruptionSize];
@@ -203,6 +218,6 @@ public class BlacklistingCompactionsTest
         }
 
         cfs.truncateBlocking();
-        assertEquals(sstablesToCorrupt, failures);
+        assertEquals(SSTABLES_TO_CORRUPT, failures);
     }
 }
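Note on the technique: the corruption step in this patch boils down to seeking to a random offset in an SSTable file and overwriting 100 bytes with random data. The standalone sketch below is illustrative only (the class name and command-line handling are ours, not part of the patch); it applies the same technique to an arbitrary file passed as the first argument:

    import java.io.IOException;
    import java.io.RandomAccessFile;
    import java.util.Random;

    // Illustrative sketch only: mirrors the corruption loop in
    // BlacklistingCompactionsTest for a single arbitrary file.
    public class CorruptFileSketch
    {
        public static void main(String[] args) throws IOException
        {
            // Log the seed so a failing run can be reproduced, as the test does.
            long seed = System.nanoTime();
            Random random = new Random(seed);
            int corruptionSize = 100; // the patch raises this from 50

            try (RandomAccessFile raf = new RandomAccessFile(args[0], "rw"))
            {
                if (raf.length() <= corruptionSize)
                    throw new IllegalArgumentException("File too small to corrupt");

                // Pick an offset that leaves room for the full corruption span.
                long pos = random.nextInt((int) (raf.length() - corruptionSize));
                System.out.printf("Corrupting %s at pos %d / %d (seed %d)%n",
                                  args[0], pos, raf.length(), seed);
                raf.seek(pos);

                // Write enough random bytes that the damage cannot go
                // undetected, even without compression.
                byte[] corruption = new byte[corruptionSize];
                random.nextBytes(corruption);
                raf.write(corruption);
            }
        }
    }

Doubling corruptionSize to 100 bytes, together with the switch to fixed-size LongType columns, lowers the odds that a short burst of random bytes still parses as a plausible variable-length value, which is what let corruption occasionally go undetected and made the test flaky.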
