This is an automated email from the ASF dual-hosted git repository.

marcuse pushed a commit to branch cassandra-4.0
in repository https://gitbox.apache.org/repos/asf/cassandra.git
commit 2687cf1edf5d61379475f5a4e65a112239649b8a
Merge: a87055d eeec360
Author: Marcus Eriksson <[email protected]>
AuthorDate: Wed Feb 16 10:17:14 2022 +0100

    Merge branch 'cassandra-3.11' into cassandra-4.0

 CHANGES.txt                                        |  3 +
 .../org/apache/cassandra/db/lifecycle/LogFile.java | 22 +++---
 .../cassandra/db/lifecycle/LogTransactionTest.java | 86 ++++++++++++++------
 3 files changed, 76 insertions(+), 35 deletions(-)

diff --cc CHANGES.txt
index 937bedf,402048f..c64b602
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@@ -1,43 -1,20 +1,46 @@@
-3.11.13
-Merged from 3.0:
++4.0.4
+ * Lazy transaction log replica creation allows incorrect replica content divergence during anticompaction (CASSANDRA-17273)
+
+4.0.3
+ * Deprecate otc_coalescing_strategy, otc_coalescing_window_us, otc_coalescing_enough_coalesced_messages,
+   otc_backlog_expiration_interval_ms (CASSANDRA-17377)
+ * Improve start up processing of Incremental Repair information read from system.repairs (CASSANDRA-17342)
-3.11.12
- * Upgrade snakeyaml to 1.26 in 3.11 (CASSANDRA-17028)
+4.0.2
+ * Full Java 11 support (CASSANDRA-16894)
+ * Remove unused 'geomet' package from cqlsh path (CASSANDRA-17271)
+ * Removed unused 'cql' dependency (CASSANDRA-17247)
+ * Don't block gossip when clearing repair snapshots (CASSANDRA-17168)
+ * Deduplicate warnings for deprecated parameters (changed names) (CASSANDRA-17160)
+ * Update ant-junit to version 1.10.12 (CASSANDRA-17218)
+ * Add droppable tombstone metrics to nodetool tablestats (CASSANDRA-16308)
+ * Fix disk failure triggered when enabling FQL on an unclean directory (CASSANDRA-17136)
+ * Fixed broken classpath when multiple jars in build directory (CASSANDRA-17129)
+ * DebuggableThreadPoolExecutor does not propagate client warnings (CASSANDRA-17072)
+ * internode_send_buff_size_in_bytes and internode_recv_buff_size_in_bytes have new names.
+   Backward compatibility with the old names added (CASSANDRA-17141)
+ * Remove unused configuration parameters from cassandra.yaml (CASSANDRA-17132)
+ * Queries performed with NODE_LOCAL consistency level do not update request metrics (CASSANDRA-17052)
+ * Fix multiple full sources can be select unexpectedly for bootstrap streaming (CASSANDRA-16945)
+ * Fix cassandra.yaml formatting of parameters (CASSANDRA-17131)
+ * Add backward compatibility for CQLSSTableWriter Date fields (CASSANDRA-17117)
+ * Push initial client connection messages to trace (CASSANDRA-17038)
+ * Correct the internode message timestamp if sending node has wrapped (CASSANDRA-16997)
+ * Avoid race causing us to return null in RangesAtEndpoint (CASSANDRA-16965)
+ * Avoid rewriting all sstables during cleanup when transient replication is enabled (CASSANDRA-16966)
+ * Prevent CQLSH from failure on Python 3.10 (CASSANDRA-16987)
+ * Avoid trying to acquire 0 permits from the rate limiter when taking snapshot (CASSANDRA-16872)
+ * Upgrade Caffeine to 2.5.6 (CASSANDRA-15153)
+ * Include SASI components to snapshots (CASSANDRA-15134)
+ * Fix missed wait latencies in the output of `nodetool tpstats -F` (CASSANDRA-16938)
+ * Remove all the state pollution between tests in SSTableReaderTest (CASSANDRA-16888)
+ * Delay auth setup until after gossip has settled to avoid unavailables on startup (CASSANDRA-16783)
+ * Fix clustering order logic in CREATE MATERIALIZED VIEW (CASSANDRA-16898)
+ * org.apache.cassandra.db.rows.ArrayCell#unsharedHeapSizeExcludingData includes data twice (CASSANDRA-16900)
+ * Exclude Jackson 1.x transitive dependency of hadoop* provided dependencies (CASSANDRA-16854)
+Merged from 3.11:
  * Add key validation to ssstablescrub (CASSANDRA-16969)
  * Update Jackson from 2.9.10 to 2.12.5 (CASSANDRA-16851)
- * Include SASI components to snapshots (CASSANDRA-15134)
  * Make assassinate more resilient to missing tokens (CASSANDRA-16847)
- * Exclude Jackson 1.x transitive dependency of hadoop* provided dependencies (CASSANDRA-16854)
- * Validate SASI tokenizer options before adding index to schema (CASSANDRA-15135)
- * Fixup scrub output when no data post-scrub and clear up old use of row, which really means partition (CASSANDRA-16835)
- * Fix ant-junit dependency issue (CASSANDRA-16827)
- * Reduce thread contention in CommitLogSegment and HintsBuffer (CASSANDRA-16072)
- * Avoid sending CDC column if not enabled (CASSANDRA-16770)
 Merged from 3.0:
  * Fix conversion from megabits to bytes in streaming rate limiter (CASSANDRA-17243)
  * Upgrade logback to 1.2.9 (CASSANDRA-17204)
diff --cc src/java/org/apache/cassandra/db/lifecycle/LogFile.java
index a91af73,3550d66..9053034
--- a/src/java/org/apache/cassandra/db/lifecycle/LogFile.java
+++ b/src/java/org/apache/cassandra/db/lifecycle/LogFile.java
@@@ -66,7 -66,10 +66,8 @@@ final class LogFile implements AutoClos
      private final LogReplicaSet replicas = new LogReplicaSet();

      // The transaction records, this set must be ORDER PRESERVING
-     private final LinkedHashSet<LogRecord> records = new LinkedHashSet<>();
-     // the transaction records we have written to disk - used to guarantee that the
-     // on-disk log files become identical when creating a new replica
-     private final LinkedHashSet<LogRecord> onDiskRecords = new LinkedHashSet<>();
+     private final Set<LogRecord> records = Collections.synchronizedSet(new LinkedHashSet<>()); // TODO: Hack until we fix CASSANDRA-14554
++    private final Set<LogRecord> onDiskRecords = Collections.synchronizedSet(new LinkedHashSet<>());

      // The type of the transaction
      private final OperationType type;
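
The LogFile change above keeps the "ORDER PRESERVING" requirement by wrapping the
LinkedHashSet in Collections.synchronizedSet rather than swapping the collection type.
Below is a minimal, self-contained sketch of that JDK pattern; it is not taken from the
patch, and the String elements merely stand in for LogRecord:

    import java.util.Collections;
    import java.util.LinkedHashSet;
    import java.util.Set;

    public class SynchronizedOrderedSetSketch
    {
        public static void main(String[] args) throws InterruptedException
        {
            // LinkedHashSet preserves insertion order; the synchronized wrapper makes
            // individual add/remove/contains calls safe under concurrent access.
            Set<String> records = Collections.synchronizedSet(new LinkedHashSet<>());

            Thread t1 = new Thread(() -> { for (int i = 0; i < 1000; i++) records.add("add:sstable-a-" + i); });
            Thread t2 = new Thread(() -> { for (int i = 0; i < 1000; i++) records.add("add:sstable-b-" + i); });
            t1.start(); t2.start();
            t1.join(); t2.join();

            // Iteration over a synchronized wrapper is not atomic: callers must hold the
            // set's monitor while iterating (see the Collections.synchronizedSet javadoc).
            synchronized (records)
            {
                records.forEach(System.out::println);
            }
        }
    }

Compound operations and iteration still need that external synchronization, which is
presumably why the patch marks the wrapper with a TODO pointing at CASSANDRA-14554.
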
diff --cc test/unit/org/apache/cassandra/db/lifecycle/LogTransactionTest.java
index a41365b,09c75e1..a4e74ce
--- a/test/unit/org/apache/cassandra/db/lifecycle/LogTransactionTest.java
+++ b/test/unit/org/apache/cassandra/db/lifecycle/LogTransactionTest.java
@@@ -32,8 -38,9 +38,7 @@@ import com.google.common.collect.Iterab
  import com.google.common.collect.Sets;
  import org.junit.BeforeClass;
  import org.junit.Test;
- import org.junit.Assert;
 -import junit.framework.Assert;
 -import org.apache.cassandra.MockSchema;
  import org.apache.cassandra.db.ColumnFamilyStore;
  import org.apache.cassandra.db.Directories;
  import org.apache.cassandra.db.SerializationHeader;
@@@ -96,7 -104,7 +103,7 @@@ public class LogTransactionTest extend
      assertNotNull(txnLogs);
      assertNotNull(txnLogs.id());
--    Assert.assertEquals(OperationType.COMPACTION, txnLogs.type());
++    assertEquals(OperationType.COMPACTION, txnLogs.type());

      txnLogs.trackNew(sstableNew);
      tidier = txnLogs.obsoleted(sstableOld);
@@@ -422,13 -461,13 +460,13 @@@
      sstableNew.selfRef().release();
      sstableOld.selfRef().release();

--    Assert.assertEquals(tmpFiles, getTemporaryFiles(sstableNew.descriptor.directory));
++    assertEquals(tmpFiles, getTemporaryFiles(sstableNew.descriptor.directory));

      // normally called at startup
-     LogTransaction.removeUnfinishedLeftovers(cfs.metadata);
+     LogTransaction.removeUnfinishedLeftovers(cfs.metadata());

      // sstableOld should be only table left
-     Directories directories = new Directories(cfs.metadata);
+     Directories directories = new Directories(cfs.metadata());
      Map<Descriptor, Set<Component>> sstables = directories.sstableLister(Directories.OnTxnErr.THROW).list();
      assertEquals(1, sstables.size());
@@@ -462,13 -501,13 +500,13 @@@
      sstableNew.selfRef().release();
      sstableOld.selfRef().release();

--    Assert.assertEquals(tmpFiles, getTemporaryFiles(sstableOld.descriptor.directory));
++    assertEquals(tmpFiles, getTemporaryFiles(sstableOld.descriptor.directory));

      // normally called at startup
-     LogTransaction.removeUnfinishedLeftovers(cfs.metadata);
+     LogTransaction.removeUnfinishedLeftovers(cfs.metadata());

      // sstableNew should be only table left
-     Directories directories = new Directories(cfs.metadata);
+     Directories directories = new Directories(cfs.metadata());
      Map<Descriptor, Set<Component>> sstables = directories.sstableLister(Directories.OnTxnErr.THROW).list();
      assertEquals(1, sstables.size());
@@@ -505,7 -544,7 +543,7 @@@
      log.trackNew(sstables[3]);

      Collection<File> logFiles = log.logFiles();
--    Assert.assertEquals(2, logFiles.size());
++    assertEquals(2, logFiles.size());

      // fake a commit
      log.txnFile().commit();
@@@ -513,9 -552,9 +551,9 @@@
      Arrays.stream(sstables).forEach(s -> s.selfRef().release());

      // test listing
--    Assert.assertEquals(sstables[0].getAllFilePaths().stream().map(File::new).collect(Collectors.toSet()),
++    assertEquals(sstables[0].getAllFilePaths().stream().map(File::new).collect(Collectors.toSet()),
                   getTemporaryFiles(dataFolder1));
--    Assert.assertEquals(sstables[2].getAllFilePaths().stream().map(File::new).collect(Collectors.toSet()),
++    assertEquals(sstables[2].getAllFilePaths().stream().map(File::new).collect(Collectors.toSet()),
                   getTemporaryFiles(dataFolder2));

      // normally called at startup
@@@ -556,7 -595,7 +594,7 @@@
      log.trackNew(sstables[3]);

      Collection<File> logFiles = log.logFiles();
--    Assert.assertEquals(2, logFiles.size());
++    assertEquals(2, logFiles.size());

      // fake an abort
      log.txnFile().abort();
@@@ -564,9 -603,9 +602,9 @@@
      Arrays.stream(sstables).forEach(s -> s.selfRef().release());

      // test listing
--    Assert.assertEquals(sstables[1].getAllFilePaths().stream().map(File::new).collect(Collectors.toSet()),
++    assertEquals(sstables[1].getAllFilePaths().stream().map(File::new).collect(Collectors.toSet()),
                   getTemporaryFiles(dataFolder1));
--    Assert.assertEquals(sstables[3].getAllFilePaths().stream().map(File::new).collect(Collectors.toSet()),
++    assertEquals(sstables[3].getAllFilePaths().stream().map(File::new).collect(Collectors.toSet()),
                   getTemporaryFiles(dataFolder2));

      // normally called at startup
@@@ -586,7 -625,7 +624,7 @@@
      {
          testRemoveUnfinishedLeftovers_multipleFolders_errorConditions(txn -> {
              List<File> logFiles = txn.logFiles();
--            Assert.assertEquals(2, logFiles.size());
++            assertEquals(2, logFiles.size());

              // insert mismatched records
              FileUtils.append(logFiles.get(0), LogRecord.makeCommit(System.currentTimeMillis()).raw);
@@@ -600,7 -639,7 +638,7 @@@
      {
          testRemoveUnfinishedLeftovers_multipleFolders_errorConditions(txn -> {
              List<File> logFiles = txn.logFiles();
--            Assert.assertEquals(2, logFiles.size());
++            assertEquals(2, logFiles.size());

              // insert a full record and a partial one
              String finalRecord = LogRecord.makeCommit(System.currentTimeMillis()).raw;
@@@ -616,7 -655,7 +654,7 @@@
      {
          testRemoveUnfinishedLeftovers_multipleFolders_errorConditions(txn -> {
              List<File> logFiles = txn.logFiles();
--            Assert.assertEquals(2, logFiles.size());
++            assertEquals(2, logFiles.size());

              // insert a full record and a partial one
              String finalRecord = LogRecord.makeCommit(System.currentTimeMillis()).raw;
@@@ -632,10 -671,10 +670,10 @@@
      {
          testRemoveUnfinishedLeftovers_multipleFolders_errorConditions(txn -> {
              List<File> logFiles = txn.logFiles();
--            Assert.assertEquals(2, logFiles.size());
++            assertEquals(2, logFiles.size());

              // insert a partial sstable record and a full commit record
-             String sstableRecord = LogRecord.make(LogRecord.Type.ADD, Collections.emptyList(), 0, "abc-").raw;
+             String sstableRecord = LogRecord.make(LogRecord.Type.ADD, Collections.emptyList(), 0, "abc").raw;
              int toChop = sstableRecord.length() / 2;
              FileUtils.append(logFiles.get(0), sstableRecord.substring(0, sstableRecord.length() - toChop));
              FileUtils.append(logFiles.get(1), sstableRecord);
@@@ -651,10 -690,10 +689,10 @@@
      {
          testRemoveUnfinishedLeftovers_multipleFolders_errorConditions(txn -> {
              List<File> logFiles = txn.logFiles();
--            Assert.assertEquals(2, logFiles.size());
++            assertEquals(2, logFiles.size());

              // insert a partial sstable record and a full commit record
-             String sstableRecord = LogRecord.make(LogRecord.Type.ADD, Collections.emptyList(), 0, "abc-").raw;
+             String sstableRecord = LogRecord.make(LogRecord.Type.ADD, Collections.emptyList(), 0, "abc").raw;
              int toChop = sstableRecord.length() / 2;
              FileUtils.append(logFiles.get(0), sstableRecord);
              FileUtils.append(logFiles.get(1), sstableRecord.substring(0, sstableRecord.length() - toChop));
@@@ -670,7 -709,7 +708,7 @@@
      {
          testRemoveUnfinishedLeftovers_multipleFolders_errorConditions(txn -> {
              List<File> logFiles = txn.logFiles();
--            Assert.assertEquals(2, logFiles.size());
++            assertEquals(2, logFiles.size());

              // insert only one commit record
              FileUtils.append(logFiles.get(0), LogRecord.makeCommit(System.currentTimeMillis()).raw);
@@@ -683,7 -722,7 +721,7 @@@
      {
          testRemoveUnfinishedLeftovers_multipleFolders_errorConditions(txn -> {
              List<File> logFiles = txn.logFiles();
--            Assert.assertEquals(2, logFiles.size());
++            assertEquals(2, logFiles.size());

              // insert only one commit record
              FileUtils.append(logFiles.get(1), LogRecord.makeCommit(System.currentTimeMillis()).raw);
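
The error-condition tests in this file all follow the same recipe: append a complete
record to one replica's transaction log and a truncated or mismatched record to the
other, then check how cleanup handles the divergence. Below is a self-contained sketch
of that corruption step; it uses plain java.nio instead of Cassandra's FileUtils, and
the record text and file names are invented for illustration (the real tests use
LogRecord.make(...).raw and LogRecord.makeCommit(...).raw):

    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.nio.file.StandardOpenOption;

    public class TornReplicaWriteSketch
    {
        public static void main(String[] args) throws IOException
        {
            // Stand-in for a serialized transaction log record
            String record = "add:[0,0,8][1234567890]example-big-Data.db";

            Path replica1 = Path.of("folder1", "example_txn_compaction.log");
            Path replica2 = Path.of("folder2", "example_txn_compaction.log");
            Files.createDirectories(replica1.getParent());
            Files.createDirectories(replica2.getParent());

            // One replica gets the full record...
            Files.writeString(replica1, record + System.lineSeparator(),
                              StandardOpenOption.CREATE, StandardOpenOption.APPEND);

            // ...the other gets the same record chopped in half, mimicking a torn write
            int toChop = record.length() / 2;
            Files.writeString(replica2, record.substring(0, record.length() - toChop),
                              StandardOpenOption.CREATE, StandardOpenOption.APPEND);
        }
    }
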
@@@ -696,7 -735,7 +734,7 @@@
      {
          testRemoveUnfinishedLeftovers_multipleFolders_errorConditions(txn -> {
              List<File> logFiles = txn.logFiles();
--            Assert.assertEquals(2, logFiles.size());
++            assertEquals(2, logFiles.size());

              // insert mismatched records
              FileUtils.append(logFiles.get(0), LogRecord.makeCommit(System.currentTimeMillis()).raw);

---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]
