This is an automated email from the ASF dual-hosted git repository.

smiklosovic pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/cassandra.git
commit 99246fb24ff611ff1ea929867f8d9bf3dc7747ed
Merge: 8a91751 57c1c61
Author: Stefan Miklosovic <[email protected]>
AuthorDate: Thu Sep 16 14:09:46 2021 +0200

    Merge branch 'cassandra-4.0' into trunk

 CHANGES.txt                                        |  1 +
 .../cassandra/index/sasi/conf/DataTracker.java     |  5 ++
 .../org/apache/cassandra/io/sstable/Component.java |  4 +-
 .../apache/cassandra/index/sasi/SASIIndexTest.java | 88 +++++++++++++++++++++-
 4 files changed, 96 insertions(+), 2 deletions(-)

diff --cc CHANGES.txt
index 927c423,59d8266..6b95485
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@@ -1,34 -1,10 +1,35 @@@
-4.0.2
+4.1
+ * Include SASI components to snapshots (CASSANDRA-15134)
  * Fix missed wait latencies in the output of `nodetool tpstats -F` (CASSANDRA-16938)
+ * Reduce native transport max frame size to 16MB (CASSANDRA-16886)
+ * Add support for filtering using IN restrictions (CASSANDRA-14344)
+ * Provide a nodetool command to invalidate auth caches (CASSANDRA-16404)
+ * Catch read repair timeout exceptions and add metric (CASSANDRA-16880)
+ * Exclude Jackson 1.x transitive dependency of hadoop* provided dependencies (CASSANDRA-16854)
+ * Add client warnings and abort to tombstone and coordinator reads which go past a low/high watermark (CASSANDRA-16850)
+ * Add TTL support to nodetool snapshots (CASSANDRA-16789)
+ * Allow CommitLogSegmentReader to optionally skip sync marker CRC checks (CASSANDRA-16842)
+ * allow blocking IPs from updating metrics about traffic (CASSANDRA-16859)
+ * Request-Based Native Transport Rate-Limiting (CASSANDRA-16663)
+ * Implement nodetool getauditlog command (CASSANDRA-16725)
+ * Clean up repair code (CASSANDRA-13720)
+ * Background schedule to clean up orphaned hints files (CASSANDRA-16815)
+ * Modify SecondaryIndexManager#indexPartition() to retrieve only columns for which indexes are actually being built (CASSANDRA-16776)
+ * Batch the token metadata update to improve the speed (CASSANDRA-15291)
+ * Reduce the log level on "expected" repair exceptions (CASSANDRA-16775)
+ * Make JMXTimer expose attributes using consistent time unit (CASSANDRA-16760)
+ * Remove check on gossip status from DynamicEndpointSnitch::updateScores (CASSANDRA-11671)
+ * Fix AbstractReadQuery::toCQLString not returning valid CQL (CASSANDRA-16510)
+ * Log when compacting many tombstones (CASSANDRA-16780)
+ * Display bytes per level in tablestats for LCS tables (CASSANDRA-16799)
+ * Add isolated flush timer to CommitLogMetrics and ensure writes correspond to single WaitingOnCommit data points (CASSANDRA-16701)
+ * Add a system property to set hostId if not yet initialized (CASSANDRA-14582)
+ * GossiperTest.testHasVersion3Nodes didn't take into account trunk version changes, fixed to rely on latest version (CASSANDRA-16651)
+Merged from 4.0:
  * Remove all the state pollution between tests in SSTableReaderTest (CASSANDRA-16888)
  * Delay auth setup until after gossip has settled to avoid unavailables on startup (CASSANDRA-16783)
- * Fix clustering order logic in CREATE MATERIALIZED VIEW (CASSANDRA-16898)
  * org.apache.cassandra.db.rows.ArrayCell#unsharedHeapSizeExcludingData includes data twice (CASSANDRA-16900)
+ * Fix clustering order logic in CREATE MATERIALIZED VIEW (CASSANDRA-16898)
  * Exclude Jackson 1.x transitive dependency of hadoop* provided dependencies (CASSANDRA-16854)
 Merged from 3.11:
  * Make assassinate more resilient to missing tokens (CASSANDRA-16847)

diff --cc test/unit/org/apache/cassandra/index/sasi/SASIIndexTest.java
index 4ff5921,364acdb..70948fe
--- a/test/unit/org/apache/cassandra/index/sasi/SASIIndexTest.java
+++ b/test/unit/org/apache/cassandra/index/sasi/SASIIndexTest.java
@@@ -79,6 -85,6 +85,8 @@@ import org.apache.cassandra.schema.Inde
  import org.apache.cassandra.schema.KeyspaceParams;
  import org.apache.cassandra.serializers.MarshalException;
  import org.apache.cassandra.serializers.TypeSerializer;
++import org.apache.cassandra.service.snapshot.SnapshotManifest;
++import org.apache.cassandra.service.snapshot.TableSnapshot;
  import org.apache.cassandra.utils.ByteBufferUtil;
  import org.apache.cassandra.utils.FBUtilities;
  import org.apache.cassandra.utils.Pair;
@@@ -132,7 -141,84 +143,82 @@@ public class SASIIndexTes
  }
  
  @Test
- public void testSingleExpressionQueries()
+ public void testSASIComponentsAddedToSnapshot() throws Throwable
+ {
+     String snapshotName = "sasi_test";
+     Map<String, Pair<String, Integer>> data = new HashMap<>();
+     Random r = new Random();
+
+     for (int i = 0; i < 100; i++)
+         data.put(UUID.randomUUID().toString(), Pair.create(UUID.randomUUID().toString(), r.nextInt()));
+
+     ColumnFamilyStore store = loadData(data, true);
+     store.forceMajorCompaction();
+
+     Set<SSTableReader> ssTableReaders = store.getLiveSSTables();
+     Set<Component> sasiComponents = new HashSet<>();
+
+     for (Index index : store.indexManager.listIndexes())
+         if (index instanceof SASIIndex)
+             sasiComponents.add(((SASIIndex) index).getIndex().getComponent());
+
+     Assert.assertFalse(sasiComponents.isEmpty());
+
+     try
+     {
+         store.snapshot(snapshotName);
-         FileReader reader = new FileReader(store.getDirectories().getSnapshotManifestFile(snapshotName));
-         JSONObject manifest = (JSONObject) new JSONParser().parse(reader);
-         JSONArray files = (JSONArray) manifest.get("files");
++        SnapshotManifest manifest = SnapshotManifest.deserializeFromJsonFile(store.getDirectories().getSnapshotManifestFile(snapshotName));
+
+         Assert.assertFalse(ssTableReaders.isEmpty());
-         Assert.assertFalse(files.isEmpty());
-         Assert.assertEquals(ssTableReaders.size(), files.size());
++        Assert.assertFalse(manifest.files.isEmpty());
++        Assert.assertEquals(ssTableReaders.size(), manifest.files.size());
+
+         Map<Descriptor, Set<Component>> snapshotSSTables = store.getDirectories()
+                                                                 .sstableLister(Directories.OnTxnErr.IGNORE)
+                                                                 .snapshots(snapshotName)
+                                                                 .list();
+
+         long indexSize = 0;
+         long tableSize = 0;
+
+         for (SSTableReader sstable : ssTableReaders)
+         {
+             File snapshotDirectory = Directories.getSnapshotDirectory(sstable.descriptor, snapshotName);
+             Descriptor snapshotSSTable = new Descriptor(snapshotDirectory,
+                                                         sstable.getKeyspaceName(),
+                                                         sstable.getColumnFamilyName(),
+                                                         sstable.descriptor.generation,
+                                                         sstable.descriptor.formatType);
+
+             Set<Component> components = snapshotSSTables.get(snapshotSSTable);
+
+             Assert.assertNotNull(components);
+             Assert.assertTrue(components.containsAll(sasiComponents));
+
+             for (Component c : components)
+             {
+                 Path componentPath = Paths.get(sstable.descriptor + "-" + c.name);
+                 long componentSize = Files.size(componentPath);
+                 if (Component.Type.fromRepresentation(c.name) == Component.Type.SECONDARY_INDEX)
+                     indexSize += componentSize;
+                 else
+                     tableSize += componentSize;
+             }
+         }
-
-         Map<String, Directories.SnapshotSizeDetails> details = store.getSnapshotDetails();
++
++        TableSnapshot details = store.listSnapshots().get(snapshotName);
+
+         // check that SASI components are included in the computation of snapshot size
-         Assert.assertEquals((long) details.get(snapshotName).dataSizeBytes, tableSize + indexSize);
++        Assert.assertEquals(details.computeTrueSizeBytes(), tableSize + indexSize);
+     }
+     finally
+     {
+         store.clearSnapshot(snapshotName);
+     }
+ }
+
+ @Test
+ public void testSingleExpressionQueries() throws Exception
  {
      testSingleExpressionQueries(false);
      cleanupData();

---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]
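[Editor's note, not part of the commit above.] As a minimal standalone sketch of what the new test exercises: after a snapshot is taken, SASI secondary-index files (component names carrying the "SI_" marker, cf. Component.Type.SECONDARY_INDEX in the diff) should sit in the snapshot directory alongside the regular SSTable components, and their bytes should count toward the snapshot's reported size. The class name and snapshot path below are illustrative assumptions; only standard java.nio.file calls are used.

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.stream.Stream;

public class SnapshotSasiComponentCheck
{
    public static void main(String[] args) throws IOException
    {
        // Hypothetical snapshot directory; the real layout is
        // <data_dir>/<keyspace>/<table-id>/snapshots/<snapshot_name>/
        Path snapshotDir = Paths.get(args.length > 0
                                     ? args[0]
                                     : "/var/lib/cassandra/data/ks/tbl/snapshots/sasi_test");

        long indexBytes = 0;  // SASI (secondary index) components
        long tableBytes = 0;  // everything else (Data.db, Index.db, ...)

        try (Stream<Path> files = Files.list(snapshotDir))
        {
            for (Path p : (Iterable<Path>) files::iterator)
            {
                long size = Files.size(p);
                // SASI components carry the "SI_" marker in their file name,
                // which is what Component.Type.SECONDARY_INDEX matches in the test.
                if (p.getFileName().toString().contains("-SI_"))
                    indexBytes += size;
                else
                    tableBytes += size;
            }
        }

        System.out.printf("SASI index bytes: %d, other SSTable bytes: %d%n", indexBytes, tableBytes);
    }
}

Tallying the two buckets this way loosely mirrors the indexSize/tableSize accounting in the test, which is then compared against the snapshot size reported via TableSnapshot.computeTrueSizeBytes().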
