This is an automated email from the ASF dual-hosted git repository. blambov pushed a commit to branch trunk in repository https://gitbox.apache.org/repos/asf/cassandra-dtest.git
The following commit(s) were added to refs/heads/trunk by this push: new bd5e29c7 Fix test expectations for Memtable API (CEP-11) bd5e29c7 is described below commit bd5e29c7ca8e0d6987ba9d180d97766cb30eb0fa Author: Branimir Lambov <branimir.lambov@datastax.com> AuthorDate: Wed Apr 20 10:57:47 2022 +0300 Fix test expectations for Memtable API (CEP-11) patch by Branimir Lambov; reviewed by Andrés de la Peña and Caleb Rackliffe for CASSANDRA-17034 --- cqlsh_tests/test_cqlsh.py | 77 +++++++++++++++++++++++++++++++++++++++++++++-- snapshot_test.py | 22 +++++++++----- 2 files changed, 88 insertions(+), 11 deletions(-) diff --git a/cqlsh_tests/test_cqlsh.py b/cqlsh_tests/test_cqlsh.py index 9ee4ea98..69ca0a93 100644 --- a/cqlsh_tests/test_cqlsh.py +++ b/cqlsh_tests/test_cqlsh.py @@ -1130,7 +1130,28 @@ CREATE TYPE test.address_type ( PRIMARY KEY (id, col) """ - if self.cluster.version() >= LooseVersion('4.0'): + if self.cluster.version() >= LooseVersion('4.1'): + create_table += """ + ) WITH CLUSTERING ORDER BY (col ASC) + AND additional_write_policy = '99p' + AND bloom_filter_fp_chance = 0.01 + AND caching = {'keys': 'ALL', 'rows_per_partition': 'NONE'} + AND cdc = false + AND comment = '' + AND compaction = {'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 'max_threshold': '32', 'min_threshold': '4'} + AND compression = {'chunk_length_in_kb': '16', 'class': 'org.apache.cassandra.io.compress.LZ4Compressor'} + AND memtable = 'default' + AND crc_check_chance = 1.0 + AND default_time_to_live = 0 + AND extensions = {} + AND gc_grace_seconds = 864000 + AND max_index_interval = 2048 + AND memtable_flush_period_in_ms = 0 + AND min_index_interval = 128 + AND read_repair = 'BLOCKING' + AND speculative_retry = '99p'; + """ + elif self.cluster.version() >= LooseVersion('4.0'): create_table += """ ) WITH CLUSTERING ORDER BY (col ASC) AND additional_write_policy = '99p' @@ -1215,7 +1236,32 @@ CREATE TYPE test.address_type ( myindex_output = 
self.get_index_output('myindex', 'test', 'users', 'age') create_table = None - if self.cluster.version() >= LooseVersion('4.0'): + if self.cluster.version() >= LooseVersion('4.1'): + create_table = """ + CREATE TABLE test.users ( + userid text PRIMARY KEY, + age int, + firstname text, + lastname text + ) WITH additional_write_policy = '99p' + AND bloom_filter_fp_chance = 0.01 + AND caching = {'keys': 'ALL', 'rows_per_partition': 'NONE'} + AND cdc = false + AND comment = '' + AND compaction = {'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 'max_threshold': '32', 'min_threshold': '4'} + AND compression = {'chunk_length_in_kb': '16', 'class': 'org.apache.cassandra.io.compress.LZ4Compressor'} + AND memtable = 'default' + AND crc_check_chance = 1.0 + AND default_time_to_live = 0 + AND extensions = {} + AND gc_grace_seconds = 864000 + AND max_index_interval = 2048 + AND memtable_flush_period_in_ms = 0 + AND min_index_interval = 128 + AND read_repair = 'BLOCKING' + AND speculative_retry = '99p'; + """ + elif self.cluster.version() >= LooseVersion('4.0'): create_table = """ CREATE TABLE test.users ( userid text PRIMARY KEY, @@ -1320,7 +1366,32 @@ CREATE TYPE test.address_type ( return "CREATE INDEX {} ON {}.{} ({});".format(index, ks, table, col) def get_users_by_state_mv_output(self): - if self.cluster.version() >= LooseVersion('4.0'): + if self.cluster.version() >= LooseVersion('4.1'): + return """ + CREATE MATERIALIZED VIEW test.users_by_state AS + SELECT * + FROM test.users + WHERE state IS NOT NULL AND username IS NOT NULL + PRIMARY KEY (state, username) + WITH CLUSTERING ORDER BY (username ASC) + AND additional_write_policy = '99p' + AND bloom_filter_fp_chance = 0.01 + AND caching = {'keys': 'ALL', 'rows_per_partition': 'NONE'} + AND cdc = false + AND comment = '' + AND compaction = {'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 'max_threshold': '32', 'min_threshold': '4'} + AND compression = 
{'chunk_length_in_kb': '16', 'class': 'org.apache.cassandra.io.compress.LZ4Compressor'} + AND memtable = 'default' + AND crc_check_chance = 1.0 + AND extensions = {} + AND gc_grace_seconds = 864000 + AND max_index_interval = 2048 + AND memtable_flush_period_in_ms = 0 + AND min_index_interval = 128 + AND read_repair = 'BLOCKING' + AND speculative_retry = '99p'; + """ + elif self.cluster.version() >= LooseVersion('4.0'): return """ CREATE MATERIALIZED VIEW test.users_by_state AS SELECT * diff --git a/snapshot_test.py b/snapshot_test.py index 7110fa72..2825ffbd 100644 --- a/snapshot_test.py +++ b/snapshot_test.py @@ -244,7 +244,7 @@ class TestArchiveCommitlog(SnapshotTester): """ Copy the active commitlogs to the archive directory before restoration """ - self.run_archive_commitlog(restore_point_in_time=False, archive_active_commitlogs=True) + self.run_archive_commitlog(restore_point_in_time=False) def test_dont_archive_commitlog(self): """ @@ -258,19 +258,20 @@ class TestArchiveCommitlog(SnapshotTester): """ self.run_archive_commitlog(restore_point_in_time=True) - def test_archive_commitlog_point_in_time_with_active_commitlog(self): + def test_archive_commitlog_point_in_time_ln(self): """ Test archive commit log with restore_point_in_time setting """ - self.run_archive_commitlog(restore_point_in_time=True, archive_active_commitlogs=True) + self.run_archive_commitlog(restore_point_in_time=True, archive_command='ln') - def test_archive_commitlog_point_in_time_with_active_commitlog_ln(self): + @since('4.1') + def test_archive_commitlog_restore_skip_by_position(self): """ - Test archive commit log with restore_point_in_time setting + Test archive commit log not restored because of specified snapshot commit log position """ - self.run_archive_commitlog(restore_point_in_time=True, archive_active_commitlogs=True, archive_command='ln') + self.run_archive_commitlog(restore_point_in_time=True, specify_commitlog_position=True, archive_command='ln') - def 
run_archive_commitlog(self, restore_point_in_time=False, restore_archived_commitlog=True, archive_active_commitlogs=False, archive_command='cp'): + def run_archive_commitlog(self, restore_point_in_time=False, restore_archived_commitlog=True, specify_commitlog_position=False, archive_command='cp'): """ Run archive commit log restoration test """ @@ -430,6 +431,11 @@ class TestArchiveCommitlog(SnapshotTester): replace_in_file(os.path.join(node1.get_path(), 'conf', 'commitlog_archiving.properties'), [(r'^restore_point_in_time=.*$', 'restore_point_in_time={restore_time}'.format(restore_time=restore_time))]) + if specify_commitlog_position: + # specify a high commit log position to skip replaying any commit log data + replace_in_file(os.path.join(node1.get_path(), 'conf', 'commitlog_archiving.properties'), + [(r'^snapshot_commitlog_position=.*$', 'snapshot_commitlog_position={cl_position}'.format(cl_position="9223372036854775807, 0"))]) + logger.debug("Restarting node1..") node1.stop() node1.start(wait_for_binary_proto=True) @@ -441,7 +447,7 @@ class TestArchiveCommitlog(SnapshotTester): rows = session.execute('SELECT count(*) from ks.cf') # Now we should have 30000 rows from the snapshot + 30000 rows # from the commitlog backups: - if not restore_archived_commitlog: + if not restore_archived_commitlog or specify_commitlog_position: assert rows[0][0] == 30000 elif restore_point_in_time: assert rows[0][0] == 60000 --------------------------------------------------------------------- To unsubscribe, e-mail: commits-unsubscribe@cassandra.apache.org For additional commands, e-mail: commits-help@cassandra.apache.org