This is an automated email from the ASF dual-hosted git repository.

mck pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/cassandra-dtest.git


The following commit(s) were added to refs/heads/trunk by this push:
     new ad8ecc8  incremental_repair_test's test_manual_session_fail, test_manual_session_cancel_non_coordinator_failure and test_manual_session_force_cancel migrated to in-jvm dtests
ad8ecc8 is described below

commit ad8ecc8ba492ad2492b57ead65593e0221182198
Author: Marcus Eriksson <[email protected]>
AuthorDate: Mon Feb 22 14:21:09 2021 +0100

    incremental_repair_test's test_manual_session_fail, test_manual_session_cancel_non_coordinator_failure and test_manual_session_force_cancel migrated to in-jvm dtests
    
     patch by Marcus Eriksson; reviewed by Adam Holmberg and Mick Semb Wever for CASSANDRA-16425
---
 repair_tests/incremental_repair_test.py | 158 --------------------------------
 1 file changed, 158 deletions(-)

diff --git a/repair_tests/incremental_repair_test.py b/repair_tests/incremental_repair_test.py
index bff0957..6da75d6 100644
--- a/repair_tests/incremental_repair_test.py
+++ b/repair_tests/incremental_repair_test.py
@@ -177,164 +177,6 @@ class TestIncRepair(Tester):
             node.nodetool('compact ks tbl')
             self.assertAllRepairedSSTables(node, 'ks')
 
-    def _make_fake_session(self, keyspace, table):
-        node1 = self.cluster.nodelist()[0]
-        session = self.patient_exclusive_cql_connection(node1)
-        session_id = uuid1()
-        cfid = list(session.execute("SELECT * FROM system_schema.tables WHERE keyspace_name='{}' AND table_name='{}'".format(keyspace, table)))[0].id
-        now = datetime.now()
-        # pulled from a repairs table
-        ranges = {'\x00\x00\x00\x08K\xc2\xed\\<\xd3{X\x00\x00\x00\x08r\x04\x89[j\x81\xc4\xe6',
-                  '\x00\x00\x00\x08r\x04\x89[j\x81\xc4\xe6\x00\x00\x00\x08\xd8\xcdo\x9e\xcbl\x83\xd4',
-                  '\x00\x00\x00\x08\xd8\xcdo\x9e\xcbl\x83\xd4\x00\x00\x00\x08K\xc2\xed\\<\xd3{X'}
-        ranges = {bytes(b, "Latin-1") for b in ranges}
-
-        for node in self.cluster.nodelist():
-            session = self.patient_exclusive_cql_connection(node)
-            session.execute("INSERT INTO system.repairs "
-                            "(parent_id, cfids, coordinator, coordinator_port, 
last_update, participants, participants_wp, ranges, repaired_at, started_at, 
state) "
-                            "VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, 
%s)",
-                            [session_id, {cfid}, node1.address(), 7000, now, 
{n.address() for n in self.cluster.nodelist()},
-                             {str(n.address()) + ":7000" for n in 
self.cluster.nodelist()},
-                             ranges, now, now, ConsistentState.REPAIRING])  # 
2=REPAIRING
-
-        # as we faked repairs and inserted directly into system.repairs table, the current
-        # implementation in trunk (LocalSessions) only pulls the sessions via callbacks or
-        # from the system.repairs table once at startup. we need to stop and start the nodes
-        # as a way to force the repair sessions to get populated into the correct in-memory objects
-        time.sleep(1)
-        for node in self.cluster.nodelist():
-            node.flush()
-            node.stop(gently=False)
-
-        for node in self.cluster.nodelist():
-            node.start()
-
-        return session_id
-
-    @since('4.0')
-    def test_manual_session_fail(self):
-        """ check manual failing of repair sessions via nodetool works 
properly """
-        self.fixture_dtest_setup.setup_overrides.cluster_options = 
ImmutableMapping({'hinted_handoff_enabled': 'false',
-                                                                               
      'num_tokens': 1,
-                                                                               
      'commitlog_sync_period_in_ms': 500})
-        self.init_default_config()
-        self.cluster.populate(3).start()
-        node1, node2, node3 = self.cluster.nodelist()
-
-        # make data inconsistent between nodes
-        session = self.patient_exclusive_cql_connection(node3)
-        session.execute("CREATE KEYSPACE ks WITH 
REPLICATION={'class':'SimpleStrategy', 'replication_factor': 3}")
-        session.execute("CREATE TABLE ks.tbl (k INT PRIMARY KEY, v INT)")
-
-        for node in self.cluster.nodelist():
-            out = node.nodetool('repair_admin')
-            assert "no sessions" in out.stdout
-
-        session_id = self._make_fake_session('ks', 'tbl')
-
-        for node in self.cluster.nodelist():
-            out = node.nodetool('repair_admin')
-            lines = out.stdout.split('\n')
-            assert len(lines) > 1
-            line = lines[1]
-            assert re.match(str(session_id), line)
-            assert "REPAIRING" in line
-
-        node1.nodetool("repair_admin cancel --session {}".format(session_id))
-
-        for node in self.cluster.nodelist():
-            out = node.nodetool('repair_admin list --all')
-            lines = out.stdout.split('\n')
-            assert len(lines) > 1
-            line = lines[1]
-            assert re.match(str(session_id), line)
-            assert "FAILED" in line
-
-    @since('4.0')
-    def test_manual_session_cancel_non_coordinator_failure(self):
-        """ check manual failing of repair sessions via a node other than the 
coordinator fails """
-        self.fixture_dtest_setup.setup_overrides.cluster_options = 
ImmutableMapping({'hinted_handoff_enabled': 'false',
-                                                                               
      'num_tokens': 1,
-                                                                               
      'commitlog_sync_period_in_ms': 500})
-
-        self.init_default_config()
-        self.cluster.populate(3).start()
-        node1, node2, node3 = self.cluster.nodelist()
-
-        # make data inconsistent between nodes
-        session = self.patient_exclusive_cql_connection(node3)
-        session.execute("CREATE KEYSPACE ks WITH 
REPLICATION={'class':'SimpleStrategy', 'replication_factor': 3}")
-        session.execute("CREATE TABLE ks.tbl (k INT PRIMARY KEY, v INT)")
-
-        for node in self.cluster.nodelist():
-            out = node.nodetool('repair_admin')
-            assert "no sessions" in out.stdout
-
-        session_id = self._make_fake_session('ks', 'tbl')
-
-        for node in self.cluster.nodelist():
-            out = node.nodetool('repair_admin')
-            lines = out.stdout.split('\n')
-            assert len(lines) > 1
-            line = lines[1]
-            assert re.match(str(session_id), line)
-            assert "REPAIRING" in line
-
-        try:
-            node2.nodetool("repair_admin --cancel --session 
{}".format(session_id))
-            self.fail("cancel from a non coordinator should fail")
-        except ToolError:
-            pass  # expected
-
-        # nothing should have changed
-        for node in self.cluster.nodelist():
-            out = node.nodetool('repair_admin')
-            lines = out.stdout.split('\n')
-            assert len(lines) > 1
-            line = lines[1]
-            assert re.match(str(session_id), line)
-            assert "REPAIRING" in line
-
-    @since('4.0')
-    def test_manual_session_force_cancel(self):
-        """ check manual failing of repair sessions via a non-coordinator 
works if the --force flag is set """
-        self.fixture_dtest_setup.setup_overrides.cluster_options = 
ImmutableMapping({'hinted_handoff_enabled': 'false',
-                                                                               
      'num_tokens': 1,
-                                                                               
      'commitlog_sync_period_in_ms': 500})
-        self.init_default_config()
-        self.cluster.populate(3).start()
-        node1, node2, node3 = self.cluster.nodelist()
-
-        # make data inconsistent between nodes
-        session = self.patient_exclusive_cql_connection(node3)
-        session.execute("CREATE KEYSPACE ks WITH 
REPLICATION={'class':'SimpleStrategy', 'replication_factor': 3}")
-        session.execute("CREATE TABLE ks.tbl (k INT PRIMARY KEY, v INT)")
-
-        for node in self.cluster.nodelist():
-            out = node.nodetool('repair_admin')
-            assert "no sessions" in out.stdout
-
-        session_id = self._make_fake_session('ks', 'tbl')
-
-        for node in self.cluster.nodelist():
-            out = node.nodetool('repair_admin')
-            lines = out.stdout.split('\n')
-            assert len(lines) > 1
-            line = lines[1]
-            assert re.match(str(session_id), line)
-            assert "REPAIRING" in line
-
-        node2.nodetool("repair_admin cancel --session {} 
--force".format(session_id))
-
-        for node in self.cluster.nodelist():
-            out = node.nodetool('repair_admin list --all')
-            lines = out.stdout.split('\n')
-            assert len(lines) > 1
-            line = lines[1]
-            assert re.match(str(session_id), line)
-            assert "FAILED" in line
-
     def test_sstable_marking(self):
         """
         * Launch a three node cluster
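
For context on where these tests went: the in-jvm dtest framework lives in the
Cassandra tree itself (org.apache.cassandra.distributed), and the migrated
tests were added there under CASSANDRA-16425. As a rough, hypothetical sketch
of the same repair_admin flow in that API (the class and method names below
are illustrative, not the committed code):

    import org.junit.Test;

    import org.apache.cassandra.distributed.Cluster;
    import org.apache.cassandra.distributed.api.NodeToolResult;
    import org.apache.cassandra.distributed.test.TestBaseImpl;

    public class RepairAdminSketchTest extends TestBaseImpl
    {
        @Test
        public void manualSessionFailSketch() throws Exception
        {
            // three nodes, mirroring self.cluster.populate(3).start() above
            try (Cluster cluster = init(Cluster.build(3).start()))
            {
                cluster.schemaChange("CREATE TABLE " + KEYSPACE + ".tbl (k int PRIMARY KEY, v int)");

                // with no repair sessions yet, repair_admin reports none
                NodeToolResult res = cluster.get(1).nodetoolResult("repair_admin");
                res.asserts().success().stdoutContains("no sessions");

                // after faking a session (see the helper sketch below), the
                // coordinator can cancel it:
                //   cluster.get(1).nodetoolResult("repair_admin", "cancel",
                //                                 "--session", sessionId.toString())
                //          .asserts().success();
            }
        }
    }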


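The comment block in _make_fake_session above is the other piece worth
carrying over: LocalSessions only reads system.repairs at startup, so after
inserting fake session rows the nodes have to be bounced. A hypothetical
in-jvm equivalent of that stop/start loop (same assumptions as the sketch
above):

    // Bounce every node so LocalSessions re-reads system.repairs at startup,
    // the same trick the removed node.stop()/node.start() loop performed.
    private static void reloadRepairSessions(Cluster cluster) throws Exception
    {
        for (int i = 1; i <= cluster.size(); i++)
        {
            cluster.get(i).shutdown().get(); // stop the instance
            cluster.get(i).startup();        // sessions repopulate from disk
        }
    }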
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]
