adelapena commented on a change in pull request #165:
URL: https://github.com/apache/cassandra-dtest/pull/165#discussion_r728128453



##########
File path: write_failures_test.py
##########
@@ -228,3 +229,46 @@ def test_thrift(self):
                           thrift_types.ConsistencyLevel.ALL)
 
         client.transport.close()
+
+
+@since('3.0')
+class TestMultiDCWriteFailures(Tester):
+    @pytest.fixture(autouse=True)
+    def fixture_add_additional_log_patterns(self, fixture_dtest_setup):
+        fixture_dtest_setup.ignore_log_patterns = (
+            "is too large for the maximum size of",  # 3.0+
+            "Encountered an oversized mutation",     # 4.0+
+            "ERROR WRITE_FAILURE",     # Logged in DEBUG mode for write 
failures
+            "MigrationStage"           # This occurs sometimes due to node 
down (because of restart)
+        )
+
+    def _test_oversized_mutation(self, consistency_level):
+        """
+        Test that multi-DC write failures return operation failed rather than 
a timeout.
+        @jira_ticket CASSANDRA-16334.
+        """
+
+        cluster = self.cluster
+        cluster.populate([3, 3])
+        cluster.set_configuration_options(values={'max_mutation_size_in_kb': 
128})
+        cluster.start()
+
+        node1 = cluster.nodelist()[0]
+        session = self.patient_exclusive_cql_connection(node1)
+
+        session.execute("CREATE KEYSPACE test WITH replication = {'class': 
'NetworkTopologyStrategy', 'dc1': 3, 'dc2': 3};")
+        session.execute("CREATE TABLE test.test (key int PRIMARY KEY, val 
blob);")
+
+        payload = '1' * 1024 * 256
+        statement = SimpleStatement("""
+            INSERT INTO test.test (key, val) VALUES (1, textAsBlob('{}'))
+            """.format(payload), consistency_level=consistency_level)
+
+        with pytest.raises(WriteFailure) as cm:

Review comment:
       Nit: we don't use the `as cm` part.

##########
File path: write_failures_test.py
##########
@@ -228,3 +229,46 @@ def test_thrift(self):
                           thrift_types.ConsistencyLevel.ALL)
 
         client.transport.close()
+
+
+@since('3.0')
+class TestMultiDCWriteFailures(Tester):
+    @pytest.fixture(autouse=True)
+    def fixture_add_additional_log_patterns(self, fixture_dtest_setup):
+        fixture_dtest_setup.ignore_log_patterns = (
+            "is too large for the maximum size of",  # 3.0+
+            "Encountered an oversized mutation",     # 4.0+
+            "ERROR WRITE_FAILURE",     # Logged in DEBUG mode for write 
failures
+            "MigrationStage"           # This occurs sometimes due to node 
down (because of restart)
+        )
+
+    def _test_oversized_mutation(self, consistency_level):
+        """
+        Test that multi-DC write failures return operation failed rather than 
a timeout.
+        @jira_ticket CASSANDRA-16334.
+        """
+
+        cluster = self.cluster
+        cluster.populate([3, 3])

Review comment:
       Do we need 3+3 nodes? I think we could use fewer nodes here to save some 
resources.

##########
File path: write_failures_test.py
##########
@@ -228,3 +229,46 @@ def test_thrift(self):
                           thrift_types.ConsistencyLevel.ALL)
 
         client.transport.close()
+
+
+@since('3.0')
+class TestMultiDCWriteFailures(Tester):
+    @pytest.fixture(autouse=True)
+    def fixture_add_additional_log_patterns(self, fixture_dtest_setup):
+        fixture_dtest_setup.ignore_log_patterns = (
+            "is too large for the maximum size of",  # 3.0+
+            "Encountered an oversized mutation",     # 4.0+
+            "ERROR WRITE_FAILURE",     # Logged in DEBUG mode for write 
failures
+            "MigrationStage"           # This occurs sometimes due to node 
down (because of restart)
+        )
+
+    def _test_oversized_mutation(self, consistency_level):
+        """
+        Test that multi-DC write failures return operation failed rather than 
a timeout.
+        @jira_ticket CASSANDRA-16334.
+        """
+
+        cluster = self.cluster
+        cluster.populate([3, 3])
+        cluster.set_configuration_options(values={'max_mutation_size_in_kb': 
128})
+        cluster.start()
+
+        node1 = cluster.nodelist()[0]
+        session = self.patient_exclusive_cql_connection(node1)
+
+        session.execute("CREATE KEYSPACE test WITH replication = {'class': 
'NetworkTopologyStrategy', 'dc1': 3, 'dc2': 3};")
+        session.execute("CREATE TABLE test.test (key int PRIMARY KEY, val 
blob);")
+
+        payload = '1' * 1024 * 256
+        statement = SimpleStatement("""
+            INSERT INTO test.test (key, val) VALUES (1, textAsBlob('{}'))
+            """.format(payload), consistency_level=consistency_level)
+
+        with pytest.raises(WriteFailure) as cm:
+            session.execute(statement)
+
+    def test_oversized_mutation_local_one(self):
+        self._test_oversized_mutation(ConsistencyLevel.LOCAL_ONE)
+
+    def test_oversized_mutation_one(self):

Review comment:
       Having a test per consistency level is nice and clear, but maybe we 
could combine both tests to use a single cluster and thus save some resources, 
for example [this 
way](https://github.com/adelapena/cassandra-dtest/commit/19fb3e719d208fa286fa80f86567ddd99628d18b).
 This change and the reduction in nodes make the test 3-4 times faster, at least 
locally. What do you think? 

##########
File path: write_failures_test.py
##########
@@ -228,3 +229,46 @@ def test_thrift(self):
                           thrift_types.ConsistencyLevel.ALL)
 
         client.transport.close()
+
+
+@since('3.0')
+class TestMultiDCWriteFailures(Tester):
+    @pytest.fixture(autouse=True)
+    def fixture_add_additional_log_patterns(self, fixture_dtest_setup):
+        fixture_dtest_setup.ignore_log_patterns = (
+            "is too large for the maximum size of",  # 3.0+
+            "Encountered an oversized mutation",     # 4.0+
+            "ERROR WRITE_FAILURE",     # Logged in DEBUG mode for write 
failures
+            "MigrationStage"           # This occurs sometimes due to node 
down (because of restart)
+        )
+
+    def _test_oversized_mutation(self, consistency_level):
+        """
+        Test that multi-DC write failures return operation failed rather than 
a timeout.
+        @jira_ticket CASSANDRA-16334.
+        """
+
+        cluster = self.cluster
+        cluster.populate([3, 3])
+        cluster.set_configuration_options(values={'max_mutation_size_in_kb': 
128})
+        cluster.start()
+
+        node1 = cluster.nodelist()[0]
+        session = self.patient_exclusive_cql_connection(node1)
+
+        session.execute("CREATE KEYSPACE test WITH replication = {'class': 
'NetworkTopologyStrategy', 'dc1': 3, 'dc2': 3};")
+        session.execute("CREATE TABLE test.test (key int PRIMARY KEY, val 
blob);")
+
+        payload = '1' * 1024 * 256
+        statement = SimpleStatement("""
+            INSERT INTO test.test (key, val) VALUES (1, textAsBlob('{}'))
+            """.format(payload), consistency_level=consistency_level)
+
+        with pytest.raises(WriteFailure) as cm:
+            session.execute(statement)

Review comment:
       We could also check that no hints have been created after the failed 
write, for example using metrics [this 
way](https://github.com/adelapena/cassandra-dtest/commit/897c2c39b74c73111009ed71204ac9d2c73424f5).




-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]



---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to