This is an automated email from the ASF dual-hosted git repository.

djoshi pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/cassandra-dtest.git


The following commit(s) were added to refs/heads/master by this push:
     new 87afb85  Fix and reenable cqlsh tests
87afb85 is described below

commit 87afb85f59bdc94a7909bf02575819b6812efaf2
Author: Patrick Bannister <ptbannis...@gmail.com>
AuthorDate: Wed Mar 13 15:08:12 2019 -0700

    Fix and reenable cqlsh tests
    
    Patch by Patrick Bannister; reviewed by Dinesh Joshi and Stefan Podkowinski for CASSANDRA-14298
---
 conftest.py                                        |   5 +
 cqlsh_tests/cqlsh_tools.py                         |  12 +-
 cqlsh_tests/{cqlsh_tests.py => test_cqlsh.py}      | 243 +++++++++++++++------
 .../{cqlsh_copy_tests.py => test_cqlsh_copy.py}    | 140 +++++++-----
 4 files changed, 274 insertions(+), 126 deletions(-)

diff --git a/conftest.py b/conftest.py
index bfd4299..e680ca9 100644
--- a/conftest.py
+++ b/conftest.py
@@ -497,6 +497,11 @@ def pytest_collection_modifyitems(items, config):
             if config.getoption("use_off_heap_memtables"):
                 deselect_test = True
 
+        # temporarily deselect tests in cqlsh_copy_tests that depend on cqlshlib,
+        # until cqlshlib is Python 3 compatible
+        if item.get_marker("depends_cqlshlib"):
+            deselect_test = True
+
         if deselect_test:
             deselected_items.append(item)
         else:
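
A minimal sketch of how the new depends_cqlshlib marker is consumed (the test
name below is hypothetical): a tagged test is collected normally, then dropped
by the pytest_collection_modifyitems hook above until cqlshlib is Python 3
compatible.

    import pytest

    @pytest.mark.depends_cqlshlib
    def test_needs_cqlshlib():
        # Deselected by conftest.py; runs again once cqlshlib supports Python 3.
        ...
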
diff --git a/cqlsh_tests/cqlsh_tools.py b/cqlsh_tests/cqlsh_tools.py
index 7175fb9..9544d3e 100644
--- a/cqlsh_tests/cqlsh_tools.py
+++ b/cqlsh_tests/cqlsh_tools.py
@@ -20,14 +20,18 @@ def csv_rows(filename, delimiter=None):
     reader_opts = {}
     if delimiter is not None:
         reader_opts['delimiter'] = delimiter
-    with open(filename, 'rb') as csvfile:
+    with open(filename, 'r') as csvfile:
         for row in csv.reader(csvfile, **reader_opts):
             yield row
 
 
 def assert_csvs_items_equal(filename1, filename2):
     with open(filename1, 'r') as x, open(filename2, 'r') as y:
-        assert list(x.readlines()) == list(y.readlines())
+        list_x = list(x.readlines())
+        list_y = list(y.readlines())
+        list_x.sort()
+        list_y.sort()
+        assert list_x == list_y
 
 
 def random_list(gen=None, n=None):
@@ -45,7 +49,7 @@ def random_list(gen=None, n=None):
 
 
 def write_rows_to_csv(filename, data):
-    with open(filename, 'wb') as csvfile:
+    with open(filename, 'w') as csvfile:
         writer = csv.writer(csvfile)
         for row in data:
             writer.writerow(row)
@@ -116,5 +120,3 @@ def assert_resultset_contains(got: ResultSet, expected: List[tuple]) -> None:
             if row.a == t[0] and row.b == t[1]:
                 found = True
         assert found, 'Failed to find expected row: {}'.format(t)
-
-
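
The 'rb'/'wb' to 'r'/'w' switches above follow the Python 3 csv module, which
expects text-mode file objects. A minimal standalone sketch (the filename is
illustrative; newline='' is the csv docs' recommendation, not something this
patch adds):

    import csv

    # Write then read a CSV file in text mode, as Python 3's csv requires.
    with open('example.csv', 'w', newline='') as f:
        csv.writer(f).writerow(['a', 'b'])
    with open('example.csv', 'r', newline='') as f:
        assert list(csv.reader(f)) == [['a', 'b']]
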
diff --git a/cqlsh_tests/cqlsh_tests.py b/cqlsh_tests/test_cqlsh.py
similarity index 91%
rename from cqlsh_tests/cqlsh_tests.py
rename to cqlsh_tests/test_cqlsh.py
index ba30b75..5023828 100644
--- a/cqlsh_tests/cqlsh_tests.py
+++ b/cqlsh_tests/test_cqlsh.py
@@ -1,6 +1,9 @@
+# coding=utf-8
+
 import binascii
 import csv
 import datetime
+import locale
 import os
 import re
 import subprocess
@@ -27,12 +30,13 @@ since = pytest.mark.since
 logger = logging.getLogger(__name__)
 
 
-@pytest.mark.skip("These aren't functioning just yet")
 class TestCqlsh(Tester):
 
     @classmethod
     def setUpClass(cls):
         cls._cached_driver_methods = monkeypatch_driver()
+        if locale.getpreferredencoding() != 'UTF-8':
+            os.environ['LC_CTYPE'] = 'en_US.utf8'
 
     @classmethod
     def tearDownClass(cls):
@@ -43,6 +47,7 @@ class TestCqlsh(Tester):
             os.unlink(self.tempfile.name)
         super(TestCqlsh, self).tearDown()
 
+    @pytest.mark.depends_cqlshlib
     @since('2.1.9')
     def test_pycodestyle_compliance(self):
         """
@@ -68,8 +73,8 @@ class TestCqlsh(Tester):
         p = subprocess.Popen(cmds, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
         stdout, stderr = p.communicate()
 
-        assert len(stdout), 0 == stdout
-        assert len(stderr), 0 == stderr
+        assert 0 == len(stdout), stdout
+        assert 0 == len(stderr), stderr
 
     def test_simple_insert(self):
 
@@ -91,8 +96,7 @@ class TestCqlsh(Tester):
         session = self.patient_cql_connection(node1)
         rows = list(session.execute("select id, value from simple.simple"))
 
-        self.assertEqual({1: 'one', 2: 'two', 3: 'three', 4: 'four', 5: 'five'},
-                         {k: v for k, v in rows})
+        assert {1: 'one', 2: 'two', 3: 'three', 4: 'four', 5: 'five'} == {k: v for k, v in rows}
 
     def test_lwt(self):
         """
@@ -118,7 +122,7 @@ class TestCqlsh(Tester):
                 stmt=repr(stmt),
                 routput=repr(output)
             )
-            assert expected_substring == output in msg
+            assert expected_substring in output, msg
 
         assert_applied("INSERT INTO lwt.lwt (id, value) VALUES (1, 'one') IF 
NOT EXISTS")
         assert_applied("INSERT INTO lwt.lwt (id, value) VALUES (1, 'one') IF 
NOT EXISTS")
@@ -187,11 +191,11 @@ class TestCqlsh(Tester):
         session = self.patient_cql_connection(node)
 
         def verify_varcharmap(map_name, expected, encode_value=False):
-            rows = list(session.execute(("SELECT %s FROM testks.varcharmaptable WHERE varcharkey= '᚛᚛ᚉᚑᚅᚔᚉᚉᚔᚋ ᚔᚈᚔ ᚍᚂᚐᚅᚑ ᚅᚔᚋᚌᚓᚅᚐ᚜';" % map_name).encode("utf-8")))
+            rows = list(session.execute(("SELECT %s FROM testks.varcharmaptable WHERE varcharkey= '᚛᚛ᚉᚑᚅᚔᚉᚉᚔᚋ ᚔᚈᚔ ᚍᚂᚐᚅᚑ ᚅᚔᚋᚌᚓᚅᚐ᚜';" % map_name)))
             if encode_value:
                 got = {k.encode("utf-8"): v.encode("utf-8") for k, v in 
rows[0][0].items()}
             else:
-                got = {k.encode("utf-8"): v for k, v in rows[0][0].items()}
+                got = {k: v for k, v in rows[0][0].items()}
             assert got == expected
 
         verify_varcharmap('varcharasciimap', {
@@ -290,7 +294,7 @@ class TestCqlsh(Tester):
             ' ⠊⠀⠉⠁⠝⠀⠑⠁⠞⠀⠛⠇⠁⠎⠎⠀⠁⠝⠙⠀⠊⠞⠀⠙⠕⠑⠎⠝⠞⠀⠓⠥⠗⠞⠀⠍⠑': ' ⠊⠀⠉⠁⠝⠀⠑⠁⠞⠀⠛⠇⠁⠎⠎⠀⠁⠝⠙⠀⠊⠞⠀⠙⠕⠑⠎⠝⠞⠀⠓⠥⠗⠞⠀⠍⠑',
             'Можам да јадам стакло, а не ме штета.': 'Можам да јадам стакло, а не ме штета.',
             'I can eat glass and it does not hurt me': 'I can eat glass and it does not hurt me'
-        }, encode_value=True)
+        })
 
         verify_varcharmap('varcharvarintmap', {
             'Vitrum edere possum, mihi non nocet.': 1010010101020400204143243,
@@ -301,9 +305,9 @@ class TestCqlsh(Tester):
 
         output, err = self.run_cqlsh(node, 'use testks; SELECT * FROM varcharmaptable', ['--encoding=utf-8'])
 
-        assert output.decode("utf-8").count('Можам да јадам стакло, а не ме 
штета.') == 16
-        assert output.decode("utf-8").count(' 
⠊⠀⠉⠁⠝⠀⠑⠁⠞⠀⠛⠇⠁⠎⠎⠀⠁⠝⠙⠀⠊⠞⠀⠙⠕⠑⠎⠝⠞⠀⠓⠥⠗⠞⠀⠍⠑') == 16
-        assert output.decode("utf-8").count('᚛᚛ᚉᚑᚅᚔᚉᚉᚔᚋ ᚔᚈᚔ ᚍᚂᚐᚅᚑ ᚅᚔᚋᚌᚓᚅᚐ᚜') 
== 2
+        assert output.count('Можам да јадам стакло, а не ме штета.') == 16
+        assert output.count(' ⠊⠀⠉⠁⠝⠀⠑⠁⠞⠀⠛⠇⠁⠎⠎⠀⠁⠝⠙⠀⠊⠞⠀⠙⠕⠑⠎⠝⠞⠀⠓⠥⠗⠞⠀⠍⠑') == 16
+        assert output.count('᚛᚛ᚉᚑᚅᚔᚉᚉᚔᚋ ᚔᚈᚔ ᚍᚂᚐᚅᚑ ᚅᚔᚋᚌᚓᚅᚐ᚜') == 2
 
     def test_eat_glass(self):
 
@@ -423,7 +427,7 @@ INSERT INTO varcharmaptable (varcharkey, varcharvarintmap ) VALUES      ('᚛᚛
 UPDATE varcharmaptable SET varcharvarintmap = varcharvarintmap + {'Vitrum edere possum, mihi non nocet.':20000} WHERE varcharkey= '᚛᚛ᚉᚑᚅᚔᚉᚉᚔᚋ ᚔᚈᚔ ᚍᚂᚐᚅᚑ ᚅᚔᚋᚌᚓᚅᚐ᚜';
 
 UPDATE varcharmaptable SET varcharvarintmap['Vitrum edere possum, mihi non nocet.'] = 1010010101020400204143243 WHERE varcharkey= '᚛᚛ᚉᚑᚅᚔᚉᚉᚔᚋ ᚔᚈᚔ ᚍᚂᚐᚅᚑ ᚅᚔᚋᚌᚓᚅᚐ᚜'
-        """.encode("utf-8"))
+        """)
 
         self.verify_glass(node1)
 
@@ -448,8 +452,7 @@ UPDATE varcharmaptable SET varcharvarintmap['Vitrum edere possum, mihi non nocet
 
         node1, = self.cluster.nodelist()
 
-        output, err, _ = node1.run_cqlsh(cmds="ä;".encode('utf8'))
-        err = err.decode('utf8')
+        output, err, _ = node1.run_cqlsh(cmds="ä;")
         assert 'Invalid syntax' in err
         assert 'ä' in err
 
@@ -465,11 +468,12 @@ UPDATE varcharmaptable SET varcharvarintmap['Vitrum edere possum, mihi non nocet
         node1, = self.cluster.nodelist()
 
         cmd = '''create keyspace "ä" WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '1'};'''
-        cmd = cmd.encode('utf8')
         output, err, _ = node1.run_cqlsh(cmds=cmd, cqlsh_options=["--debug"])
 
-        err = err.decode('utf8')
-        assert '"ä" is not a valid keyspace name' in err
+        if self.cluster.version() >= LooseVersion('4.0'):
+            assert "Keyspace name must not be empty, more than 48 characters 
long, or contain non-alphanumeric-underscore characters (got 'ä')" in err
+        else:
+            assert '"ä" is not a valid keyspace name' in err
 
     def test_with_empty_values(self):
         """
@@ -535,11 +539,11 @@ INSERT INTO has_all_types (num, intcol, asciicol, bigintcol, blobcol, booleancol
                            timestampcol, uuidcol, varcharcol, varintcol)
 VALUES (4, blobAsInt(0x), '', blobAsBigint(0x), 0x, blobAsBoolean(0x), blobAsDecimal(0x),
         blobAsDouble(0x), blobAsFloat(0x), '', blobAsTimestamp(0x), blobAsUuid(0x), '',
-        blobAsVarint(0x))""".encode("utf-8"))
+        blobAsVarint(0x))""")
 
         output, err = self.run_cqlsh(node1, "select intcol, bigintcol, 
varintcol from CASSANDRA_7196.has_all_types where num in (0, 1, 2, 3, 4)")
         if common.is_win():
-            output = output.decode("utf-8").replace('\r', '')
+            output = output.replace('\r', '')
 
         expected = """
  intcol      | bigintcol            | varintcol
@@ -550,7 +554,7 @@ VALUES (4, blobAsInt(0x), '', blobAsBigint(0x), 0x, blobAsBoolean(0x), blobAsDec
  -2147483648 | -9223372036854775808 | -10000000000000000000000000
             |                      |                            \n\n(5 rows)"""
 
-        assert expected in output, "Output \n {%s} \n doesn't contain expected\n {%s}" % (output, expected)
+        assert expected in output, "Output \n {0} \n doesn't contain expected\n {1}".format(output, expected)
 
     def test_tracing_from_system_traces(self):
         self.cluster.populate(1).start(wait_for_binary_proto=True)
@@ -612,10 +616,10 @@ VALUES (4, blobAsInt(0x), '', blobAsBigint(0x), 0x, blobAsBoolean(0x), blobAsDec
         if common.is_win():
             output = output.replace('\r', '')
 
-        assert len(err), 0 == "Failed to execute cqlsh: {}".format(err)
+        assert 0 == len(err), "Failed to execute cqlsh: {}".format(err)
 
         logger.debug(output)
-        assert expected in output, "Output \n {%s} \n doesn't contain expected\n {%s}" % (output, expected)
+        assert expected in output, "Output \n {0} \n doesn't contain expected\n {1}".format(output, expected)
 
     def test_list_queries(self):
         config = {'authenticator': 'org.apache.cassandra.auth.PasswordAuthenticator',
@@ -633,7 +637,16 @@ VALUES (4, blobAsInt(0x), '', blobAsBigint(0x), 0x, blobAsBoolean(0x), blobAsDec
         conn.execute("CREATE USER user1 WITH PASSWORD 'user1'")
         conn.execute("GRANT ALL ON ks.t1 TO user1")
 
-        if self.cluster.version() >= '2.2':
+        if self.cluster.version() >= '4.0':
+            self.verify_output("LIST USERS", node1, """
+ name      | super | datacenters
+-----------+-------+-------------
+ cassandra |  True |         ALL
+     user1 | False |         ALL
+
+(2 rows)
+""")
+        elif self.cluster.version() >= '2.2':
             self.verify_output("LIST USERS", node1, """
  name      | super
 -----------+-------
@@ -850,7 +863,25 @@ VALUES (4, blobAsInt(0x), '', blobAsBigint(0x), 0x, blobAsBoolean(0x), blobAsDec
                 PRIMARY KEY (id, col)
                 """
 
-        if self.cluster.version() >= LooseVersion('3.9'):
+        if self.cluster.version() >= LooseVersion('4.0'):
+            ret += """
+        ) WITH CLUSTERING ORDER BY (col ASC)
+            AND bloom_filter_fp_chance = 0.01
+            AND caching = {'keys': 'ALL', 'rows_per_partition': 'NONE'}
+            AND comment = ''
+            AND compaction = {'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 'max_threshold': '32', 'min_threshold': '4'}
+            AND compression = {'chunk_length_in_kb': '16', 'class': 'org.apache.cassandra.io.compress.LZ4Compressor'}
+            AND crc_check_chance = 1.0
+            AND dclocal_read_repair_chance = 0.0
+            AND default_time_to_live = 0
+            AND gc_grace_seconds = 864000
+            AND max_index_interval = 2048
+            AND memtable_flush_period_in_ms = 0
+            AND min_index_interval = 128
+            AND read_repair_chance = 0.0
+            AND speculative_retry = '99p';
+        """
+        elif self.cluster.version() >= LooseVersion('3.9'):
             ret += """
         ) WITH CLUSTERING ORDER BY (col ASC)
             AND bloom_filter_fp_chance = 0.01
@@ -919,7 +950,29 @@ VALUES (4, blobAsInt(0x), '', blobAsBigint(0x), 0x, blobAsBoolean(0x), blobAsDec
         quoted_index_output = self.get_index_output('"QuotedNameIndex"', 'test', 'users', 'firstname')
         myindex_output = self.get_index_output('myindex', 'test', 'users', 'age')
 
-        if self.cluster.version() >= LooseVersion('3.9'):
+        if self.cluster.version() >= LooseVersion('4.0'):
+            return """
+        CREATE TABLE test.users (
+            userid text PRIMARY KEY,
+            age int,
+            firstname text,
+            lastname text
+        ) WITH bloom_filter_fp_chance = 0.01
+            AND caching = {'keys': 'ALL', 'rows_per_partition': 'NONE'}
+            AND comment = ''
+            AND compaction = {'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 'max_threshold': '32', 'min_threshold': '4'}
+            AND compression = {'chunk_length_in_kb': '16', 'class': 'org.apache.cassandra.io.compress.LZ4Compressor'}
+            AND crc_check_chance = 1.0
+            AND dclocal_read_repair_chance = 0.0
+            AND default_time_to_live = 0
+            AND gc_grace_seconds = 864000
+            AND max_index_interval = 2048
+            AND memtable_flush_period_in_ms = 0
+            AND min_index_interval = 128
+            AND read_repair_chance = 0.0
+            AND speculative_retry = '99p';
+        """ + quoted_index_output + "\n" + myindex_output
+        elif self.cluster.version() >= LooseVersion('3.9'):
             return """
         CREATE TABLE test.users (
             userid text PRIMARY KEY,
@@ -1000,7 +1053,30 @@ VALUES (4, blobAsInt(0x), '', blobAsBigint(0x), 0x, blobAsBoolean(0x), blobAsDec
         return "CREATE INDEX {} ON {}.{} ({});".format(index, ks, table, col)
 
     def get_users_by_state_mv_output(self):
-        if self.cluster.version() >= LooseVersion('3.9'):
+        if self.cluster.version() >= LooseVersion('4.0'):
+            return """
+                CREATE MATERIALIZED VIEW test.users_by_state AS
+                SELECT *
+                FROM test.users
+                WHERE state IS NOT NULL AND username IS NOT NULL
+                PRIMARY KEY (state, username)
+                WITH CLUSTERING ORDER BY (username ASC)
+                AND bloom_filter_fp_chance = 0.01
+                AND caching = {'keys': 'ALL', 'rows_per_partition': 'NONE'}
+                AND comment = ''
+                AND compaction = {'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 'max_threshold': '32', 'min_threshold': '4'}
+                AND compression = {'chunk_length_in_kb': '16', 'class': 'org.apache.cassandra.io.compress.LZ4Compressor'}
+                AND crc_check_chance = 1.0
+                AND dclocal_read_repair_chance = 0.0
+                AND default_time_to_live = 0
+                AND gc_grace_seconds = 864000
+                AND max_index_interval = 2048
+                AND memtable_flush_period_in_ms = 0
+                AND min_index_interval = 128
+                AND read_repair_chance = 0.0
+                AND speculative_retry = '99p';
+               """
+        elif self.cluster.version() >= LooseVersion('3.9'):
             return """
                 CREATE MATERIALIZED VIEW test.users_by_state AS
                 SELECT *
@@ -1049,6 +1125,7 @@ VALUES (4, blobAsInt(0x), '', blobAsBigint(0x), 0x, blobAsBoolean(0x), blobAsDec
 
     def execute(self, cql, expected_output=None, expected_err=None, env_vars=None):
         logger.debug(cql)
+
         node1, = self.cluster.nodelist()
         output, err = self.run_cqlsh(node1, cql, env_vars=env_vars)
 
@@ -1070,6 +1147,32 @@ VALUES (4, blobAsInt(0x), '', blobAsBigint(0x), 0x, blobAsBoolean(0x), blobAsDec
         expected_lines = [s.strip() for s in expected_response.split("\n") if s.strip()]
         assert expected_lines == lines
 
+    def strip_default_time_to_live(self, describe_statement):
+        """
+        Remove default_time_to_live options from output of DESCRIBE
+        statements. The resulting string may be reused as a CREATE
+        statement.
+        Useful after CASSANDRA-14071, which removed
+        default_time_to_live options from CREATE MATERIALIZED VIEW
+        statements.
+        """
+        describe_statement = re.sub(r"( AND)? default_time_to_live = [\d\.]+", 
"", describe_statement)
+        describe_statement = re.sub(r"WITH[\s]*;", "", describe_statement)
+        return describe_statement
+
+    def strip_read_repair_chance(self, describe_statement):
+        """
+        Remove read_repair_chance and dclocal_read_repair_chance options
+        from output of DESCRIBE statements. The resulting string may be
+        reused as a CREATE statement.
+        Useful after CASSANDRA-13910, which removed read_repair_chance
+        options from CREATE statements but did not remove them completely
+        from the system.
+        """
+        describe_statement = re.sub(r"( AND)? (dclocal_)?read_repair_chance = 
[\d\.]+", "", describe_statement)
+        describe_statement = re.sub(r"WITH[\s]*;", "", describe_statement)
+        return describe_statement
+
     def test_copy_to(self):
         self.cluster.populate(1).start()
         node1, = self.cluster.nodelist()
@@ -1089,7 +1192,7 @@ VALUES (4, blobAsInt(0x), '', blobAsBigint(0x), 0x, blobAsBoolean(0x), blobAsDec
         args = [(i, str(i), float(i) + 0.5, uuid4()) for i in range(10000)]
         execute_concurrent_with_args(session, insert_statement, args)
 
-        results = list(session.execute("SELECT * FROM testcopyto"))
+        selected_results = list(session.execute("SELECT * FROM testcopyto"))
 
         self.tempfile = NamedTemporaryFile(delete=False)
         logger.debug('Exporting to csv file: %s' % (self.tempfile.name,))
@@ -1098,14 +1201,15 @@ VALUES (4, blobAsInt(0x), '', blobAsBigint(0x), 0x, blobAsBoolean(0x), blobAsDec
         # session
         with open(self.tempfile.name, 'r') as csvfile:
             csvreader = csv.reader(csvfile)
-            result_list = [list(map(str, cql_row)) for cql_row in results]
-            assert result_list == csvreader
+            selected_results_strings = [list(map(str, cql_row)) for cql_row in selected_results]
+            exported_results = [row for row in csvreader]
+            assert sorted(selected_results_strings) == sorted(exported_results)
 
         # import the CSV file with COPY FROM
         session.execute("TRUNCATE ks.testcopyto")
         node1.run_cqlsh(cmds="COPY ks.testcopyto FROM '%s'" % 
(self.tempfile.name,))
         new_results = list(session.execute("SELECT * FROM testcopyto"))
-        assert results == new_results
+        assert sorted(selected_results) == sorted(new_results)
 
     def test_float_formatting(self):
         """ Tests for CASSANDRA-9224, check format of float and double 
values"""
@@ -1301,7 +1405,7 @@ VALUES (4, blobAsInt(0x), '', blobAsBigint(0x), 0x, blobAsBoolean(0x), blobAsDec
             INSERT INTO values (part, val1, val2, val3, val4) VALUES ('min', %d, %d, -32768, -128);
             INSERT INTO values (part, val1, val2, val3, val4) VALUES ('max', %d, %d, 32767, 127)""" % (-1 << 31, -1 << 63, (1 << 31) - 1, (1 << 63) - 1))
 
-        assert len(stderr), 0 == "Failed to execute cqlsh: {}".format(stderr)
+        assert 0 == len(stderr), "Failed to execute cqlsh: {}".format(stderr)
 
         self.verify_output("select * from int_checks.values", node1, """
  part | val1        | val2                 | val3   | val4
@@ -1343,7 +1447,7 @@ CREATE TABLE int_checks.values (
                                         % (datetime.MINYEAR - 1, datetime.MINYEAR, datetime.MAXYEAR, datetime.MAXYEAR + 1,))
         # outside the MIN and MAX range it should print the number of days from the epoch
 
-        assert len(stderr), 0 == "Failed to execute cqlsh: {}".format(stderr)
+        assert 0 == len(stderr), "Failed to execute cqlsh: {}".format(stderr)
 
         self.verify_output("select * from datetime_checks.values", node1, """
  d          | t
@@ -1384,7 +1488,7 @@ CREATE TABLE datetime_checks.values (
             INSERT INTO test (id, val) VALUES (2, 'lkjlk');
             INSERT INTO test (id, val) VALUES (3, 'iuiou')""")
 
-        assert len(stderr), 0 == "Failed to execute cqlsh: {}".format(stderr)
+        assert 0 == len(stderr), "Failed to execute cqlsh: {}".format(stderr)
 
         self.verify_output("use tracing_checks; tracing on; select * from 
test", node1, """Now Tracing is enabled
 
@@ -1428,7 +1532,7 @@ Tracing session:""")
             USE client_warnings;
             CREATE TABLE test (id int, val text, PRIMARY KEY (id))""")
 
-        assert len(stderr), 0 == "Failed to execute cqlsh: {}".format(stderr)
+        assert 0 == len(stderr), "Failed to execute cqlsh: {}".format(stderr)
 
         session = self.patient_cql_connection(node1)
         prepared = session.prepare("INSERT INTO client_warnings.test (id, val) 
VALUES (?, 'abc')")
@@ -1450,10 +1554,10 @@ Tracing session:""")
         logger.debug(fut.warnings)
         assert fut.warnings is not None
         assert 1 == len(fut.warnings)
-        assert "Unlogged batch covering {} partitions detected against table 
[client_warnings.test]. "\
-                   .format(max_partitions_per_batch + 1) + "You should use a 
logged batch for atomicity, " \
-                                                           "or asynchronous 
writes for performance." \
-               == fut.warnings[0]
+        expected_fut_warning = ("Unlogged batch covering {} partitions 
detected against table [client_warnings.test]. " +
+                                "You should use a logged batch for atomicity, 
or asynchronous writes for performance.") \
+                                .format(max_partitions_per_batch + 1)
+        assert expected_fut_warning == fut.warnings[0]
 
     def test_connect_timeout(self):
         """
@@ -1517,7 +1621,8 @@ Tracing session:""")
 
         session.execute('DROP TABLE test_ks.lcs_describe')
 
-        create_statement = 'USE test_ks; ' + ' '.join(describe_out.decode("utf-8").splitlines())
+        create_statement = 'USE test_ks; ' + ' '.join(describe_out.splitlines())
+        create_statement = self.strip_read_repair_chance(create_statement)
         create_out, create_err = self.run_cqlsh(node1, create_statement)
 
         # these statements shouldn't fall down
@@ -1525,7 +1630,7 @@ Tracing session:""")
         session.execute('INSERT INTO lcs_describe (key) VALUES (1)')
 
         # the table created before and after should be the same
-        assert reloaded_describe_out.decode("utf-8") == 
describe_out.decode("utf-8")
+        assert reloaded_describe_out == describe_out
 
     @since('3.0')
     def test_materialized_view(self):
@@ -1554,38 +1659,32 @@ Tracing session:""")
         session.execute(insert_stmt + "('user4', 'ch@ngem3d', 'm', 'TX', 
1974);")
 
         describe_out, err = self.run_cqlsh(node1, 'DESCRIBE MATERIALIZED VIEW test.users_by_state')
-        describe_out_str = describe_out.decode("utf-8")
-        err_str = err.decode("utf-8")
-        assert 0 == len(err_str), err_str
+        assert 0 == len(err), err
 
         select_out, err = self.run_cqlsh(node1, "SELECT * FROM test.users_by_state")
-        err_str = err.decode("utf-8")
-        assert 0 == len(err_str), err_str
+        assert 0 == len(err), err
         logger.debug(select_out)
 
-        out, err = self.run_cqlsh(node1, "DROP MATERIALIZED VIEW 
test.users_by_state; DESCRIBE KEYSPACE test; DESCRIBE table test.users")
-        err_str = err.decode("utf-8")
-        assert 0 == len(err_str), err_str
-        assert "CREATE MATERIALIZED VIEW users_by_state" not in out
+        drop_out, err = self.run_cqlsh(node1, "DROP MATERIALIZED VIEW 
test.users_by_state; DESCRIBE KEYSPACE test; DESCRIBE table test.users")
+        assert 0 == len(err), err
+        assert "CREATE MATERIALIZED VIEW users_by_state" not in drop_out
 
-        out, err = self.run_cqlsh(node1, 'DESCRIBE MATERIALIZED VIEW test.users_by_state')
-        describe_out_str = describe_out.decode("utf-8")
-        assert 0 == len(describe_out_str.strip()), describe_out_str
+        describe_after_drop_out, err = self.run_cqlsh(node1, 'DESCRIBE MATERIALIZED VIEW test.users_by_state')
+        assert 0 == len(describe_after_drop_out.strip()), describe_after_drop_out
         assert "Materialized view 'users_by_state' not found" in err
 
-        create_statement = 'USE test; ' + ' '.join(describe_out_str.splitlines()).strip()[:-1]
+        create_statement = 'USE test; ' + ' '.join(describe_out.splitlines()).strip()[:-1]
+        create_statement = self.strip_default_time_to_live(create_statement)
+        create_statement = self.strip_read_repair_chance(create_statement)
         out, err = self.run_cqlsh(node1, create_statement)
-        err_str = err.decode("utf-8")
-        assert 0 == len(err_str), err_str
+        assert 0 == len(err), err
 
         reloaded_describe_out, err = self.run_cqlsh(node1, 'DESCRIBE MATERIALIZED VIEW test.users_by_state')
-        err_str = err.decode("utf-8")
-        assert 0 == len(err_str), err_str
-        assert describe_out_str == reloaded_describe_out
+        assert 0 == len(err), err
+        assert describe_out == reloaded_describe_out
 
         reloaded_select_out, err = self.run_cqlsh(node1, "SELECT * FROM test.users_by_state")
-        err_str = err.decode("utf-8")
-        assert 0 == len(err_str), err_str
+        assert 0 == len(err), err
         assert select_out == reloaded_select_out
 
     @since('3.0')
@@ -1651,6 +1750,10 @@ Tracing session:""")
         assert 0 == len(stdout), stdout
 
     def run_cqlsh(self, node, cmds, cqlsh_options=None, env_vars=None):
+        """
+        Local version of run_cqlsh to open a cqlsh subprocess with
+        additional environment variables.
+        """
         if env_vars is None:
             env_vars = {}
         if cqlsh_options is None:
@@ -1668,14 +1771,15 @@ Tracing session:""")
             port = node.network_interfaces['thrift'][1]
         args = cqlsh_options + [host, str(port)]
         sys.stdout.flush()
-        p = subprocess.Popen([cli] + args, env=env, stdin=subprocess.PIPE, stderr=subprocess.PIPE, stdout=subprocess.PIPE)
+        p = subprocess.Popen([cli] + args, env=env, stdin=subprocess.PIPE,
+                             stderr=subprocess.PIPE, stdout=subprocess.PIPE,
+                             universal_newlines=True)
         for cmd in cmds.split(';'):
             p.stdin.write(cmd + ';\n')
         p.stdin.write("quit;\n")
         return p.communicate()
 
 
-@pytest.mark.skip("These aren't functioning just yet")
 class TestCqlshSmoke(Tester):
     """
     Tests simple use cases for cqlsh.
@@ -1683,9 +1787,9 @@ class TestCqlshSmoke(Tester):
 
     @pytest.fixture(scope='function', autouse=True)
     def fixture_cluster_setup(self, fixture_dtest_setup):
-        self.cluster.populate(1).start(wait_for_binary_proto=True)
-        [self.node1] = self.cluster.nodelist()
-        self.session = self.patient_cql_connection(self.node1)
+        fixture_dtest_setup.cluster.populate(1).start(wait_for_binary_proto=True)
+        [self.node1] = fixture_dtest_setup.cluster.nodelist()
+        self.session = fixture_dtest_setup.patient_cql_connection(self.node1)
 
     def test_uuid(self):
         """
@@ -1711,7 +1815,7 @@ class TestCqlshSmoke(Tester):
         assert len(result[1]) == 1
         assert isinstance(result[0][0], UUID)
         assert isinstance(result[1][0], UUID)
-        self.assertNotEqual(result[0][0], result[1][0])
+        assert result[0][0] != result[1][0]
 
     def test_commented_lines(self):
         create_ks(self.session, 'ks', 1)
@@ -1948,8 +2052,7 @@ class TestCqlshSmoke(Tester):
         return [table.name for table in list(self.session.cluster.metadata.keyspaces[keyspace].tables.values())]
 
 
-@pytest.mark.skip("These aren't functioning just yet")
-class CqlLoginTest(Tester):
+class TestCqlLogin(Tester):
     """
     Tests login which requires password authenticator
     """
@@ -1962,7 +2065,7 @@ class CqlLoginTest(Tester):
         cluster.populate(1).start(wait_for_binary_proto=True)
         [self.node1] = cluster.nodelist()
         self.node1.watch_log_for('Created default superuser')
-        self.session = self.patient_cql_connection(self.node1, user='cassandra', password='cassandra')
+        self.session = fixture_dtest_setup.patient_cql_connection(self.node1, user='cassandra', password='cassandra')
 
     def assert_login_not_allowed(self, user, input):
         message = ("Provided username {user} and/or password are 
incorrect".format(user=user)
diff --git a/cqlsh_tests/cqlsh_copy_tests.py b/cqlsh_tests/test_cqlsh_copy.py
similarity index 97%
rename from cqlsh_tests/cqlsh_copy_tests.py
rename to cqlsh_tests/test_cqlsh_copy.py
index 662fb31..a170c7e 100644
--- a/cqlsh_tests/cqlsh_copy_tests.py
+++ b/cqlsh_tests/test_cqlsh_copy.py
@@ -2,6 +2,7 @@ import csv
 import datetime
 import glob
 import json
+import locale
 import os
 import re
 import sys
@@ -28,6 +29,7 @@ from .cqlsh_tools import (DummyColorMap, assert_csvs_items_equal, csv_rows,
                          monkeypatch_driver, random_list, unmonkeypatch_driver,
                          write_rows_to_csv)
 from dtest import (Tester, create_ks)
+from dtest import (FlakyRetryPolicy, Tester, create_ks)
 from tools.data import rows_to_list
 from tools.metadata_wrapper import (UpdatingClusterMetadataWrapper,
                                     UpdatingTableMetadataWrapper)
@@ -58,29 +60,28 @@ class UTC(datetime.tzinfo):
         return datetime.timedelta(0)
 
 
-@pytest.mark.skip("These aren't functioning just yet")
 class TestCqlshCopy(Tester):
     """
     Tests the COPY TO and COPY FROM features in cqlsh.
     @jira_ticket CASSANDRA-3906
     """
 
-    def __init__(self, *args, **kwargs):
-        Tester.__init__(self, *args, **kwargs)
+    @pytest.fixture(scope='function', autouse=True)
+    def fixture_temp_files(self):
         self._tempfiles = []
+        yield
+        self.delete_temp_files()
 
     @classmethod
     def setUpClass(cls):
         cls._cached_driver_methods = monkeypatch_driver()
+        if locale.getpreferredencoding() != 'UTF-8':
+            os.environ['LC_CTYPE'] = 'en_US.utf8'
 
     @classmethod
     def tearDownClass(cls):
         unmonkeypatch_driver(cls._cached_driver_methods)
 
-    def tearDown(self):
-        self.delete_temp_files()
-        super(TestCqlshCopy, self).tearDown()
-
     def get_temp_file(self, prefix=template, suffix=""):
         """
        On windows we cannot open temporary files after creating them unless we close them first.
@@ -119,8 +120,8 @@ class TestCqlshCopy(Tester):
                 self.cluster.set_configuration_options(values=configuration_options)
             self.cluster.populate(nodes, tokens=tokens).start(wait_for_binary_proto=True)
         else:
-            assert self.cluster.partitioner, p == "Cannot reuse cluster: different partitioner"
-            assert len(self.cluster.nodelist()), nodes == "Cannot reuse cluster: different number of nodes"
+            assert self.cluster.partitioner == p, "Cannot reuse cluster: different partitioner"
+            assert len(self.cluster.nodelist()) == nodes, "Cannot reuse cluster: different number of nodes"
             assert configuration_options is None
 
         self.node1 = self.cluster.nodelist()[0]
@@ -287,6 +288,9 @@ class TestCqlshCopy(Tester):
             def __repr__(self):
                 return '{{{}}}'.format(', '.join([maybe_quote(t) for t in sorted(self._items)]))
 
+            def __hash__(self):
+                return hash(tuple([e for e in self]))
+
         class Name(namedtuple('Name', ('firstname', 'lastname'))):
             __slots__ = ()
 
@@ -365,7 +369,8 @@ class TestCqlshCopy(Tester):
             sys.path = saved_path
 
     def assertCsvResultEqual(self, csv_filename, results, table_name=None,
-                             columns=None, cql_type_names=None, nullval=''):
+                             columns=None, cql_type_names=None, nullval='',
+                             sort_data=True):
         if cql_type_names is None:
             if table_name:
                 table_meta = UpdatingTableMetadataWrapper(
@@ -382,6 +387,11 @@ class TestCqlshCopy(Tester):
         csv_results = list(csv_rows(csv_filename))
 
         self.maxDiff = None
+
+        if (sort_data):
+            csv_results.sort()
+            processed_results.sort()
+
         try:
             assert csv_results == processed_results
         except Exception as e:
@@ -482,6 +492,7 @@ class TestCqlshCopy(Tester):
             processed.append(formatted_row)
         return processed
 
+    @pytest.mark.depends_cqlshlib
     def test_list_data(self):
         """
         Tests the COPY TO command with the list datatype by:
@@ -509,6 +520,7 @@ class TestCqlshCopy(Tester):
 
         self.assertCsvResultEqual(tempfile.name, results, 'testlist')
 
+    @pytest.mark.depends_cqlshlib
     def test_tuple_data(self):
         """
         Tests the COPY TO command with the tuple datatype by:
@@ -566,18 +578,21 @@ class TestCqlshCopy(Tester):
 
         self.assertCsvResultEqual(tempfile.name, results, 'testdelimiter')
 
+    @pytest.mark.depends_cqlshlib
     def test_colon_delimiter(self):
         """
         Use non_default_delimiter_template to test COPY with the delimiter ':'.
         """
         self.non_default_delimiter_template(':')
 
+    @pytest.mark.depends_cqlshlib
     def test_letter_delimiter(self):
         """
         Use non_default_delimiter_template to test COPY with the delimiter 'a'.
         """
         self.non_default_delimiter_template('a')
 
+    @pytest.mark.depends_cqlshlib
     def test_number_delimiter(self):
         """
         Use non_default_delimiter_template to test COPY with the delimiter '1'.
@@ -650,6 +665,7 @@ class TestCqlshCopy(Tester):
         results_imported = list(self.session.execute("SELECT * FROM 
ks.testnullindicator"))
         assert results == results_imported
 
+    @pytest.mark.depends_cqlshlib
     def test_default_null_indicator(self):
         """
         Test the default null indicator.
@@ -658,6 +674,7 @@ class TestCqlshCopy(Tester):
         """
         self.custom_null_indicator_template()
 
+    @pytest.mark.depends_cqlshlib
     def test_default_null_indicator_no_prepared_statements(self):
         """
         Test the default null indicator without prepared statements.
@@ -666,18 +683,21 @@ class TestCqlshCopy(Tester):
         """
         
self.custom_null_indicator_template(copy_from_options={'PREPAREDSTATEMENTS': 
'False'})
 
+    @pytest.mark.depends_cqlshlib
     def test_undefined_as_null_indicator(self):
         """
         Use custom_null_indicator_template to test COPY with NULL = undefined.
         """
         self.custom_null_indicator_template('undefined')
 
+    @pytest.mark.depends_cqlshlib
     def test_undefined_as_null_indicator_no_prepared_statements(self):
         """
         Use custom_null_indicator_template to test COPY with NULL = undefined and no prepared statements.
         """
         self.custom_null_indicator_template('undefined', copy_from_options={'PREPAREDSTATEMENTS': 'False'})
 
+    @pytest.mark.depends_cqlshlib
     def test_null_as_null_indicator(self):
         """
         Use custom_null_indicator_template to test COPY with NULL = 'null'.
@@ -726,11 +746,11 @@ class TestCqlshCopy(Tester):
         self.prepare()
         self.session.execute("""
             CREATE TABLE testheader (
-                a int primary key,
+                a text primary key,
                 b int
             )""")
         insert_statement = self.session.prepare("INSERT INTO testheader (a, b) 
VALUES (?, ?)")
-        args = [(1, 10), (2, 20), (3, 30)]
+        args = [('b', 10), ('c', 20), ('d', 30)]
         execute_concurrent_with_args(self.session, insert_statement, args)
 
         tempfile = self.get_temp_file()
@@ -742,7 +762,7 @@ class TestCqlshCopy(Tester):
         with open(tempfile.name, 'r') as csvfile:
             csv_values = list(csv.reader(csvfile))
 
-        assert csv_values == [['a', 'b'], ['1', '10'], ['2', '20'], ['3', '30']]
+        assert sorted(csv_values) == [['a', 'b'], ['b', '10'], ['c', '20'], ['d', '30']]
 
     def _test_reading_counter_template(self, copy_options=None):
         """
@@ -783,7 +803,9 @@ class TestCqlshCopy(Tester):
         self.run_cqlsh(cmds=cmds)
 
         result = self.session.execute("SELECT * FROM testcounter")
-        assert data == rows_to_list(result)
+        result_as_list = rows_to_list(result)
+        result_as_list.sort()
+        assert data == sorted(result_as_list)
 
     def test_reading_counter(self):
         """
@@ -834,8 +856,10 @@ class TestCqlshCopy(Tester):
         self.run_cqlsh(cmds=cmds)
 
         result = self.session.execute("SELECT * FROM testheader")
-        assert [tuple(d) for d in data] == [tuple(r) for r in rows_to_list(result)]
+        result_as_list = [tuple(r) for r in rows_to_list(result)]
+        assert [tuple(d) for d in data] == sorted(result_as_list)
 
+    @pytest.mark.depends_cqlshlib
     def test_datetimeformat_round_trip(self):
         """
         @jira_ticket CASSANDRA-10633
@@ -876,8 +900,8 @@ class TestCqlshCopy(Tester):
             csv_values = list(csv.reader(csvfile))
 
         assert csv_values == [['1', '2015/01/01 07:00'],
-                               ['2', '2015/06/10 12:30'],
-                               ['3', '2015/12/31 23:59']]
+                              ['2', '2015/06/10 12:30'],
+                              ['3', '2015/12/31 23:59']]
 
         self.session.execute("TRUNCATE testdatetimeformat")
         cmds = "COPY ks.testdatetimeformat FROM 
'{name}'".format(name=tempfile.name)
@@ -924,7 +948,7 @@ class TestCqlshCopy(Tester):
         self.run_cqlsh(cmds="COPY ks.testttl FROM '{name}' WITH TTL = 
'5'".format(name=tempfile.name))
 
         result = rows_to_list(self.session.execute("SELECT * FROM testttl"))
-        assert data == result
+        assert data == sorted(result)
 
         time.sleep(10)
 
@@ -965,8 +989,7 @@ class TestCqlshCopy(Tester):
 
             expected_rows = num_rows if 0 <= num_rows < num_file_rows else num_file_rows
             expected_rows -= min(num_file_rows, max(0, skip_rows))
-            self.assertEqual([[expected_rows]],
-                             rows_to_list(self.session.execute("SELECT COUNT(*) FROM {}".format(stress_table))))
+            assert [[expected_rows]] == rows_to_list(self.session.execute("SELECT COUNT(*) FROM {}".format(stress_table)))
             logger.debug('Imported {} as expected'.format(expected_rows))
 
         # max rows tests
@@ -1020,7 +1043,7 @@ class TestCqlshCopy(Tester):
             out, err, _ = self.run_cqlsh(cmds="COPY ks.testskipcols FROM '{}' 
WITH SKIPCOLS = '{}'"
                                          .format(tempfile.name, skip_cols))
             logger.debug(out)
-            assert expected_results == 
rows_to_list(self.session.execute("SELECT * FROM ks.testskipcols"))
+            assert expected_results == 
sorted(rows_to_list(self.session.execute("SELECT * FROM ks.testskipcols")))
 
         do_test('c, d ,e', [[1, 2, None, None, None], [6, 7, None, None, 
None]])
         do_test('b,', [[1, None, 3, 4, 5], [6, None, 8, 9, 10]])
@@ -1068,7 +1091,7 @@ class TestCqlshCopy(Tester):
             out, err, _ = self.run_cqlsh(cmds="COPY ks.testskipcols FROM '{}' 
WITH SKIPCOLS = '{}'"
                                          .format(tempfile.name, skip_cols))
             logger.debug(out)
-            assert expected_results == 
rows_to_list(self.session.execute("SELECT * FROM ks.testskipcols"))
+            assert expected_results == 
sorted(rows_to_list(self.session.execute("SELECT * FROM ks.testskipcols")))
 
         do_test('c, d ,e', [[1, 1, None, None, None], [2, 1, None, None, 
None]])
         do_test('b', [[1, 1, 1, 1, 1], [2, 1, 1, 1, 1]])
@@ -1236,6 +1259,7 @@ class TestCqlshCopy(Tester):
         do_test(100, 50)
         do_test(50, 50)
 
+    @pytest.mark.depends_cqlshlib
     def test_reading_with_parse_errors(self):
         """
         Test importing a CSV file where not all rows can be parsed:
@@ -1302,6 +1326,7 @@ class TestCqlshCopy(Tester):
         do_test(100, 2, 1, None)
         do_test(10, 50, 1, None)
 
+    @pytest.mark.depends_cqlshlib
     def test_reading_with_wrong_number_of_columns(self):
         """
         Test importing a CSV file where not all rows have the correct number of columns:
@@ -1391,8 +1416,7 @@ class TestCqlshCopy(Tester):
             logger.debug("Importing csv files {}".format(temp_files_str))
             self.run_cqlsh(cmds="COPY ks.testmultifiles FROM 
'{}'".format(temp_files_str))
 
-            self.assertEqual([[num_rows_per_file * len(tempfiles)]],
-                             rows_to_list(self.session.execute("SELECT COUNT(*) FROM testmultifiles")))
+            assert [[num_rows_per_file * len(tempfiles)]] == rows_to_list(self.session.execute("SELECT COUNT(*) FROM testmultifiles"))
 
         import_and_check(','.join([tempfile.name for tempfile in tempfiles]))
         import_and_check(os.path.join(gettempdir(), 'testreadmult*.csv'))
@@ -1431,16 +1455,19 @@ class TestCqlshCopy(Tester):
                 num_lines.append(len(open(os.path.join(gettempdir(), f)).readlines()))
                 os.unlink(f)
 
-            num_expected_files = num_records / max_size if num_records % max_size == 0 else (num_records / max_size + 1)
+            num_expected_files = int(num_records / max_size) if (num_records % max_size == 0) else int(num_records / max_size) + 1
             assert num_expected_files == len(output_files)
-            assert num_records + 1 if header else num_records == sum(num_lines)
+            if header:
+                assert (num_records + 1) == sum(num_lines)
+            else:
+                assert num_records == sum(num_lines)
 
             for i, n in enumerate(sorted(num_lines, reverse=True)):
-                if i < num_records / max_size:
-                    num_expected_lines = max_size + 1 if i == 0 and header else max_size
+                if i < int(num_records / max_size):
+                    num_expected_lines = max_size + 1 if (i == 0 and header) else max_size
                     assert num_expected_lines == n
                 else:
-                    assert num_records % max_size == n
+                    assert (num_records % max_size) == n
 
         do_test(1000, False)
         do_test(1000, True)
@@ -1477,13 +1504,14 @@ class TestCqlshCopy(Tester):
         self.run_cqlsh("COPY ks.testorder (a, c, b) TO 
'{name}'".format(name=tempfile.name))
 
         reference_file = self.get_temp_file()
-        with open(reference_file.name, 'wb') as csvfile:
+        with open(reference_file.name, 'w') as csvfile:
             writer = csv.writer(csvfile)
             for a, b, c in data:
                 writer.writerow([a, c, b])
 
         assert_csvs_items_equal(tempfile.name, reference_file.name)
 
+    @pytest.mark.depends_cqlshlib
     def test_explicit_column_order_reading(self):
         """
         Test that COPY can write to a CSV file when the order of columns is
@@ -1514,7 +1542,7 @@ class TestCqlshCopy(Tester):
 
         results = list(self.session.execute("SELECT * FROM testorder"))
         reference_file = self.get_temp_file()
-        with open(reference_file.name, 'wb') as csvfile:
+        with open(reference_file.name, 'w') as csvfile:
             writer = csv.writer(csvfile)
             for a, b, c in data:
                 writer.writerow([a, c, b])
@@ -1557,6 +1585,7 @@ class TestCqlshCopy(Tester):
         results = list(self.session.execute("SELECT * FROM testquoted"))
         self.assertCsvResultEqual(tempfile.name, results, 'testquoted')
 
+    @pytest.mark.depends_cqlshlib
     def test_quoted_column_names_reading_specify_names(self):
         """
         Use quoted_column_names_reading_template to test reading from a CSV file
@@ -1565,6 +1594,7 @@ class TestCqlshCopy(Tester):
         """
         self.quoted_column_names_reading_template(specify_column_names=True)
 
+    @pytest.mark.depends_cqlshlib
     def test_quoted_column_names_reading_dont_specify_names(self):
         """
         Use quoted_column_names_reading_template to test reading from a CSV file
@@ -1612,6 +1642,7 @@ class TestCqlshCopy(Tester):
 
             assert_csvs_items_equal(tempfile.name, reference_file.name)
 
+    @pytest.mark.depends_cqlshlib
     def test_data_validation_on_read_template(self):
         """
         Test that reading from CSV files fails when there is a type mismatch
@@ -1714,6 +1745,7 @@ class TestCqlshCopy(Tester):
         assert 'child process(es) died unexpectedly' not in err
         assert not results
 
+    @pytest.mark.depends_cqlshlib
     def test_all_datatypes_write(self):
         """
         Test that, after COPYing a table containing all CQL datatypes to a CSV
@@ -1744,6 +1776,7 @@ class TestCqlshCopy(Tester):
         _test(True)
         _test(False)
 
+    @pytest.mark.depends_cqlshlib
     def test_all_datatypes_read(self):
         """
         Test that, after COPYing a CSV file to a table containing all CQL
@@ -1779,6 +1812,7 @@ class TestCqlshCopy(Tester):
         _test(True)
         _test(False)
 
+    @pytest.mark.depends_cqlshlib
     def test_all_datatypes_round_trip(self):
         """
         Test that a table containing all CQL datatypes successfully round-trips
@@ -1846,7 +1880,8 @@ class TestCqlshCopy(Tester):
                 assert expected_err in err
                 return
 
-            assert [['0', falseval], ['1', trueval]] == list(csv_rows(tempfile.name))
+            tempfile_rows_as_list = list(csv_rows(tempfile.name))
+            assert [['0', falseval], ['1', trueval]] == sorted(tempfile_rows_as_list)
             exported_results = list(self.session.execute("SELECT * FROM 
testbooleans"))
 
             logger.debug('Importing from csv file: {}'.format(tempfile.name))
@@ -1855,7 +1890,7 @@ class TestCqlshCopy(Tester):
                            .format(tempfile.name, trueval, falseval))
 
             imported_results = list(self.session.execute("SELECT * FROM 
testbooleans"))
-            assert exported_results == imported_results
+            assert sorted(exported_results) == sorted(imported_results)
 
         self.prepare()
         self.session.execute("""
@@ -1882,6 +1917,7 @@ class TestCqlshCopy(Tester):
         do_round_trip('', '', invalid=True)
         do_round_trip('yes, no', 'maybe', invalid=True)
 
+    @pytest.mark.depends_cqlshlib
     def test_number_separators_round_trip(self):
         """
         Test that a CSV file containing numbers with decimal and thousands separators in a different format
@@ -2007,7 +2043,7 @@ class TestCqlshCopy(Tester):
 
             exported_results = list(self.session.execute("SELECT * FROM 
testnumberseps"))
             self.maxDiff = None
-            assert expected_vals == list(csv_rows(tempfile.name))
+            assert expected_vals == sorted(list(csv_rows(tempfile.name)))
 
             logger.debug('Importing from csv file: {} with thousands_sep {} and decimal_sep {}'
                   .format(tempfile.name, thousands_sep, decimal_sep))
@@ -2145,7 +2181,7 @@ class TestCqlshCopy(Tester):
                                    .format(stress_table, tempfile.name, num_processes))
         logger.debug(out)
         assert 'Using {} child processes'.format(num_processes) in out
-        assert [[num_records]] == rows_to_list(self.session.execute("SELECT 
COUNT(* FROM {}"
+        assert [[num_records]] == rows_to_list(self.session.execute("SELECT 
COUNT(*) FROM {}"
                                                                             
.format(stress_table)))
 
     def test_round_trip_with_rate_file(self):
@@ -2162,12 +2198,8 @@ class TestCqlshCopy(Tester):
         tempfile = self.get_temp_file()
 
         def check_rate_file():
-            # check that the rate file has at least 10 lines (given that the report
-            # frequency is every 100 milliseconds this should be the number of lines written in 1 second)
-            # and that the last line indicates all rows were processed
             lines = [line.rstrip('\n') for line in open(ratefile.name)]
             logger.debug(lines)
-            assert 10 <= len(lines), "Expected at least 10 lines but got {} lines".format(len(lines))
             assert lines[-1].startswith('Processed: {} rows;'.format(num_rows))
 
         self.prepare()
@@ -2193,7 +2225,7 @@ class TestCqlshCopy(Tester):
                        .format(stress_table, tempfile.name, ratefile.name, report_frequency))
 
         # check all records were imported
-        assert [[num_rows]] == rows_to_list(self.session.execute("SELECT 
COUNT(* FROM {}"
+        assert [[num_rows]] == rows_to_list(self.session.execute("SELECT 
COUNT(*) FROM {}"
                                                                          
.format(stress_table)))
 
         check_rate_file()
@@ -2218,7 +2250,7 @@ class TestCqlshCopy(Tester):
             config_file = self.get_temp_file()
             logger.debug('Creating config file {}'.format(config_file.name))
 
-            with open(config_file.name, 'wb') as config:
+            with open(config_file.name, 'w') as config:
                 for line in config_lines:
                     config.write(line + os.linesep)
                 config.close()
@@ -2233,7 +2265,7 @@ class TestCqlshCopy(Tester):
             return ''
 
         def check_options(out, expected_options):
-            opts = extract_options(out.decode("utf-8"))
+            opts = extract_options(out)
             logger.debug('Options: {}'.format(opts))
             d = json.loads(opts)
             for k, v in expected_options:
@@ -2355,7 +2387,7 @@ class TestCqlshCopy(Tester):
         logger.debug(out)
 
         new_results = list(self.session.execute("SELECT * FROM testcopyto"))
-        assert results == new_results
+        assert sorted(results) == sorted(new_results)
 
     def test_round_trip_murmur3(self):
         self._test_round_trip(nodes=3, partitioner="murmur3")
@@ -2412,7 +2444,7 @@ class TestCqlshCopy(Tester):
 
         self.run_cqlsh(cmds="SOURCE '{name}'".format(name=commandfile.name))
         new_results = list(self.session.execute("SELECT * FROM testcopyto"))
-        assert results == new_results
+        assert sorted(results) == sorted(new_results)
 
     def _test_bulk_round_trip(self, nodes, partitioner,
                               num_operations, profile=None,
@@ -2464,7 +2496,8 @@ class TestCqlshCopy(Tester):
             if skip_count_checks:
                 return num_operations
             else:
-                count_statement = SimpleStatement("SELECT COUNT(*) FROM 
{}".format(stress_table), consistency_level=ConsistencyLevel.ALL)
+                count_statement = SimpleStatement("SELECT COUNT(*) FROM 
{}".format(stress_table), consistency_level=ConsistencyLevel.ALL,
+                                                  
retry_policy=FlakyRetryPolicy(max_retries=3))
                 ret = rows_to_list(self.session.execute(count_statement))[0][0]
                 logger.debug('Generated {} records'.format(ret))
                 assert ret >= num_operations, 'cassandra-stress did not import enough records'
@@ -2511,8 +2544,7 @@ class TestCqlshCopy(Tester):
         run_copy_to(tempfile2)
 
         # check the length of both files is the same to ensure all exported records were imported
-        self.assertEqual(sum(1 for _ in open(tempfile1.name)),
-                         sum(1 for _ in open(tempfile2.name)))
+        assert sum(1 for _ in open(tempfile1.name)) == sum(1 for _ in open(tempfile2.name))
 
         return ret
 
@@ -2540,7 +2572,7 @@ class TestCqlshCopy(Tester):
 
         @jira_ticket CASSANDRA-9302
         """
-        self._test_bulk_round_trip(nodes=5, partitioner="murmur3", num_operations=10000,
+        self._test_bulk_round_trip(nodes=3, partitioner="murmur3", num_operations=10000,
                                    configuration_options={'batch_size_warn_threshold_in_kb': '10'},
                                    profile=os.path.join(os.path.dirname(os.path.realpath(__file__)), 'blogposts.yaml'),
                                    stress_table='stresscql.blogposts')
@@ -2554,7 +2586,7 @@ class TestCqlshCopy(Tester):
 
         @jira_ticket CASSANDRA-10938
         """
-        self._test_bulk_round_trip(nodes=5, partitioner="murmur3", num_operations=10000,
+        self._test_bulk_round_trip(nodes=3, partitioner="murmur3", num_operations=10000,
                                    configuration_options={'native_transport_max_concurrent_connections': '12',
                                                           'batch_size_warn_threshold_in_kb': '10'},
                                    profile=os.path.join(os.path.dirname(os.path.realpath(__file__)), 'blogposts.yaml'),
@@ -2877,6 +2909,7 @@ class TestCqlshCopy(Tester):
         num_records_imported = rows_to_list(self.session.execute("SELECT 
COUNT(*) FROM {}".format(stress_table)))[0][0]
         assert num_records_imported < num_records
 
+    @pytest.mark.depends_cqlshlib
     @since('2.2.5')
     def test_copy_from_with_large_cql_rows(self):
         """
@@ -2921,12 +2954,14 @@ class TestCqlshCopy(Tester):
         results = list(self.session.execute("SELECT * FROM 
{}".format(stress_ks_table_name)))
         self.assertCsvResultEqual(tempfile.name, results, stress_table_name)
 
+    @pytest.mark.depends_cqlshlib
     def test_copy_from_with_brackets_in_UDT(self):
         """
         Test that we can import a user defined type even when it contains brackets in its values.
 
         @jira_ticket CASSANDRA-11633
         """
+
         self.prepare()
 
         self.session.execute('CREATE TYPE udt_with_special_chars (val1 text, val2 text, val3 text)')
@@ -3068,8 +3103,7 @@ class TestCqlshCopy(Tester):
         self.run_cqlsh(cmds=cmds)
 
         res = rows_to_list(self.session.execute("SELECT COUNT(*) FROM 
ks.test_pk_timestamps_with_counters"))[0][0]
-        self.assertEqual(len(records), res,
-                         msg="Failed to import one or more rows, expected {} 
but got {}".format(len(records), res))
+        assert len(records) == res, "Failed to import one or more rows, 
expected {} but got {}".format(len(records), res)
 
     def test_copy_from_with_wrong_order_or_missing_UDT_fields(self):
         """
@@ -3120,6 +3154,7 @@ class TestCqlshCopy(Tester):
         _test(True)
         _test(False)
 
+    @pytest.mark.depends_cqlshlib
     @since('2.2')
     def test_reading_text_pk_counters(self):
         """
@@ -3153,6 +3188,7 @@ class TestCqlshCopy(Tester):
         res = list(self.session.execute("SELECT * FROM 
ks.test_reading_text_pk_counters"))
         self.assertCsvResultEqual(tempfile.name, res, 
'test_reading_text_pk_counters')
 
+    @pytest.mark.depends_cqlshlib
     @since('2.2')
     def test_reading_text_pk_no_prepared_statements(self):
         """
@@ -3186,6 +3222,7 @@ class TestCqlshCopy(Tester):
         res = list(self.session.execute("SELECT * FROM 
ks.test_reading_text_pk_no_prepared_statements"))
         self.assertCsvResultEqual(tempfile.name, res, 
'test_reading_text_pk_no_prepared_statements')
 
+    @pytest.mark.depends_cqlshlib
     @since('3.0')
     def test_reading_empty_strings_for_different_types(self):
         """
@@ -3228,6 +3265,7 @@ class TestCqlshCopy(Tester):
         _test(True)
         _test(False)
 
+    @pytest.mark.depends_cqlshlib
     @since('3.0')
     def test_unusual_dates(self):
         """


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscr...@cassandra.apache.org
For additional commands, e-mail: commits-h...@cassandra.apache.org