Hello community,

here is the log from the commit of package python-cassandra-driver for 
openSUSE:Factory checked in at 2018-12-03 10:12:30
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/python-cassandra-driver (Old)
 and      /work/SRC/openSUSE:Factory/.python-cassandra-driver.new.19453 (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Package is "python-cassandra-driver"

Mon Dec  3 10:12:30 2018 rev:6 rq:653425 version:3.16.0

Changes:
--------
--- /work/SRC/openSUSE:Factory/python-cassandra-driver/python-cassandra-driver.changes  2018-11-01 14:46:10.550866625 +0100
+++ /work/SRC/openSUSE:Factory/.python-cassandra-driver.new.19453/python-cassandra-driver.changes       2018-12-03 10:12:36.531585796 +0100
@@ -1,0 +2,22 @@
+Sat Dec  1 18:26:17 UTC 2018 - Arun Persaud <[email protected]>
+
+- update to version 3.16.0:
+  * Bug Fixes
+    + Improve and fix socket error-catching code in nonblocking-socket
+      reactors (PYTHON-1024)
+    + Non-ASCII characters in schema break CQL string generation
+      (PYTHON-1008)
+    + Fix OSS driver's virtual table support against DSE 6.0.X and
+      future server releases (PYTHON-1020)
+    + ResultSet.one() fails if the row_factory is using a generator
+      (PYTHON-1026)
+    + Log profile name on attempt to create existing profile
+      (PYTHON-944)
+    + Cluster instantiation fails if any contact points' hostname
+      resolution fails (PYTHON-895)
+  * Other
+    + Fix tests when RF is not maintained if we decommission a node
+      (PYTHON-1017)
+    + Fix wrong use of ResultSet indexing (PYTHON-1015)
+
+-------------------------------------------------------------------

Old:
----
  3.15.1.tar.gz

New:
----
  3.16.0.tar.gz

++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Other differences:
------------------
++++++ python-cassandra-driver.spec ++++++
--- /var/tmp/diff_new_pack.jgSJiV/_old  2018-12-03 10:12:37.175585199 +0100
+++ /var/tmp/diff_new_pack.jgSJiV/_new  2018-12-03 10:12:37.175585199 +0100
@@ -18,7 +18,7 @@
 
 %{?!python_module:%define python_module() python-%{**} python3-%{**}}
 Name:           python-cassandra-driver
-Version:        3.15.1
+Version:        3.16.0
 Release:        0
 Summary:        Python driver for Cassandra
 License:        Apache-2.0

++++++ 3.15.1.tar.gz -> 3.16.0.tar.gz ++++++
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/python-driver-3.15.1/CHANGELOG.rst new/python-driver-3.16.0/CHANGELOG.rst
--- old/python-driver-3.15.1/CHANGELOG.rst      2018-09-06 21:55:48.000000000 +0200
+++ new/python-driver-3.16.0/CHANGELOG.rst      2018-11-12 19:19:38.000000000 +0100
@@ -1,3 +1,21 @@
+3.16.0
+======
+November 12, 2018
+
+Bug Fixes
+---------
+* Improve and fix socket error-catching code in nonblocking-socket reactors (PYTHON-1024)
+* Non-ASCII characters in schema break CQL string generation (PYTHON-1008)
+* Fix OSS driver's virtual table support against DSE 6.0.X and future server releases (PYTHON-1020)
+* ResultSet.one() fails if the row_factory is using a generator (PYTHON-1026)
+* Log profile name on attempt to create existing profile (PYTHON-944)
+* Cluster instantiation fails if any contact points' hostname resolution fails (PYTHON-895)
+
+Other
+-----
+* Fix tests when RF is not maintained if we decommission a node (PYTHON-1017)
+* Fix wrong use of ResultSet indexing (PYTHON-1015)
+
 3.15.1
 ======
 September 6, 2018
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/python-driver-3.15.1/README-dev.rst new/python-driver-3.16.0/README-dev.rst
--- old/python-driver-3.15.1/README-dev.rst     2018-09-06 21:55:48.000000000 +0200
+++ new/python-driver-3.16.0/README-dev.rst     2018-11-12 19:19:38.000000000 +0100
@@ -79,35 +79,35 @@
 =================
 In order for the extensions to be built and used in the test, run::
 
-    python setup.py nosetests
+    nosetests
 
 You can run a specific test module or package like so::
 
-    python setup.py nosetests -w tests/unit/
+    nosetests -w tests/unit/
 
 You can run a specific test method like so::
 
-    python setup.py nosetests -w tests/unit/test_connection.py:ConnectionTest.test_bad_protocol_version
+    nosetests -w tests/unit/test_connection.py:ConnectionTest.test_bad_protocol_version
 
 Seeing Test Logs in Real Time
 -----------------------------
 Sometimes it's useful to output logs for the tests as they run::
 
-    python setup.py nosetests -w tests/unit/ --nocapture --nologcapture
+    nosetests -w tests/unit/ --nocapture --nologcapture
 
 Use tee to capture logs and see them on your terminal::
 
-    python setup.py nosetests -w tests/unit/ --nocapture --nologcapture 2>&1 | tee test.log
+    nosetests -w tests/unit/ --nocapture --nologcapture 2>&1 | tee test.log
 
 Specifying a Cassandra Version for Integration Tests
 ----------------------------------------------------
 You can specify a cassandra version with the ``CASSANDRA_VERSION`` environment variable::
 
-    CASSANDRA_VERSION=2.0.9 python setup.py nosetests -w tests/integration/standard
+    CASSANDRA_VERSION=2.0.9 nosetests -w tests/integration/standard
 
 You can also specify a cassandra directory (to test unreleased versions)::
 
-    CASSANDRA_DIR=/home/thobbs/cassandra python setup.py nosetests -w tests/integration/standard
+    CASSANDRA_DIR=/home/thobbs/cassandra nosetests -w tests/integration/standard
 
 Specifying the usage of an already running Cassandra cluster
 ----------------------------------------------------
@@ -120,7 +120,7 @@
 The protocol version defaults to 1 for cassandra 1.2 and 2 otherwise.  You can explicitly set
 it with the ``PROTOCOL_VERSION`` environment variable::
 
-    PROTOCOL_VERSION=3 python setup.py nosetests -w tests/integration/standard
+    PROTOCOL_VERSION=3 nosetests -w tests/integration/standard
 
 Testing Multiple Python Versions
 --------------------------------
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/python-driver-3.15.1/build.yaml new/python-driver-3.16.0/build.yaml
--- old/python-driver-3.15.1/build.yaml 2018-09-06 21:55:48.000000000 +0200
+++ new/python-driver-3.16.0/build.yaml 2018-11-12 19:19:38.000000000 +0100
@@ -20,6 +20,17 @@
       exclude:
         - python: [3.4, 3.6]
 
+  commit_long_test:
+    schedule: per_commit
+    branches:
+      include: [/long-python.*/]
+    env_vars: |
+      EVENT_LOOP_MANAGER='libev'
+    matrix:
+      exclude:
+        - python: [3.4, 3.6]
+        - cassandra: ['2.0', '2.1', '3.0']
+
   commit_branches:
     schedule: per_commit
     branches:
@@ -107,6 +118,7 @@
   - '3.0'
   - '3.11'
   - 'test-dse'
+  - 'dse-6.7'
 
 env:
   CYTHON:
@@ -148,6 +160,13 @@
         exit 0
       fi
 
+      if [[ $CCM_IS_DSE == 'true' ]]; then
+        # We only use a DSE version for unreleased DSE versions, so we only need to run the smoke tests here
+        echo "CCM_IS_DSE: $CCM_IS_DSE"
+        echo "==========RUNNING SMOKE TESTS==========="
+        EVENT_LOOP_MANAGER=$EVENT_LOOP_MANAGER CCM_ARGS="$CCM_ARGS" CASSANDRA_VERSION=$CCM_CASSANDRA_VERSION DSE_VERSION='6.7.0' MAPPED_CASSANDRA_VERSION=$MAPPED_CASSANDRA_VERSION VERIFY_CYTHON=$FORCE_CYTHON nosetests -s -v --logging-format="[%(levelname)s] %(asctime)s %(thread)d: %(message)s" --with-ignore-docstrings --with-xunit --xunit-file=standard_results.xml tests/integration/standard/test_dse.py || true
+        exit 0
+      fi
 
       # Run the unit tests, this is not done in travis because
       # it takes too much time for the whole matrix to build with cython
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/python-driver-3.15.1/cassandra/__init__.py new/python-driver-3.16.0/cassandra/__init__.py
--- old/python-driver-3.15.1/cassandra/__init__.py      2018-09-06 21:55:48.000000000 +0200
+++ new/python-driver-3.16.0/cassandra/__init__.py      2018-11-12 19:19:38.000000000 +0100
@@ -22,7 +22,7 @@
 
 logging.getLogger('cassandra').addHandler(NullHandler())
 
-__version_info__ = (3, 15, 1)
+__version_info__ = (3, 16, 0)
 __version__ = '.'.join(map(str, __version_info__))
 
 
@@ -686,3 +686,13 @@
     for more details.
     """
     pass
+
+
+class UnresolvableContactPoints(DriverException):
+    """
+    The driver was unable to resolve any provided hostnames.
+
+    Note that this is *not* raised when a :class:`.Cluster` is created with no
+    contact points, only when lookup fails for all hosts
+    """
+    pass
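
The UnresolvableContactPoints exception added above is raised during Cluster construction when none of the configured contact points can be resolved (PYTHON-895); a single bad hostname among otherwise resolvable contact points is merely skipped with a debug log. A minimal usage sketch, assuming driver 3.16.0 and placeholder addresses::

    # Sketch only: the addresses below are placeholders.
    from cassandra import UnresolvableContactPoints
    from cassandra.cluster import Cluster

    try:
        # One unresolvable name among resolvable contact points is tolerated.
        cluster = Cluster(['127.0.0.1', 'dns.invalid'])
    except UnresolvableContactPoints:
        # Raised only when hostname resolution fails for every contact point.
        print('no contact point could be resolved')
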
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/python-driver-3.15.1/cassandra/cluster.py new/python-driver-3.16.0/cassandra/cluster.py
--- old/python-driver-3.15.1/cassandra/cluster.py       2018-09-06 21:55:48.000000000 +0200
+++ new/python-driver-3.16.0/cassandra/cluster.py       2018-11-12 19:19:38.000000000 +0100
@@ -43,7 +43,8 @@
 
 from cassandra import (ConsistencyLevel, AuthenticationFailed,
                        OperationTimedOut, UnsupportedOperation,
-                       SchemaTargetType, DriverException, ProtocolVersion)
+                       SchemaTargetType, DriverException, ProtocolVersion,
+                       UnresolvableContactPoints)
 from cassandra.connection import (ConnectionException, ConnectionShutdown,
                                   ConnectionHeartbeat, ProtocolVersionUnsupported)
 from cassandra.cqltypes import UserType
@@ -86,6 +87,7 @@
     import gevent.socket
     return socket.socket is gevent.socket.socket
 
+
 # default to gevent when we are monkey patched with gevent, eventlet when
 # monkey patched with eventlet, otherwise if libev is available, use that as
 # the default because it's fastest. Otherwise, use asyncore.
@@ -181,6 +183,7 @@
     for cluster in clusters:
         cluster.shutdown()
 
+
 atexit.register(_shutdown_clusters)
 
 
@@ -190,6 +193,35 @@
     return DCAwareRoundRobinPolicy()
 
 
+def _addrinfo_or_none(contact_point, port):
+    """
+    A helper function that wraps socket.getaddrinfo and returns None
+    when it fails to, e.g. resolve one of the hostnames. Used to address
+    PYTHON-895.
+    """
+    try:
+        return socket.getaddrinfo(contact_point, port,
+                                  socket.AF_UNSPEC, socket.SOCK_STREAM)
+    except socket.gaierror:
+        log.debug('Could not resolve hostname "{}" '
+                  'with port {}'.format(contact_point, port))
+        return None
+
+
+def _resolve_contact_points(contact_points, port):
+    resolved = tuple(_addrinfo_or_none(p, port)
+                     for p in contact_points)
+
+    if resolved and all((x is None for x in resolved)):
+        raise UnresolvableContactPoints(contact_points, port)
+
+    resolved = tuple(r for r in resolved if r is not None)
+
+    return [endpoint[4][0]
+            for addrinfo in resolved
+            for endpoint in addrinfo]
+
+
 class ExecutionProfile(object):
     load_balancing_policy = None
     """
@@ -822,8 +854,8 @@
 
         self.port = port
 
-        self.contact_points_resolved = [endpoint[4][0] for a in self.contact_points
-                                        for endpoint in socket.getaddrinfo(a, self.port, socket.AF_UNSPEC, socket.SOCK_STREAM)]
+        self.contact_points_resolved = _resolve_contact_points(self.contact_points,
+                                                               self.port)
 
         self.compression = compression
 
@@ -1058,7 +1090,7 @@
         if self._config_mode == _ConfigMode.LEGACY:
             raise ValueError("Cannot add execution profiles when legacy 
parameters are set explicitly.")
         if name in self.profile_manager.profiles:
-            raise ValueError("Profile %s already exists")
+            raise ValueError("Profile {} already exists".format(name))
         contact_points_but_no_lbp = (
             self._contact_points_explicit and not
             profile._load_balancing_policy_explicit)
@@ -1086,7 +1118,6 @@
         if not_done:
             raise OperationTimedOut("Failed to create all new connection pools in the %ss timeout.")
 
-
     def get_min_requests_per_connection(self, host_distance):
         return self._min_requests_per_connection[host_distance]
 
@@ -2029,7 +2060,6 @@
     .. versionadded:: 3.8.0
     """
 
-
     encoder = None
     """
     A :class:`~cassandra.encoder.Encoder` instance that will be used when
@@ -2219,7 +2249,6 @@
             load_balancing_policy = execution_profile.load_balancing_policy
             spec_exec_policy = execution_profile.speculative_execution_policy
 
-
         fetch_size = query.fetch_size
         if fetch_size is FETCH_SIZE_UNSET and self._protocol_version >= 2:
             fetch_size = self.default_fetch_size
@@ -4244,7 +4273,14 @@
         you know a query returns a single row. Consider using an iterator if the
         ResultSet contains more than one row.
         """
-        return self._current_rows[0] if self._current_rows else None
+        row = None
+        if self._current_rows:
+            try:
+                row = self._current_rows[0]
+            except TypeError:  # generator object is not subscriptable, PYTHON-1026
+                row = next(iter(self._current_rows))
+
+        return row
 
     def __iter__(self):
         if self._list_mode:
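
The ResultSet.one() change above (PYTHON-1026) amounts to falling back from indexing to iteration when the configured row_factory returned a generator. A driver-independent sketch of that fallback logic::

    def first_or_none(rows):
        """Return the first element of rows, whether it is a sequence or a generator."""
        if not rows:
            return None
        try:
            return rows[0]
        except TypeError:  # generators are not subscriptable
            return next(iter(rows), None)

    assert first_or_none([1, 2, 3]) == 1
    assert first_or_none(i for i in [4, 5]) == 4
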
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/python-driver-3.15.1/cassandra/cqlengine/query.py new/python-driver-3.16.0/cassandra/cqlengine/query.py
--- old/python-driver-3.15.1/cassandra/cqlengine/query.py       2018-09-06 21:55:48.000000000 +0200
+++ new/python-driver-3.16.0/cassandra/cqlengine/query.py       2018-11-12 19:19:38.000000000 +0100
@@ -76,7 +76,7 @@
     except Exception:
         applied = True  # result was not LWT form
     if not applied:
-        raise LWTException(result[0])
+        raise LWTException(result.one())
 
 
 class AbstractQueryableColumn(UnicodeMixin):
@@ -841,7 +841,7 @@
             query = self._select_query()
             query.count = True
             result = self._execute(query)
-            count_row = result[0].popitem()
+            count_row = result.one().popitem()
             self._count = count_row[1]
         return self._count
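
The cqlengine changes above replace positional indexing into a ResultSet with ResultSet.one() (PYTHON-1015), which works whether or not the rows have been materialized as a list. A hedged sketch of reading an LWT result that way; the session, keyspace, table and dict_factory row factory are all assumptions of the example::

    # Sketch only: `session`, the ks.users table and dict_factory rows are assumed.
    result = session.execute(
        "INSERT INTO ks.users (id, name) VALUES (1, 'a') IF NOT EXISTS")
    row = result.one()  # preferred over result[0]
    if not row['[applied]']:
        print('conditional insert was not applied')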
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/python-driver-3.15.1/cassandra/encoder.py new/python-driver-3.16.0/cassandra/encoder.py
--- old/python-driver-3.15.1/cassandra/encoder.py       2018-09-06 21:55:48.000000000 +0200
+++ new/python-driver-3.16.0/cassandra/encoder.py       2018-11-12 19:19:38.000000000 +0100
@@ -224,12 +224,15 @@
         """
         return '{%s}' % ', '.join(self.mapping.get(type(v), self.cql_encode_object)(v) for v in val)
 
-    def cql_encode_all_types(self, val):
+    def cql_encode_all_types(self, val, as_text_type=False):
         """
         Converts any type into a CQL string, defaulting to ``cql_encode_object``
         if :attr:`~Encoder.mapping` does not contain an entry for the type.
         """
-        return self.mapping.get(type(val), self.cql_encode_object)(val)
+        encoded = self.mapping.get(type(val), self.cql_encode_object)(val)
+        if as_text_type and not isinstance(encoded, six.text_type):
+            return encoded.decode('utf-8')
+        return encoded
 
     if six.PY3:
         def cql_encode_ipaddress(self, val):
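
The as_text_type flag added above forces cql_encode_all_types to return a text (unicode) value so that the result can be concatenated into the unicode CQL strings built during metadata export (PYTHON-1008). A small sketch of the call, assuming driver 3.16.0; the option values are arbitrary examples::

    # Sketch only: the options dict is an arbitrary example.
    from cassandra.encoder import Encoder

    options = {'class_name': 'Class', 'delimiter': u'üñîçødé'}
    encoded = Encoder().cql_encode_all_types(options, as_text_type=True)
    # `encoded` is guaranteed to be a text type, safe to interpolate into a
    # unicode "CREATE CUSTOM INDEX ... WITH OPTIONS = ..." statement.
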
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/python-driver-3.15.1/cassandra/io/asyncorereactor.py new/python-driver-3.16.0/cassandra/io/asyncorereactor.py
--- old/python-driver-3.15.1/cassandra/io/asyncorereactor.py    2018-09-06 21:55:48.000000000 +0200
+++ new/python-driver-3.16.0/cassandra/io/asyncorereactor.py    2018-11-12 19:19:38.000000000 +0100
@@ -424,10 +424,14 @@
                     break
         except socket.error as err:
             if ssl and isinstance(err, ssl.SSLError):
-                if err.args[0] not in (ssl.SSL_ERROR_WANT_READ, ssl.SSL_ERROR_WANT_WRITE):
+                if err.args[0] in (ssl.SSL_ERROR_WANT_READ, ssl.SSL_ERROR_WANT_WRITE):
+                    return
+                else:
                     self.defunct(err)
                     return
-            elif err.args[0] not in NONBLOCKING:
+            elif err.args[0] in NONBLOCKING:
+                return
+            else:
                 self.defunct(err)
                 return
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/python-driver-3.15.1/cassandra/io/libevreactor.py new/python-driver-3.16.0/cassandra/io/libevreactor.py
--- old/python-driver-3.15.1/cassandra/io/libevreactor.py       2018-09-06 21:55:48.000000000 +0200
+++ new/python-driver-3.16.0/cassandra/io/libevreactor.py       2018-11-12 19:19:38.000000000 +0100
@@ -344,10 +344,14 @@
                     break
         except socket.error as err:
             if ssl and isinstance(err, ssl.SSLError):
-                if err.args[0] not in (ssl.SSL_ERROR_WANT_READ, ssl.SSL_ERROR_WANT_WRITE):
+                if err.args[0] in (ssl.SSL_ERROR_WANT_READ, ssl.SSL_ERROR_WANT_WRITE):
+                    return
+                else:
                     self.defunct(err)
                     return
-            elif err.args[0] not in NONBLOCKING:
+            elif err.args[0] in NONBLOCKING:
+                return
+            else:
                 self.defunct(err)
                 return
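
Both reactor patches above restructure the error handling so that transient "try again" conditions return immediately and everything else marks the connection defunct (PYTHON-1024). A simplified, driver-independent sketch of that classification::

    import errno
    import ssl

    NONBLOCKING = (errno.EAGAIN, errno.EWOULDBLOCK)
    SSL_RETRY = (ssl.SSL_ERROR_WANT_READ, ssl.SSL_ERROR_WANT_WRITE)

    def classify_socket_error(err):
        """Return 'retry' for transient nonblocking conditions, 'defunct' otherwise."""
        if isinstance(err, ssl.SSLError):
            return 'retry' if err.args[0] in SSL_RETRY else 'defunct'
        return 'retry' if err.args[0] in NONBLOCKING else 'defunct'

    assert classify_socket_error(OSError(errno.EAGAIN, 'try again')) == 'retry'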
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/python-driver-3.15.1/cassandra/metadata.py new/python-driver-3.16.0/cassandra/metadata.py
--- old/python-driver-3.15.1/cassandra/metadata.py      2018-09-06 21:55:48.000000000 +0200
+++ new/python-driver-3.16.0/cassandra/metadata.py      2018-11-12 19:19:38.000000000 +0100
@@ -1435,7 +1435,9 @@
                 index_target,
                 class_name)
             if options:
-                ret += " WITH OPTIONS = %s" % 
Encoder().cql_encode_all_types(options)
+                # PYTHON-1008: `ret` will always be a unicode
+                opts_cql_encoded = _encoder.cql_encode_all_types(options, 
as_text_type=True)
+                ret += " WITH OPTIONS = %s" % opts_cql_encoded
             return ret
 
     def export_as_string(self):
@@ -1665,8 +1667,30 @@
         self.connection = connection
         self.timeout = timeout
 
-    def _handle_results(self, success, result):
-        if success:
+    def _handle_results(self, success, result, expected_failures=tuple()):
+        """
+        Given a bool and a ResultSet (the form returned per result from
+        Connection.wait_for_responses), return a dictionary containing the
+        results. Used to process results from asynchronous queries to system
+        tables.
+
+        ``expected_failures`` will usually be used to allow callers to ignore
+        ``InvalidRequest`` errors caused by a missing system keyspace. For
+        example, some DSE versions report a 4.X server version, but do not have
+        virtual tables. Thus, running against 4.X servers, SchemaParserV4 uses
+        expected_failures to make a best-effort attempt to read those
+        keyspaces, but treat them as empty if they're not found.
+
+        :param success: A boolean representing whether or not the query
+        succeeded
+        :param result: The resultset in question.
+        :expected_failures: An Exception class or an iterable thereof. If the
+        query failed, but raised an instance of an expected failure class, this
+        will ignore the failure and return an empty list.
+        """
+        if not success and isinstance(result, expected_failures):
+            return []
+        elif success:
             return dict_factory(*result.results) if result else []
         else:
             raise result
@@ -1782,11 +1806,9 @@
         table_result = self._handle_results(cf_success, cf_result)
         col_result = self._handle_results(col_success, col_result)
 
-        # handle the triggers table not existing in Cassandra 1.2
-        if not triggers_success and isinstance(triggers_result, InvalidRequest):
-            triggers_result = []
-        else:
-            triggers_result = self._handle_results(triggers_success, triggers_result)
+        # the triggers table doesn't exist in C* 1.2
+        triggers_result = self._handle_results(triggers_success, triggers_result,
+                                               expected_failures=InvalidRequest)
 
         if table_result:
             return self._build_table_metadata(table_result[0], col_result, triggers_result)
@@ -2529,12 +2551,21 @@
         self.indexes_result = self._handle_results(indexes_success, indexes_result)
         self.views_result = self._handle_results(views_success, views_result)
         # V4-only results
-        self.virtual_keyspaces_result = self._handle_results(virtual_ks_success,
-                                                             virtual_ks_result)
-        self.virtual_tables_result = self._handle_results(virtual_table_success,
-                                                          virtual_table_result)
-        self.virtual_columns_result = self._handle_results(virtual_column_success,
-                                                           virtual_column_result)
+        # These tables don't exist in some DSE versions reporting 4.X so we can
+        # ignore them if we got an error
+        self.virtual_keyspaces_result = self._handle_results(
+            virtual_ks_success, virtual_ks_result,
+            expected_failures=InvalidRequest
+        )
+        self.virtual_tables_result = self._handle_results(
+            virtual_table_success, virtual_table_result,
+            expected_failures=InvalidRequest
+        )
+        self.virtual_columns_result = self._handle_results(
+            virtual_column_success, virtual_column_result,
+            expected_failures=InvalidRequest
+        )
+
         self._aggregate_results()
 
     def _aggregate_results(self):
@@ -2720,9 +2751,7 @@
 
 def get_schema_parser(connection, server_version, timeout):
     server_major_version = int(server_version.split('.')[0])
-    # check for DSE version
-    has_build_version = len(server_version.split('.')) > 3
-    if server_major_version >= 4 and not has_build_version:
+    if server_major_version >= 4:
         return SchemaParserV4(connection, timeout)
     if server_major_version >= 3:
         return SchemaParserV3(connection, timeout)
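
The expected_failures parameter introduced above lets the schema parsers treat a missing system table as an empty result instead of re-raising, which is what allows the virtual-table queries to degrade gracefully on DSE builds that report a 4.X server version (PYTHON-1020). A generic sketch of the pattern, not the driver's internal API::

    def handle_result(success, result, expected_failures=()):
        """Return result rows, an empty list for tolerated failures, or re-raise."""
        if not success and isinstance(result, expected_failures):
            return []  # e.g. the queried system table does not exist
        if success:
            return result or []
        raise result

    class FakeInvalidRequest(Exception):
        pass

    # A tolerated failure yields an empty result instead of raising.
    assert handle_result(False, FakeInvalidRequest(),
                         expected_failures=FakeInvalidRequest) == []
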
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/python-driver-3.15.1/docs.yaml new/python-driver-3.16.0/docs.yaml
--- old/python-driver-3.15.1/docs.yaml  2018-09-06 21:55:48.000000000 +0200
+++ new/python-driver-3.16.0/docs.yaml  2018-11-12 19:19:38.000000000 +0100
@@ -1,6 +1,7 @@
 title: DataStax Python Driver for Apache Cassandra
 summary: DataStax Python Driver for Apache Cassandra Documentation
 output: docs/_build/
+swiftype_drivers: pythondrivers
 checks:
   external_links:
     exclude:
@@ -21,6 +22,8 @@
       # build extensions like libev
       CASS_DRIVER_NO_CYTHON=1 python setup.py build_ext --inplace --force
 versions:
+  - name: '3.16'
+    ref: '3.16.0'
   - name: '3.15'
     ref: '2ce0bd97'
   - name: '3.14'
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/python-driver-3.15.1/tests/integration/long/test_loadbalancingpolicies.py new/python-driver-3.16.0/tests/integration/long/test_loadbalancingpolicies.py
--- old/python-driver-3.15.1/tests/integration/long/test_loadbalancingpolicies.py       2018-09-06 21:55:48.000000000 +0200
+++ new/python-driver-3.16.0/tests/integration/long/test_loadbalancingpolicies.py       2018-11-12 19:19:38.000000000 +0100
@@ -65,13 +65,20 @@
             self.probe_session = self.probe_cluster.connect()
 
     def _wait_for_nodes_up(self, nodes, cluster=None):
+        log.debug('entered: _wait_for_nodes_up(nodes={ns}, '
+                  'cluster={cs})'.format(ns=nodes,
+                                         cs=cluster))
         if not cluster:
+            log.debug('connecting to cluster')
             self._connect_probe_cluster()
             cluster = self.probe_cluster
         for n in nodes:
             wait_for_up(cluster, n)
 
     def _wait_for_nodes_down(self, nodes, cluster=None):
+        log.debug('entered: _wait_for_nodes_down(nodes={ns}, '
+                  'cluster={cs})'.format(ns=nodes,
+                                         cs=cluster))
         if not cluster:
             self._connect_probe_cluster()
             cluster = self.probe_cluster
@@ -87,6 +94,11 @@
 
     def _insert(self, session, keyspace, count=12,
                 consistency_level=ConsistencyLevel.ONE):
+        log.debug('entered _insert('
+                  'session={session}, keyspace={keyspace}, '
+                  'count={count}, consistency_level={consistency_level}'
+                  ')'.format(session=session, keyspace=keyspace, count=count,
+                             consistency_level=consistency_level))
         session.execute('USE %s' % keyspace)
         ss = SimpleStatement('INSERT INTO cf(k, i) VALUES (0, 0)', consistency_level=consistency_level)
 
@@ -94,6 +106,7 @@
         while tries < 100:
             try:
                 execute_concurrent_with_args(session, ss, [None] * count)
+                log.debug('Completed _insert on try #{}'.format(tries + 1))
                 return
             except (OperationTimedOut, WriteTimeout, WriteFailure):
                 ex_type, ex, tb = sys.exc_info()
@@ -105,6 +118,13 @@
 
     def _query(self, session, keyspace, count=12,
                consistency_level=ConsistencyLevel.ONE, use_prepared=False):
+        log.debug('entered _query('
+                  'session={session}, keyspace={keyspace}, '
+                  'count={count}, consistency_level={consistency_level}, '
+                  'use_prepared={use_prepared}'
+                  ')'.format(session=session, keyspace=keyspace, count=count,
+                             consistency_level=consistency_level,
+                             use_prepared=use_prepared))
         if use_prepared:
             query_string = 'SELECT * FROM %s.cf WHERE k = ?' % keyspace
             if not self.prepared or self.prepared.query_string != query_string:
@@ -549,7 +569,7 @@
 
         self._check_query_order_changes(session=session, keyspace=keyspace)
 
-        #check TokenAwarePolicy still return the remaining replicas when one goes down
+        # check TokenAwarePolicy still return the remaining replicas when one goes down
         self.coordinator_stats.reset_counts()
         stop(2)
         self._wait_for_nodes_down([2], cluster)
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/python-driver-3.15.1/tests/integration/long/utils.py new/python-driver-3.16.0/tests/integration/long/utils.py
--- old/python-driver-3.15.1/tests/integration/long/utils.py    2018-09-06 21:55:48.000000000 +0200
+++ new/python-driver-3.16.0/tests/integration/long/utils.py    2018-11-12 19:19:38.000000000 +0100
@@ -17,8 +17,9 @@
 import time
 
 from collections import defaultdict
-from ccmlib.node import Node
+from ccmlib.node import Node, ToolError
 
+from nose.tools import assert_in
 from cassandra.query import named_tuple_factory
 from cassandra.cluster import ConsistencyLevel
 
@@ -35,6 +36,7 @@
         self.coordinator_counts = defaultdict(int)
 
     def add_coordinator(self, future):
+        log.debug('adding coordinator from {}'.format(future))
         future.result()
         coordinator = future._current_host.address
         self.coordinator_counts[coordinator] += 1
@@ -100,11 +102,24 @@
 
 
 def decommission(node):
-    get_node(node).decommission()
+    try:
+        get_node(node).decommission()
+    except ToolError as e:
+        expected_errs = (('Not enough live nodes to maintain replication '
+                          'factor in keyspace system_distributed'),
+                         'Perform a forceful decommission to ignore.')
+        for err in expected_errs:
+            assert_in(err, e.stdout)
+        # in this case, we're running against a C* version with CASSANDRA-12510
+        # applied and need to decommission with `--force`
+        get_node(node).decommission(force=True)
     get_node(node).stop()
 
 
 def bootstrap(node, data_center=None, token=None):
+    log.debug('called bootstrap('
+              'node={node}, data_center={data_center}, '
+              'token={token})')
     node_instance = Node('node%s' % node,
                          get_cluster(),
                          auto_bootstrap=False,
@@ -118,12 +133,15 @@
 
     try:
         start(node)
-    except:
+    except Exception as e0:
+        log.debug('failed 1st bootstrap attempt with: \n{}'.format(e0))
         # Try only twice
         try:
             start(node)
-        except:
+        except Exception as e1:
+            log.debug('failed 2nd bootstrap attempt with: \n{}'.format(e1))
             log.error('Added node failed to start twice.')
+            raise e1
 
 
 def ring(node):
@@ -140,7 +158,7 @@
             log.debug("Done waiting for node %s to be up", node)
             return
         else:
-            log.debug("Host is still marked down, waiting")
+            log.debug("Host {} is still marked down, waiting".format(addr))
             tries += 1
             time.sleep(1)
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/python-driver-3.15.1/tests/integration/simulacron/__init__.py new/python-driver-3.16.0/tests/integration/simulacron/__init__.py
--- old/python-driver-3.15.1/tests/integration/simulacron/__init__.py   2018-09-06 21:55:48.000000000 +0200
+++ new/python-driver-3.16.0/tests/integration/simulacron/__init__.py   2018-11-12 19:19:38.000000000 +0100
@@ -35,20 +35,24 @@
 
 
 class SimulacronCluster(SimulacronBase):
+
+    cluster, connect = None, True
+
     @classmethod
     def setUpClass(cls):
         if SIMULACRON_JAR is None or CASSANDRA_VERSION < Version("2.1"):
             return
 
         start_and_prime_singledc()
-        cls.cluster = Cluster(protocol_version=PROTOCOL_VERSION, compression=False)
-        cls.session = cls.cluster.connect(wait_for_all_pools=True)
+        if cls.connect:
+            cls.cluster = Cluster(protocol_version=PROTOCOL_VERSION, compression=False)
+            cls.session = cls.cluster.connect(wait_for_all_pools=True)
 
     @classmethod
     def tearDownClass(cls):
         if SIMULACRON_JAR is None or CASSANDRA_VERSION < Version("2.1"):
             return
 
-        cls.cluster.shutdown()
+        if cls.cluster:
+            cls.cluster.shutdown()
         stop_simulacron()
-
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/python-driver-3.15.1/tests/integration/simulacron/test_cluster.py new/python-driver-3.16.0/tests/integration/simulacron/test_cluster.py
--- old/python-driver-3.15.1/tests/integration/simulacron/test_cluster.py       2018-09-06 21:55:48.000000000 +0200
+++ new/python-driver-3.16.0/tests/integration/simulacron/test_cluster.py       2018-11-12 19:19:38.000000000 +0100
@@ -17,10 +17,13 @@
     import unittest  # noqa
 
 from tests.integration.simulacron import SimulacronCluster
-from tests.integration import requiressimulacron
+from tests.integration import (requiressimulacron, PROTOCOL_VERSION)
 from tests.integration.simulacron.utils import prime_query
 
-from cassandra import WriteTimeout, WriteType, ConsistencyLevel
+from cassandra import (WriteTimeout, WriteType,
+                       ConsistencyLevel, UnresolvableContactPoints)
+from cassandra.cluster import Cluster
+
 
 @requiressimulacron
 class ClusterTests(SimulacronCluster):
@@ -53,3 +56,25 @@
         self.assertIn(consistency, str(wt))
         self.assertIn(str(received_responses), str(wt))
         self.assertIn(str(required_responses), str(wt))
+
+
+@requiressimulacron
+class ClusterDNSResolutionTests(SimulacronCluster):
+
+    connect = False
+
+    def tearDown(self):
+        if self.cluster:
+            self.cluster.shutdown()
+
+    def test_connection_with_one_unresolvable_contact_point(self):
+        # shouldn't raise anything due to name resolution failures
+        self.cluster = Cluster(['127.0.0.1', 'dns.invalid'],
+                               protocol_version=PROTOCOL_VERSION,
+                               compression=False)
+
+    def test_connection_with_only_unresolvable_contact_points(self):
+        with self.assertRaises(UnresolvableContactPoints):
+            self.cluster = Cluster(['dns.invalid'],
+                                   protocol_version=PROTOCOL_VERSION,
+                                   compression=False)
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/python-driver-3.15.1/tests/integration/standard/test_dse.py new/python-driver-3.16.0/tests/integration/standard/test_dse.py
--- old/python-driver-3.15.1/tests/integration/standard/test_dse.py     2018-09-06 21:55:48.000000000 +0200
+++ new/python-driver-3.16.0/tests/integration/standard/test_dse.py     2018-11-12 19:19:38.000000000 +0100
@@ -18,6 +18,7 @@
 
 from cassandra.cluster import Cluster
 from tests import notwindows
+from tests.unit.cython.utils import notcython
 from tests.integration import (execute_until_pass,
                                execute_with_long_wait_retry, use_cluster)
 
@@ -27,8 +28,12 @@
     import unittest  # noqa
 
 
+CCM_IS_DSE = (os.environ.get('CCM_IS_DSE', None) == 'true')
+
+
 @unittest.skipIf(os.environ.get('CCM_ARGS', None), 'environment has custom CCM_ARGS; skipping')
 @notwindows
+@notcython  # no need to double up on this test; also __default__ setting doesn't work
 class DseCCMClusterTest(unittest.TestCase):
     """
     This class can be executed setting the DSE_VERSION variable, for example:
@@ -42,6 +47,10 @@
     def test_dse_60(self):
         self._test_basic(Version('6.0.2'))
 
+    @unittest.skipUnless(CCM_IS_DSE, 'DSE version unavailable')
+    def test_dse_67(self):
+        self._test_basic(Version('6.7.0'))
+
     def _test_basic(self, dse_version):
         """
         Test basic connection and usage
@@ -52,7 +61,8 @@
         use_cluster(cluster_name=cluster_name, nodes=[3],
                     dse_cluster=True, dse_options={}, dse_version=dse_version)
 
-        cluster = Cluster()
+        cluster = Cluster(
+            allow_beta_protocol_version=(dse_version >= Version('6.7.0')))
         session = cluster.connect()
         result = execute_until_pass(
             session,
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/python-driver-3.15.1/tests/integration/standard/test_row_factories.py new/python-driver-3.16.0/tests/integration/standard/test_row_factories.py
--- old/python-driver-3.15.1/tests/integration/standard/test_row_factories.py   2018-09-06 21:55:48.000000000 +0200
+++ new/python-driver-3.16.0/tests/integration/standard/test_row_factories.py   2018-11-12 19:19:38.000000000 +0100
@@ -181,6 +181,32 @@
         self.assertEqual(result[1]['k'], result[1]['v'])
         self.assertEqual(result[1]['k'], 2)
 
+    def test_generator_row_factory(self):
+        """
+        Test that ResultSet.one() works with a row_factory that contains a generator.
+
+        @since 3.16
+        @jira_ticket PYTHON-1026
+        @expected_result one() returns the first row
+
+        @test_category queries
+        """
+        def generator_row_factory(column_names, rows):
+            return _gen_row_factory(rows)
+
+        def _gen_row_factory(rows):
+            for r in rows:
+                yield r
+
+        session = self.session
+        session.row_factory = generator_row_factory
+
+        session.execute(self.insert1)
+        result = session.execute(self.select)
+        self.assertIsInstance(result, ResultSet)
+        first_row = result.one()
+        self.assertEqual(first_row[0], first_row[1])
+
 
 class NamedTupleFactoryAndNumericColNamesTests(unittest.TestCase):
     """
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/python-driver-3.15.1/tests/unit/io/utils.py new/python-driver-3.16.0/tests/unit/io/utils.py
--- old/python-driver-3.15.1/tests/unit/io/utils.py     2018-09-06 21:55:48.000000000 +0200
+++ new/python-driver-3.16.0/tests/unit/io/utils.py     2018-11-12 19:19:38.000000000 +0100
@@ -25,9 +25,11 @@
 from mock import Mock
 
 import errno
+import logging
 import math
 import os
 from socket import error as socket_error
+import ssl
 
 try:
     import unittest2 as unittest
@@ -37,6 +39,9 @@
 import time
 
 
+log = logging.getLogger(__name__)
+
+
 class TimerCallback(object):
 
     invoked = False
@@ -247,18 +252,31 @@
         return c
 
     def test_eagain_on_buffer_size(self):
+        self._check_error_recovery_on_buffer_size(errno.EAGAIN)
+
+    def test_ewouldblock_on_buffer_size(self):
+        self._check_error_recovery_on_buffer_size(errno.EWOULDBLOCK)
+
+    def test_sslwantread_on_buffer_size(self):
+        self._check_error_recovery_on_buffer_size(ssl.SSL_ERROR_WANT_READ)
+
+    def test_sslwantwrite_on_buffer_size(self):
+        self._check_error_recovery_on_buffer_size(ssl.SSL_ERROR_WANT_WRITE)
+
+    def _check_error_recovery_on_buffer_size(self, error_code):
         c = self.test_successful_connection()
 
         header = six.b('\x00\x00\x00\x00') + int32_pack(20000)
         responses = [
             header + (six.b('a') * (4096 - len(header))),
             six.b('a') * 4096,
-            socket_error(errno.EAGAIN),
+            socket_error(error_code),
             six.b('a') * 100,
-            socket_error(errno.EAGAIN)]
+            socket_error(error_code)]
 
         def side_effect(*args):
             response = responses.pop(0)
+            log.debug('about to mock return {}'.format(response))
             if isinstance(response, socket_error):
                 raise response
             else:
@@ -266,7 +284,6 @@
 
         self.get_socket(c).recv.side_effect = side_effect
         c.handle_read(*self.null_handle_function_args)
-        self.assertEqual(c._current_frame.end_pos, 20000 + len(header))
         # the EAGAIN prevents it from reading the last 100 bytes
         c._iobuf.seek(0, os.SEEK_END)
         pos = c._iobuf.tell()
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/python-driver-3.15.1/tests/unit/test_metadata.py new/python-driver-3.16.0/tests/unit/test_metadata.py
--- old/python-driver-3.15.1/tests/unit/test_metadata.py        2018-09-06 21:55:48.000000000 +0200
+++ new/python-driver-3.16.0/tests/unit/test_metadata.py        2018-11-12 19:19:38.000000000 +0100
@@ -18,6 +18,7 @@
     import unittest  # noqa
 
 from binascii import unhexlify
+import logging
 from mock import Mock
 import os
 import six
@@ -38,6 +39,9 @@
 from cassandra.pool import Host
 
 
+log = logging.getLogger(__name__)
+
+
 class StrategiesTest(unittest.TestCase):
 
     @classmethod
@@ -536,9 +540,12 @@
 
     def test_index(self):
         im = IndexMetadata(self.name, self.name, self.name, kind='', index_options={'target': self.name})
-        im.export_as_string()
+        log.debug(im.export_as_string())
         im = IndexMetadata(self.name, self.name, self.name, kind='CUSTOM', index_options={'target': self.name, 'class_name': 'Class'})
-        im.export_as_string()
+        log.debug(im.export_as_string())
+        # PYTHON-1008
+        im = IndexMetadata(self.name, self.name, self.name, kind='CUSTOM', index_options={'target': self.name, 'class_name': 'Class', 'delimiter': self.name})
+        log.debug(im.export_as_string())
 
     def test_function(self):
         fm = Function(self.name, self.name, (u'int', u'int'), (u'x', u'y'), u'int', u'language', self.name, False)

